author    David S. Miller <davem@davemloft.net> 2016-10-30 12:42:58 -0400
committer David S. Miller <davem@davemloft.net> 2016-10-30 12:42:58 -0400
commit    27058af401e49d88a905df000dd26f443fcfa8ce (patch)
tree      819f32113d3b8374b9fbf72e2202d4c4d4511a60 /drivers
parent    firewire: net: really fix maximum possible MTU (diff)
parent    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Mostly simple overlapping changes. For example, David Ahern's adjacency list revamp in 'net-next' conflicted with an adjacency list traversal bug fix in 'net'.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Makefile | 2
-rw-r--r-- drivers/acpi/acpi_pad.c | 5
-rw-r--r-- drivers/acpi/acpica/dsinit.c | 11
-rw-r--r-- drivers/acpi/acpica/dsmethod.c | 50
-rw-r--r-- drivers/acpi/acpica/dswload2.c | 2
-rw-r--r-- drivers/acpi/acpica/evrgnini.c | 3
-rw-r--r-- drivers/acpi/acpica/nsload.c | 2
-rw-r--r-- drivers/acpi/apei/ghes.c | 2
-rw-r--r-- drivers/acpi/ec.c | 2
-rw-r--r-- drivers/acpi/fan.c | 12
-rw-r--r-- drivers/acpi/osl.c | 13
-rw-r--r-- drivers/acpi/pci_link.c | 38
-rw-r--r-- drivers/acpi/property.c | 117
-rw-r--r-- drivers/android/binder.c | 35
-rw-r--r-- drivers/ata/ahci.c | 156
-rw-r--r-- drivers/ata/ahci.h | 24
-rw-r--r-- drivers/ata/ahci_qoriq.c | 20
-rw-r--r-- drivers/ata/ahci_st.c | 4
-rw-r--r-- drivers/ata/libahci.c | 9
-rw-r--r-- drivers/ata/libata-scsi.c | 288
-rw-r--r-- drivers/ata/pata_at91.c | 4
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 3
-rw-r--r-- drivers/ata/sata_mv.c | 6
-rw-r--r-- drivers/auxdisplay/Kconfig | 9
-rw-r--r-- drivers/auxdisplay/Makefile | 1
-rw-r--r-- drivers/auxdisplay/img-ascii-lcd.c | 443
-rw-r--r-- drivers/base/Kconfig | 6
-rw-r--r-- drivers/block/DAC960.c | 4
-rw-r--r-- drivers/block/nbd.c | 2
-rw-r--r-- drivers/block/rbd.c | 50
-rw-r--r-- drivers/bluetooth/btwilink.c | 2
-rw-r--r-- drivers/bluetooth/hci_bcm.c | 8
-rw-r--r-- drivers/bus/Kconfig | 1
-rw-r--r-- drivers/char/hw_random/core.c | 6
-rw-r--r-- drivers/char/ipmi/Kconfig | 8
-rw-r--r-- drivers/char/ipmi/Makefile | 1
-rw-r--r-- drivers/char/ipmi/bt-bmc.c | 505
-rw-r--r-- drivers/char/ipmi/ipmi_msghandler.c | 7
-rw-r--r-- drivers/char/random.c | 4
-rw-r--r-- drivers/clk/at91/clk-programmable.c | 2
-rw-r--r-- drivers/clk/bcm/clk-bcm2835.c | 11
-rw-r--r-- drivers/clk/clk-max77686.c | 1
-rw-r--r-- drivers/clk/hisilicon/clk-hi6220.c | 4
-rw-r--r-- drivers/clk/mediatek/Kconfig | 2
-rw-r--r-- drivers/clk/mvebu/armada-37xx-periph.c | 11
-rw-r--r-- drivers/clk/samsung/clk-exynos-audss.c | 1
-rw-r--r-- drivers/clk/uniphier/clk-uniphier-core.c | 20
-rw-r--r-- drivers/clk/uniphier/clk-uniphier-mio.c | 2
-rw-r--r-- drivers/clk/uniphier/clk-uniphier-mux.c | 2
-rw-r--r-- drivers/clk/uniphier/clk-uniphier.h | 2
-rw-r--r-- drivers/clocksource/Kconfig | 10
-rw-r--r-- drivers/clocksource/Makefile | 1
-rw-r--r-- drivers/clocksource/jcore-pit.c | 249
-rw-r--r-- drivers/clocksource/timer-sun5i.c | 16
-rw-r--r-- drivers/cpufreq/cppc_cpufreq.c | 8
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 19
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 81
-rw-r--r-- drivers/cpuidle/Kconfig.mips | 2
-rw-r--r-- drivers/cpuidle/cpuidle-cps.c | 2
-rw-r--r-- drivers/dax/Kconfig | 2
-rw-r--r-- drivers/dax/pmem.c | 2
-rw-r--r-- drivers/devfreq/devfreq.c | 8
-rw-r--r-- drivers/devfreq/event/Kconfig | 1
-rw-r--r-- drivers/devfreq/event/exynos-nocp.c | 3
-rw-r--r-- drivers/extcon/extcon-qcom-spmi-misc.c | 2
-rw-r--r-- drivers/firewire/nosy.c | 13
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 5
-rw-r--r-- drivers/gpio/Kconfig | 2
-rw-r--r-- drivers/gpio/gpio-ath79.c | 1
-rw-r--r-- drivers/gpio/gpio-mpc8xxx.c | 2
-rw-r--r-- drivers/gpio/gpio-mxs.c | 8
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 16
-rw-r--r-- drivers/gpio/gpio-stmpe.c | 2
-rw-r--r-- drivers/gpio/gpio-ts4800.c | 1
-rw-r--r-- drivers/gpio/gpiolib-acpi.c | 7
-rw-r--r-- drivers/gpio/gpiolib.c | 42
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 69
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 15
-rw-r--r-- drivers/gpu/drm/amd/include/amd_shared.h | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 18
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 53
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c | 2
-rw-r--r-- drivers/gpu/drm/armada/armada_crtc.c | 18
-rw-r--r-- drivers/gpu/drm/ast/ast_ttm.c | 6
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_ttm.c | 7
-rw-r--r-- drivers/gpu/drm/drm_info.c | 4
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 24
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 7
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 3
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 3
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 23
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 5
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 39
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_userptr.c | 6
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_ttm.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/r600_dpm.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 17
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_i2c.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/sid.h | 1
-rw-r--r-- drivers/gpu/drm/via/via_dmablit.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 145
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 56
-rw-r--r-- drivers/hid/hid-dr.c | 83
-rw-r--r-- drivers/hid/hid-ids.h | 3
-rw-r--r-- drivers/hid/hid-led.c | 23
-rw-r--r-- drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r-- drivers/hv/hv_util.c | 10
-rw-r--r-- drivers/hwmon/adm9240.c | 6
-rw-r--r-- drivers/hwmon/max31790.c | 4
-rw-r--r-- drivers/i2c/busses/Kconfig | 12
-rw-r--r-- drivers/i2c/busses/i2c-designware-core.c | 17
-rw-r--r-- drivers/i2c/busses/i2c-digicolor.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 16
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 11
-rw-r--r-- drivers/i2c/busses/i2c-jz4780.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-rk3x.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-xgene-slimpro.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-xlp9xx.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-xlr.c | 1
-rw-r--r-- drivers/i2c/i2c-core.c | 21
-rw-r--r-- drivers/iio/adc/Kconfig | 2
-rw-r--r-- drivers/iio/chemical/atlas-ph-sensor.c | 7
-rw-r--r-- drivers/iio/temperature/maxim_thermocouple.c | 16
-rw-r--r-- drivers/infiniband/Kconfig | 2
-rw-r--r-- drivers/infiniband/core/umem.c | 6
-rw-r--r-- drivers/infiniband/core/umem_odp.c | 7
-rw-r--r-- drivers/infiniband/hw/Makefile | 1
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_cq.c | 23
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_device.h | 18
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_eq.c | 146
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_eq.h | 4
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hem.c | 76
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hem.h | 4
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 293
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v1.h | 9
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_main.c | 36
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_mr.c | 3
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_pd.c | 22
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_qp.c | 67
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 1
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_memfree.c | 2
-rw-r--r-- drivers/infiniband/hw/qedr/Kconfig | 8
-rw-r--r-- drivers/infiniband/hw/qedr/Makefile | 3
-rw-r--r-- drivers/infiniband/hw/qedr/main.c | 914
-rw-r--r-- drivers/infiniband/hw/qedr/qedr.h | 495
-rw-r--r-- drivers/infiniband/hw/qedr/qedr_cm.c | 622
-rw-r--r-- drivers/infiniband/hw/qedr/qedr_cm.h | 61
-rw-r--r-- drivers/infiniband/hw/qedr/qedr_hsi.h | 56
-rw-r--r-- drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 748
-rw-r--r-- drivers/infiniband/hw/qedr/verbs.c | 3547
-rw-r--r-- drivers/infiniband/hw/qedr/verbs.h | 101
-rw-r--r-- drivers/infiniband/hw/qib/qib_user_pages.c | 3
-rw-r--r-- drivers/infiniband/hw/usnic/usnic_uiom.c | 5
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib.h | 20
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 15
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 12
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 54
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 6
-rw-r--r-- drivers/input/mouse/alps.c | 87
-rw-r--r-- drivers/input/mouse/alps.h | 2
-rw-r--r-- drivers/input/mouse/elantech.c | 27
-rw-r--r-- drivers/input/rmi4/rmi_i2c.c | 38
-rw-r--r-- drivers/input/rmi4/rmi_spi.c | 22
-rw-r--r-- drivers/input/serio/i8042-io.h | 2
-rw-r--r-- drivers/input/serio/i8042-ip22io.h | 2
-rw-r--r-- drivers/input/serio/i8042-ppcio.h | 2
-rw-r--r-- drivers/input/serio/i8042-sparcio.h | 2
-rw-r--r-- drivers/input/serio/i8042-unicore32io.h | 2
-rw-r--r-- drivers/input/serio/i8042-x86ia64io.h | 96
-rw-r--r-- drivers/input/serio/i8042.c | 55
-rw-r--r-- drivers/input/touchscreen/melfas_mip4.c | 38
-rw-r--r-- drivers/ipack/ipack.c | 2
-rw-r--r-- drivers/irqchip/Kconfig | 4
-rw-r--r-- drivers/irqchip/irq-eznps.c | 6
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 10
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r-- drivers/irqchip/irq-gic.c | 2
-rw-r--r-- drivers/irqchip/irq-i8259.c | 30
-rw-r--r-- drivers/irqchip/irq-jcore-aic.c | 20
-rw-r--r-- drivers/md/dm-raid.c | 15
-rw-r--r-- drivers/md/dm-raid1.c | 22
-rw-r--r-- drivers/md/dm-rq.c | 7
-rw-r--r-- drivers/md/dm-table.c | 24
-rw-r--r-- drivers/md/dm.c | 4
-rw-r--r-- drivers/media/pci/ivtv/ivtv-udma.c | 4
-rw-r--r-- drivers/media/pci/ivtv/ivtv-yuv.c | 5
-rw-r--r-- drivers/media/platform/omap/omap_vout.c | 2
-rw-r--r-- drivers/media/v4l2-core/Kconfig | 2
-rw-r--r-- drivers/media/v4l2-core/videobuf-dma-sg.c | 7
-rw-r--r-- drivers/media/v4l2-core/videobuf2-memops.c | 6
-rw-r--r-- drivers/memstick/host/rtsx_usb_ms.c | 6
-rw-r--r-- drivers/misc/cxl/api.c | 11
-rw-r--r-- drivers/misc/cxl/context.c | 3
-rw-r--r-- drivers/misc/cxl/cxl.h | 24
-rw-r--r-- drivers/misc/cxl/file.c | 15
-rw-r--r-- drivers/misc/cxl/guest.c | 3
-rw-r--r-- drivers/misc/cxl/main.c | 42
-rw-r--r-- drivers/misc/cxl/pci.c | 2
-rw-r--r-- drivers/misc/cxl/sysfs.c | 27
-rw-r--r-- drivers/misc/genwqe/card_utils.c | 12
-rw-r--r-- drivers/misc/mei/hw-txe.c | 6
-rw-r--r-- drivers/misc/mic/scif/scif_rma.c | 3
-rw-r--r-- drivers/misc/sgi-gru/grufault.c | 2
-rw-r--r-- drivers/misc/sgi-gru/grumain.c | 2
-rw-r--r-- drivers/misc/vmw_vmci/vmci_doorbell.c | 8
-rw-r--r-- drivers/misc/vmw_vmci/vmci_driver.c | 2
-rw-r--r-- drivers/mmc/card/block.c | 3
-rw-r--r-- drivers/mmc/card/queue.h | 2
-rw-r--r-- drivers/mmc/core/mmc.c | 12
-rw-r--r-- drivers/mmc/host/rtsx_usb_sdmmc.c | 7
-rw-r--r-- drivers/mmc/host/sdhci-esdhc-imx.c | 23
-rw-r--r-- drivers/mmc/host/sdhci-of-arasan.c | 26
-rw-r--r-- drivers/mmc/host/sdhci-pci-core.c | 54
-rw-r--r-- drivers/mmc/host/sdhci-pci.h | 2
-rw-r--r-- drivers/mmc/host/sdhci-pxav3.c | 2
-rw-r--r-- drivers/mmc/host/sdhci.c | 42
-rw-r--r-- drivers/mmc/host/sdhci.h | 2
-rw-r--r-- drivers/mtd/ubi/eba.c | 1
-rw-r--r-- drivers/mtd/ubi/fastmap.c | 12
-rw-r--r-- drivers/net/dsa/b53/b53_mmap.c | 1
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 16
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 17
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 40
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sched.c | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_rq.c | 32
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 11
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 13
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 11
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 16
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 107
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns_mdio.c | 1
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 45
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 34
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 12
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_clock.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_port.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 62
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/port.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 50
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 76
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig | 12
-rw-r--r-- drivers/net/ethernet/qlogic/qed/Makefile | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 15
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.c | 53
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 19
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 27
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 20
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 31
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.c | 216
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.h | 95
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp.h | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_spq.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qede/Makefile | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 99
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 15
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 8
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.c | 1
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 3
-rw-r--r-- drivers/net/ethernet/rocker/rocker_main.c | 2
-rw-r--r-- drivers/net/ethernet/rocker/rocker_ofdpa.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 13
-rw-r--r-- drivers/net/ethernet/synopsys/dwc_eth_qos.c | 4
-rw-r--r-- drivers/net/geneve.c | 47
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 25
-rw-r--r-- drivers/net/macsec.c | 26
-rw-r--r-- drivers/net/phy/at803x.c | 65
-rw-r--r-- drivers/net/phy/dp83848.c | 3
-rw-r--r-- drivers/net/usb/asix_common.c | 8
-rw-r--r-- drivers/net/usb/kalmia.c | 2
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 17
-rw-r--r-- drivers/net/vrf.c | 2
-rw-r--r-- drivers/net/vxlan.c | 82
-rw-r--r-- drivers/net/wan/Kconfig | 2
-rw-r--r-- drivers/net/wan/slic_ds26522.c | 8
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath10k/debug.c | 75
-rw-r--r-- drivers/net/wireless/ath/ath6kl/sdio.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_calib.c | 25
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.h | 1
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/core.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | 13
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c | 9
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 18
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/wifi.h | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/sdio.c | 1
-rw-r--r-- drivers/nvdimm/Kconfig | 2
-rw-r--r-- drivers/nvdimm/namespace_devs.c | 14
-rw-r--r-- drivers/nvdimm/pmem.c | 8
-rw-r--r-- drivers/nvme/host/core.c | 14
-rw-r--r-- drivers/nvme/host/pci.c | 77
-rw-r--r-- drivers/nvme/host/scsi.c | 4
-rw-r--r-- drivers/nvme/target/admin-cmd.c | 8
-rw-r--r-- drivers/nvme/target/core.c | 2
-rw-r--r-- drivers/nvme/target/discovery.c | 4
-rw-r--r-- drivers/of/platform.c | 1
-rw-r--r-- drivers/pci/host/pci-layerscape.c | 2
-rw-r--r-- drivers/pci/host/pcie-designware-plat.c | 2
-rw-r--r-- drivers/pci/msi.c | 2
-rw-r--r-- drivers/perf/xgene_pmu.c | 2
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | 100
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed.c | 12
-rw-r--r-- drivers/pinctrl/intel/pinctrl-baytrail.c | 3
-rw-r--r-- drivers/pinctrl/intel/pinctrl-intel.c | 25
-rw-r--r-- drivers/platform/goldfish/goldfish_pipe.c | 3
-rw-r--r-- drivers/platform/x86/Kconfig | 1
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c | 7
-rw-r--r-- drivers/rapidio/devices/rio_mport_cdev.c | 3
-rw-r--r-- drivers/reset/reset-uniphier.c | 16
-rw-r--r-- drivers/rtc/Kconfig | 38
-rw-r--r-- drivers/rtc/Makefile | 1
-rw-r--r-- drivers/rtc/rtc-ac100.c | 5
-rw-r--r-- drivers/rtc/rtc-asm9260.c | 20
-rw-r--r-- drivers/rtc/rtc-at32ap700x.c | 2
-rw-r--r-- drivers/rtc/rtc-bq32k.c | 16
-rw-r--r-- drivers/rtc/rtc-cmos.c | 93
-rw-r--r-- drivers/rtc/rtc-coh901331.c | 2
-rw-r--r-- drivers/rtc/rtc-davinci.c | 2
-rw-r--r-- drivers/rtc/rtc-digicolor.c | 2
-rw-r--r-- drivers/rtc/rtc-ds1302.c | 2
-rw-r--r-- drivers/rtc/rtc-ds1307.c | 54
-rw-r--r-- drivers/rtc/rtc-ds1347.c | 96
-rw-r--r-- drivers/rtc/rtc-gemini.c | 2
-rw-r--r-- drivers/rtc/rtc-isl12057.c | 643
-rw-r--r-- drivers/rtc/rtc-jz4740.c | 2
-rw-r--r-- drivers/rtc/rtc-mcp795.c | 2
-rw-r--r-- drivers/rtc/rtc-mt6397.c | 2
-rw-r--r-- drivers/rtc/rtc-nuc900.c | 2
-rw-r--r-- drivers/rtc/rtc-omap.c | 170
-rw-r--r-- drivers/rtc/rtc-palmas.c | 2
-rw-r--r-- drivers/rtc/rtc-pcf2123.c | 5
-rw-r--r-- drivers/rtc/rtc-pcf50633.c | 2
-rw-r--r-- drivers/rtc/rtc-pic32.c | 1
-rw-r--r-- drivers/rtc/rtc-rv8803.c | 50
-rw-r--r-- drivers/rtc/rtc-rx6110.c | 3
-rw-r--r-- drivers/rtc/rtc-rx8025.c | 2
-rw-r--r-- drivers/rtc/rtc-spear.c | 2
-rw-r--r-- drivers/rtc/rtc-stmp3xxx.c | 2
-rw-r--r-- drivers/rtc/rtc-sysfs.c | 4
-rw-r--r-- drivers/rtc/rtc-tegra.c | 2
-rw-r--r-- drivers/rtc/rtc-twl.c | 2
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 4
-rw-r--r-- drivers/s390/cio/chp.c | 6
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 2
-rw-r--r-- drivers/scsi/NCR5380.c | 6
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 49
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 5
-rw-r--r-- drivers/scsi/g_NCR5380.c | 699
-rw-r--r-- drivers/scsi/g_NCR5380.h | 8
-rw-r--r-- drivers/scsi/ipr.c | 3
-rw-r--r-- drivers/scsi/libiscsi.c | 4
-rw-r--r-- drivers/scsi/scsi_dh.c | 6
-rw-r--r-- drivers/scsi/scsi_scan.c | 6
-rw-r--r-- drivers/scsi/st.c | 5
-rw-r--r-- drivers/scsi/ufs/Kconfig | 2
-rw-r--r-- drivers/scsi/ufs/ufs_quirks.h | 2
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 10
-rw-r--r-- drivers/soc/Kconfig | 1
-rw-r--r-- drivers/soc/fsl/Makefile | 1
-rw-r--r-- drivers/soc/fsl/qbman/Kconfig | 67
-rw-r--r-- drivers/soc/fsl/qbman/Makefile | 12
-rw-r--r-- drivers/soc/fsl/qbman/bman.c | 797
-rw-r--r-- drivers/soc/fsl/qbman/bman_ccsr.c | 263
-rw-r--r-- drivers/soc/fsl/qbman/bman_portal.c | 219
-rw-r--r-- drivers/soc/fsl/qbman/bman_priv.h | 80
-rw-r--r-- drivers/soc/fsl/qbman/bman_test.c | 53
-rw-r--r-- drivers/soc/fsl/qbman/bman_test.h | 35
-rw-r--r-- drivers/soc/fsl/qbman/bman_test_api.c | 151
-rw-r--r-- drivers/soc/fsl/qbman/dpaa_sys.h | 103
-rw-r--r-- drivers/soc/fsl/qbman/qman.c | 2881
-rw-r--r-- drivers/soc/fsl/qbman/qman_ccsr.c | 808
-rw-r--r-- drivers/soc/fsl/qbman/qman_portal.c | 355
-rw-r--r-- drivers/soc/fsl/qbman/qman_priv.h | 371
-rw-r--r-- drivers/soc/fsl/qbman/qman_test.c | 62
-rw-r--r-- drivers/soc/fsl/qbman/qman_test.h | 36
-rw-r--r-- drivers/soc/fsl/qbman/qman_test_api.c | 252
-rw-r--r-- drivers/soc/fsl/qbman/qman_test_stash.c | 617
-rw-r--r-- drivers/soc/fsl/qe/gpio.c | 3
-rw-r--r-- drivers/soc/fsl/qe/qe.c | 10
-rw-r--r-- drivers/soc/fsl/qe/qe_common.c | 8
-rw-r--r-- drivers/soc/fsl/qe/qe_tdm.c | 4
-rw-r--r-- drivers/staging/android/ion/ion.c | 6
-rw-r--r-- drivers/staging/android/ion/ion_of.c | 2
-rw-r--r-- drivers/staging/greybus/arche-platform.c | 1
-rw-r--r-- drivers/staging/greybus/es2.c | 3
-rw-r--r-- drivers/staging/greybus/gpio.c | 6
-rw-r--r-- drivers/staging/greybus/module.c | 2
-rw-r--r-- drivers/staging/greybus/uart.c | 2
-rw-r--r-- drivers/staging/iio/accel/sca3000_core.c | 2
-rw-r--r-- drivers/staging/lustre/lustre/llite/lproc_llite.c | 34
-rw-r--r-- drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c | 3
-rw-r--r-- drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 3
-rw-r--r-- drivers/staging/wilc1000/host_interface.c | 1
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 6
-rw-r--r-- drivers/target/iscsi/iscsi_target_login.c | 4
-rw-r--r-- drivers/target/target_core_transport.c | 39
-rw-r--r-- drivers/target/target_core_user.c | 50
-rw-r--r-- drivers/target/target_core_xcopy.c | 34
-rw-r--r-- drivers/target/tcm_fc/tfc_cmd.c | 4
-rw-r--r-- drivers/target/tcm_fc/tfc_sess.c | 42
-rw-r--r-- drivers/thermal/intel_pch_thermal.c | 60
-rw-r--r-- drivers/thermal/intel_powerclamp.c | 14
-rw-r--r-- drivers/tty/serial/8250/8250_lpss.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_port.c | 3
-rw-r--r-- drivers/tty/serial/8250/8250_uniphier.c | 4
-rw-r--r-- drivers/tty/serial/Kconfig | 1
-rw-r--r-- drivers/tty/serial/atmel_serial.c | 26
-rw-r--r-- drivers/tty/serial/fsl_lpuart.c | 3
-rw-r--r-- drivers/tty/serial/pch_uart.c | 1
-rw-r--r-- drivers/tty/serial/sc16is7xx.c | 8
-rw-r--r-- drivers/tty/serial/serial_core.c | 8
-rw-r--r-- drivers/tty/serial/stm32-usart.h | 2
-rw-r--r-- drivers/tty/serial/xilinx_uartps.c | 2
-rw-r--r-- drivers/tty/vt/vt.c | 7
-rw-r--r-- drivers/usb/chipidea/host.c | 2
-rw-r--r-- drivers/usb/dwc2/core.c | 11
-rw-r--r-- drivers/usb/dwc2/core.h | 7
-rw-r--r-- drivers/usb/dwc2/gadget.c | 53
-rw-r--r-- drivers/usb/dwc3/gadget.c | 26
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 107
-rw-r--r-- drivers/usb/gadget/function/u_ether.c | 5
-rw-r--r-- drivers/usb/gadget/udc/atmel_usba_udc.c | 2
-rw-r--r-- drivers/usb/host/ehci-hcd.c | 5
-rw-r--r-- drivers/usb/host/ehci-platform.c | 2
-rw-r--r-- drivers/usb/host/ehci-sead3.c | 185
-rw-r--r-- drivers/usb/host/ohci-at91.c | 9
-rw-r--r-- drivers/usb/host/ohci-hcd.c | 2
-rw-r--r-- drivers/usb/host/xhci-hub.c | 41
-rw-r--r-- drivers/usb/host/xhci-pci.c | 10
-rw-r--r-- drivers/usb/host/xhci.h | 3
-rw-r--r-- drivers/usb/musb/musb_gadget.c | 4
-rw-r--r-- drivers/usb/musb/omap2430.c | 7
-rw-r--r-- drivers/usb/renesas_usbhs/rcar3.c | 8
-rw-r--r-- drivers/usb/serial/cp210x.c | 4
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 3
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 5
-rw-r--r-- drivers/usb/serial/usb-serial.c | 3
-rw-r--r-- drivers/usb/wusbcore/crypto.c | 61
-rw-r--r-- drivers/video/fbdev/Kconfig | 2
-rw-r--r-- drivers/video/fbdev/cobalt_lcdfb.c | 42
-rw-r--r-- drivers/video/fbdev/pvr2fb.c | 4
-rw-r--r-- drivers/virt/fsl_hypervisor.c | 4
-rw-r--r-- drivers/vme/vme.c | 4
-rw-r--r-- drivers/watchdog/wdat_wdt.c | 4
-rw-r--r-- drivers/xen/manage.c | 45
-rw-r--r-- drivers/xen/xenbus/xenbus_dev_frontend.c | 4
-rw-r--r-- drivers/xen/xenbus/xenbus_probe_frontend.c | 4
522 files changed, 20355 insertions(+), 4049 deletions(-)
diff --git a/drivers/Makefile b/drivers/Makefile
index f0afdfb3c7df..194d20bee7dc 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -21,7 +21,7 @@ obj-y += video/
obj-y += idle/
# IPMI must come before ACPI in order to provide IPMI opregion support
-obj-$(CONFIG_IPMI_HANDLER) += char/ipmi/
+obj-y += char/ipmi/
obj-$(CONFIG_ACPI) += acpi/
obj-$(CONFIG_SFI) += sfi/
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 8ea8211b2d58..eb76a4c10dbf 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <asm/mwait.h>
+#include <xen/xen.h>
#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
@@ -477,6 +478,10 @@ static struct acpi_driver acpi_pad_driver = {
static int __init acpi_pad_init(void)
{
+ /* Xen ACPI PAD is used when running as Xen Dom0. */
+ if (xen_initial_domain())
+ return -ENODEV;
+
power_saving_mwait_init();
if (power_saving_mwait_eax == 0)
return -EINVAL;
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index f1e6dcc7a827..54d48b90de2c 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -46,6 +46,7 @@
#include "acdispat.h"
#include "acnamesp.h"
#include "actables.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsinit")
@@ -214,23 +215,17 @@ acpi_ds_initialize_objects(u32 table_index,
/* Walk entire namespace from the supplied root */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
/*
* We don't use acpi_walk_namespace since we do not want to acquire
* the namespace reader lock.
*/
status =
acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
- ACPI_NS_WALK_UNLOCK, acpi_ds_init_one_object,
- NULL, &info, NULL);
+ 0, acpi_ds_init_one_object, NULL, &info,
+ NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 32e9ddc0cf2b..2b3210f42a46 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -99,14 +99,11 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
"Method auto-serialization parse [%4.4s] %p\n",
acpi_ut_get_node_name(node), node));
- acpi_ex_enter_interpreter();
-
/* Create/Init a root op for the method parse tree */
op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
if (!op) {
- status = AE_NO_MEMORY;
- goto unlock;
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
acpi_ps_set_name(op, node->name.integer);
@@ -118,8 +115,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
if (!walk_state) {
acpi_ps_free_op(op);
- status = AE_NO_MEMORY;
- goto unlock;
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
status = acpi_ds_init_aml_walk(walk_state, op, node,
@@ -138,8 +134,6 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
status = acpi_ps_parse_aml(walk_state);
acpi_ps_delete_parse_tree(op);
-unlock:
- acpi_ex_exit_interpreter();
return_ACPI_STATUS(status);
}
@@ -731,26 +725,6 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
acpi_ds_method_data_delete_all(walk_state);
/*
- * If method is serialized, release the mutex and restore the
- * current sync level for this thread
- */
- if (method_desc->method.mutex) {
-
- /* Acquisition Depth handles recursive calls */
-
- method_desc->method.mutex->mutex.acquisition_depth--;
- if (!method_desc->method.mutex->mutex.acquisition_depth) {
- walk_state->thread->current_sync_level =
- method_desc->method.mutex->mutex.
- original_sync_level;
-
- acpi_os_release_mutex(method_desc->method.
- mutex->mutex.os_mutex);
- method_desc->method.mutex->mutex.thread_id = 0;
- }
- }
-
- /*
* Delete any namespace objects created anywhere within the
* namespace by the execution of this method. Unless:
* 1) This method is a module-level executable code method, in which
@@ -786,6 +760,26 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
~ACPI_METHOD_MODIFIED_NAMESPACE;
}
}
+
+ /*
+ * If method is serialized, release the mutex and restore the
+ * current sync level for this thread
+ */
+ if (method_desc->method.mutex) {
+
+ /* Acquisition Depth handles recursive calls */
+
+ method_desc->method.mutex->mutex.acquisition_depth--;
+ if (!method_desc->method.mutex->mutex.acquisition_depth) {
+ walk_state->thread->current_sync_level =
+ method_desc->method.mutex->mutex.
+ original_sync_level;
+
+ acpi_os_release_mutex(method_desc->method.
+ mutex->mutex.os_mutex);
+ method_desc->method.mutex->mutex.thread_id = 0;
+ }
+ }
}
/* Decrement the thread count on the method */
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 028b22a3154e..e36218206bb0 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -607,11 +607,9 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
}
}
- acpi_ex_exit_interpreter();
status =
acpi_ev_initialize_region
(acpi_ns_get_attached_object(node), FALSE);
- acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
/*
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 3843f1fc5dbb..75ddd160a716 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -45,6 +45,7 @@
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evrgnini")
@@ -597,9 +598,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
}
}
+ acpi_ex_exit_interpreter();
status =
acpi_ev_execute_reg_method(region_obj,
ACPI_REG_CONNECT);
+ acpi_ex_enter_interpreter();
if (acpi_ns_locked) {
status =
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 334d3c5ba617..d1f20143bb11 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -137,7 +137,9 @@ unlock:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"**** Begin Table Object Initialization\n"));
+ acpi_ex_enter_interpreter();
status = acpi_ds_initialize_objects(table_index, node);
+ acpi_ex_exit_interpreter();
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"**** Completed Table Object Initialization\n"));
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f0a029e68d3e..0d099a24f776 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -662,7 +662,7 @@ static int ghes_proc(struct ghes *ghes)
ghes_do_proc(ghes, ghes->estatus);
out:
ghes_clear_estatus(ghes);
- return 0;
+ return rc;
}
static void ghes_add_timer(struct ghes *ghes)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 680531062160..48e19d013170 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -526,6 +526,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
acpi_ec_clear(ec);
}
+#ifdef CONFIG_PM_SLEEP
static bool acpi_ec_query_flushed(struct acpi_ec *ec)
{
bool flushed;
@@ -557,6 +558,7 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
spin_unlock_irqrestore(&ec->lock, flags);
__acpi_ec_flush_event(ec);
}
+#endif /* CONFIG_PM_SLEEP */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 384cfc3083e1..6cf4988206f2 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -129,8 +129,18 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
control = obj->package.elements[1].integer.value;
for (i = 0; i < fan->fps_count; i++) {
- if (control == fan->fps[i].control)
+ /*
+ * When Fine Grain Control is set, return the state
+ * corresponding to maximum fan->fps[i].control
+ * value compared to the current speed. Here the
+ * fan->fps[] is sorted array with increasing speed.
+ */
+ if (fan->fif.fine_grain_ctrl && control < fan->fps[i].control) {
+ i = (i > 0) ? i - 1 : 0;
break;
+ } else if (control == fan->fps[i].control) {
+ break;
+ }
}
if (i == fan->fps_count) {
dev_dbg(&device->dev, "Invalid control value returned\n");
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 4305ee9db4b2..416953a42510 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -162,11 +162,18 @@ void acpi_os_vprintf(const char *fmt, va_list args)
if (acpi_in_debugger) {
kdb_printf("%s", buffer);
} else {
- printk(KERN_CONT "%s", buffer);
+ if (printk_get_level(buffer))
+ printk("%s", buffer);
+ else
+ printk(KERN_CONT "%s", buffer);
}
#else
- if (acpi_debugger_write_log(buffer) < 0)
- printk(KERN_CONT "%s", buffer);
+ if (acpi_debugger_write_log(buffer) < 0) {
+ if (printk_get_level(buffer))
+ printk("%s", buffer);
+ else
+ printk(KERN_CONT "%s", buffer);
+ }
#endif
}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index c983bf733ad3..bc3d914dfc3e 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -87,6 +87,7 @@ struct acpi_pci_link {
static LIST_HEAD(acpi_link_list);
static DEFINE_MUTEX(acpi_link_lock);
+static int sci_irq = -1, sci_penalty;
/* --------------------------------------------------------------------------
PCI Link Device Management
@@ -496,25 +497,13 @@ static int acpi_irq_get_penalty(int irq)
{
int penalty = 0;
- /*
- * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
- * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
- * use for PCI IRQs.
- */
- if (irq == acpi_gbl_FADT.sci_interrupt) {
- u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
-
- if (type != IRQ_TYPE_LEVEL_LOW)
- penalty += PIRQ_PENALTY_ISA_ALWAYS;
- else
- penalty += PIRQ_PENALTY_PCI_USING;
- }
+ if (irq == sci_irq)
+ penalty += sci_penalty;
if (irq < ACPI_MAX_ISA_IRQS)
return penalty + acpi_isa_irq_penalty[irq];
- penalty += acpi_irq_pci_sharing_penalty(irq);
- return penalty;
+ return penalty + acpi_irq_pci_sharing_penalty(irq);
}
int __init acpi_irq_penalty_init(void)
@@ -619,6 +608,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_device_bid(link->device));
return -ENODEV;
} else {
+ if (link->irq.active < ACPI_MAX_ISA_IRQS)
+ acpi_isa_irq_penalty[link->irq.active] +=
+ PIRQ_PENALTY_PCI_USING;
+
printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
@@ -849,7 +842,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
continue;
if (used)
- new_penalty = acpi_irq_get_penalty(irq) +
+ new_penalty = acpi_isa_irq_penalty[irq] +
PIRQ_PENALTY_ISA_USED;
else
new_penalty = 0;
@@ -871,7 +864,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
void acpi_penalize_isa_irq(int irq, int active)
{
if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
- acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
+ acpi_isa_irq_penalty[irq] +=
(active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
}
@@ -881,6 +874,17 @@ bool acpi_isa_irq_available(int irq)
acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
}
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
+{
+ sci_irq = irq;
+
+ if (trigger == ACPI_MADT_TRIGGER_LEVEL &&
+ polarity == ACPI_MADT_POLARITY_ACTIVE_LOW)
+ sci_penalty = PIRQ_PENALTY_PCI_USING;
+ else
+ sci_penalty = PIRQ_PENALTY_ISA_ALWAYS;
+}
+
/*
* Over-ride default table to reserve additional IRQs for use by ISA
* e.g. acpi_irq_isa=5
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index f2fd3fee588a..03f5ec11ab31 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -468,10 +468,11 @@ static int acpi_data_get_property_array(struct acpi_device_data *data,
}
/**
- * acpi_data_get_property_reference - returns handle to the referenced object
- * @data: ACPI device data object containing the property
+ * __acpi_node_get_property_reference - returns handle to the referenced object
+ * @fwnode: Firmware node to get the property from
* @propname: Name of the property
* @index: Index of the reference to return
+ * @num_args: Maximum number of arguments after each reference
* @args: Location to store the returned reference with optional arguments
*
* Find property with @name, verifify that it is a package containing at least
@@ -482,17 +483,40 @@ static int acpi_data_get_property_array(struct acpi_device_data *data,
* If there's more than one reference in the property value package, @index is
* used to select the one to return.
*
+ * It is possible to leave holes in the property value set like in the
+ * example below:
+ *
+ * Package () {
+ * "cs-gpios",
+ * Package () {
+ * ^GPIO, 19, 0, 0,
+ * ^GPIO, 20, 0, 0,
+ * 0,
+ * ^GPIO, 21, 0, 0,
+ * }
+ * }
+ *
+ * Calling this function with index %2 return %-ENOENT and with index %3
+ * returns the last entry. If the property does not contain any more values
+ * %-ENODATA is returned. The NULL entry must be single integer and
+ * preferably contain value %0.
+ *
* Return: %0 on success, negative error code on failure.
*/
-static int acpi_data_get_property_reference(struct acpi_device_data *data,
- const char *propname, size_t index,
- struct acpi_reference_args *args)
+int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+ const char *propname, size_t index, size_t num_args,
+ struct acpi_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
+ struct acpi_device_data *data;
struct acpi_device *device;
int ret, idx = 0;
+ data = acpi_device_data_of_node(fwnode);
+ if (!data)
+ return -EINVAL;
+
ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
if (ret)
return ret;
@@ -532,59 +556,54 @@ static int acpi_data_get_property_reference(struct acpi_device_data *data,
while (element < end) {
u32 nargs, i;
- if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
- return -EPROTO;
-
- ret = acpi_bus_get_device(element->reference.handle, &device);
- if (ret)
- return -ENODEV;
-
- element++;
- nargs = 0;
-
- /* assume following integer elements are all args */
- for (i = 0; element + i < end; i++) {
- int type = element[i].type;
+ if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
+ ret = acpi_bus_get_device(element->reference.handle,
+ &device);
+ if (ret)
+ return -ENODEV;
+
+ nargs = 0;
+ element++;
+
+ /* assume following integer elements are all args */
+ for (i = 0; element + i < end && i < num_args; i++) {
+ int type = element[i].type;
+
+ if (type == ACPI_TYPE_INTEGER)
+ nargs++;
+ else if (type == ACPI_TYPE_LOCAL_REFERENCE)
+ break;
+ else
+ return -EPROTO;
+ }
- if (type == ACPI_TYPE_INTEGER)
- nargs++;
- else if (type == ACPI_TYPE_LOCAL_REFERENCE)
- break;
- else
+ if (nargs > MAX_ACPI_REFERENCE_ARGS)
return -EPROTO;
- }
- if (idx++ == index) {
- args->adev = device;
- args->nargs = nargs;
- for (i = 0; i < nargs; i++)
- args->args[i] = element[i].integer.value;
+ if (idx == index) {
+ args->adev = device;
+ args->nargs = nargs;
+ for (i = 0; i < nargs; i++)
+ args->args[i] = element[i].integer.value;
- return 0;
+ return 0;
+ }
+
+ element += nargs;
+ } else if (element->type == ACPI_TYPE_INTEGER) {
+ if (idx == index)
+ return -ENOENT;
+ element++;
+ } else {
+ return -EPROTO;
}
- element += nargs;
+ idx++;
}
- return -EPROTO;
-}
-
-/**
- * acpi_node_get_property_reference - get a handle to the referenced object.
- * @fwnode: Firmware node to get the property from.
- * @propname: Name of the property.
- * @index: Index of the reference to return.
- * @args: Location to store the returned reference with optional arguments.
- */
-int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
- const char *name, size_t index,
- struct acpi_reference_args *args)
-{
- struct acpi_device_data *data = acpi_device_data_of_node(fwnode);
-
- return data ? acpi_data_get_property_reference(data, name, index, args) : -EINVAL;
+ return -ENODATA;
}
-EXPORT_SYMBOL_GPL(acpi_node_get_property_reference);
+EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
static int acpi_data_prop_read_single(struct acpi_device_data *data,
const char *propname,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 562af94bec35..3c71b982bf2a 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1002,7 +1002,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- uint32_t desc)
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1010,12 +1010,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc)
+ if (desc < ref->desc) {
n = n->rb_left;
- else if (desc > ref->desc)
+ } else if (desc > ref->desc) {
n = n->rb_right;
- else
+ } else if (need_strong_ref && !ref->strong) {
+ binder_user_error("tried to use weak ref as strong ref\n");
+ return NULL;
+ } else {
return ref;
+ }
}
return NULL;
}
@@ -1285,7 +1289,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n",
@@ -1380,7 +1387,7 @@ static void binder_transaction(struct binder_proc *proc,
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle);
+ ref = binder_get_ref(proc, tr->target.handle, true);
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
@@ -1577,7 +1584,9 @@ static void binder_transaction(struct binder_proc *proc,
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->binder = 0;
fp->handle = ref->desc;
+ fp->cookie = 0;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
@@ -1589,7 +1598,10 @@ static void binder_transaction(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
@@ -1624,7 +1636,9 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
+ fp->binder = 0;
fp->handle = new_ref->desc;
+ fp->cookie = 0;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref,
new_ref);
@@ -1678,6 +1692,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
+ fp->binder = 0;
fp->handle = target_fd;
} break;
@@ -1800,7 +1815,9 @@ static int binder_thread_write(struct binder_proc *proc,
ref->desc);
}
} else
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target,
+ cmd == BC_ACQUIRE ||
+ cmd == BC_RELEASE);
if (ref == NULL) {
binder_user_error("%d:%d refcount change on invalid ref %d\n",
proc->pid, thread->pid, target);
@@ -1996,7 +2013,7 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 90eabaf81215..9669fc7c19df 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1400,142 +1400,59 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
}
#endif
-/*
- * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
- * to single msi.
- */
-static int ahci_init_msix(struct pci_dev *pdev, unsigned int n_ports,
- struct ahci_host_priv *hpriv, unsigned long flags)
+static int ahci_get_irq_vector(struct ata_host *host, int port)
{
- int nvec, i, rc;
-
- /* Do not init MSI-X if MSI is disabled for the device */
- if (hpriv->flags & AHCI_HFLAG_NO_MSI)
- return -ENODEV;
-
- nvec = pci_msix_vec_count(pdev);
- if (nvec < 0)
- return nvec;
-
- /*
- * Proper MSI-X implementations will have a vector per-port.
- * Barring that, we prefer single-MSI over single-MSIX. If this
- * check fails (not enough MSI-X vectors for all ports) we will
- * be called again with the flag clear iff ahci_init_msi()
- * fails.
- */
- if (flags & AHCI_HFLAG_MULTI_MSIX) {
- if (nvec < n_ports)
- return -ENODEV;
- nvec = n_ports;
- } else if (nvec) {
- nvec = 1;
- } else {
- /*
- * Emit dev_err() since this was the non-legacy irq
- * method of last resort.
- */
- rc = -ENODEV;
- goto fail;
- }
-
- for (i = 0; i < nvec; i++)
- hpriv->msix[i].entry = i;
- rc = pci_enable_msix_exact(pdev, hpriv->msix, nvec);
- if (rc < 0)
- goto fail;
-
- if (nvec > 1)
- hpriv->flags |= AHCI_HFLAG_MULTI_MSIX;
- hpriv->irq = hpriv->msix[0].vector; /* for single msi-x */
-
- return nvec;
-fail:
- dev_err(&pdev->dev,
- "failed to enable MSI-X with error %d, # of vectors: %d\n",
- rc, nvec);
-
- return rc;
+ return pci_irq_vector(to_pci_dev(host->dev), port);
}
static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv)
{
- int rc, nvec;
+ int nvec;
if (hpriv->flags & AHCI_HFLAG_NO_MSI)
return -ENODEV;
- nvec = pci_msi_vec_count(pdev);
- if (nvec < 0)
- return nvec;
-
/*
* If number of MSIs is less than number of ports then Sharing Last
* Message mode could be enforced. In this case assume that advantage
* of multipe MSIs is negated and use single MSI mode instead.
*/
- if (nvec < n_ports)
- goto single_msi;
+ if (n_ports > 1) {
+ nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
+ if (nvec > 0) {
+ if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
+ hpriv->get_irq_vector = ahci_get_irq_vector;
+ hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+ return nvec;
+ }
- rc = pci_enable_msi_exact(pdev, nvec);
- if (rc == -ENOSPC)
- goto single_msi;
- if (rc < 0)
- return rc;
+ /*
+ * Fallback to single MSI mode if the controller
+ * enforced MRSM mode.
+ */
+ printk(KERN_INFO
+ "ahci: MRSM is on, fallback to single MSI\n");
+ pci_free_irq_vectors(pdev);
+ }
- /* fallback to single MSI mode if the controller enforced MRSM mode */
- if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) {
- pci_disable_msi(pdev);
- printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
- goto single_msi;
+ /*
+ * -ENOSPC indicated we don't have enough vectors. Don't bother
+ * trying a single vectors for any other error:
+ */
+ if (nvec < 0 && nvec != -ENOSPC)
+ return nvec;
}
- if (nvec > 1)
- hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
-
- goto out;
-
-single_msi:
- nvec = 1;
-
- rc = pci_enable_msi(pdev);
- if (rc < 0)
- return rc;
-out:
- hpriv->irq = pdev->irq;
-
- return nvec;
-}
-
-static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
- struct ahci_host_priv *hpriv)
-{
- int nvec;
-
/*
- * Try to enable per-port MSI-X. If the host is not capable
- * fall back to single MSI before finally attempting single
- * MSI-X.
+ * If the host is not capable of supporting per-port vectors, fall
+ * back to single MSI before finally attempting single MSI-X.
*/
- nvec = ahci_init_msix(pdev, n_ports, hpriv, AHCI_HFLAG_MULTI_MSIX);
- if (nvec >= 0)
- return nvec;
-
- nvec = ahci_init_msi(pdev, n_ports, hpriv);
- if (nvec >= 0)
- return nvec;
-
- /* try single-msix */
- nvec = ahci_init_msix(pdev, n_ports, hpriv, 0);
- if (nvec >= 0)
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (nvec == 1)
return nvec;
-
- /* legacy intx interrupts */
- pci_intx(pdev, 1);
- hpriv->irq = pdev->irq;
-
- return 0;
+ return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
}
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1698,11 +1615,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!host)
return -ENOMEM;
host->private_data = hpriv;
- hpriv->msix = devm_kzalloc(&pdev->dev,
- sizeof(struct msix_entry) * n_ports, GFP_KERNEL);
- if (!hpriv->msix)
- return -ENOMEM;
- ahci_init_interrupts(pdev, n_ports, hpriv);
+
+ if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
+ /* legacy intx interrupts */
+ pci_intx(pdev, 1);
+ }
+ hpriv->irq = pci_irq_vector(pdev, 0);
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 70b06bcfb7e3..0cc08f892fea 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -242,12 +242,10 @@ enum {
AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
#ifdef CONFIG_PCI_MSI
- AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */
- AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */
+ AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */
#else
/* compile out MSI infrastructure */
AHCI_HFLAG_MULTI_MSI = 0,
- AHCI_HFLAG_MULTI_MSIX = 0,
#endif
AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */
@@ -351,7 +349,6 @@ struct ahci_host_priv {
* the PHY position in this array.
*/
struct phy **phys;
- struct msix_entry *msix; /* Optional MSI-X support */
unsigned nports; /* Number of ports */
void *plat_data; /* Other platform data */
unsigned int irq; /* interrupt line */
@@ -362,22 +359,11 @@ struct ahci_host_priv {
*/
void (*start_engine)(struct ata_port *ap);
irqreturn_t (*irq_handler)(int irq, void *dev_instance);
-};
-#ifdef CONFIG_PCI_MSI
-static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port)
-{
- if (hpriv->flags & AHCI_HFLAG_MULTI_MSIX)
- return hpriv->msix[port].vector;
- else
- return hpriv->irq + port;
-}
-#else
-static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port)
-{
- return hpriv->irq;
-}
-#endif
+ /* only required for per-port MSI(-X) support */
+ int (*get_irq_vector)(struct ata_host *host,
+ int port);
+};
extern int ahci_ignore_sss;
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 7bdee9bd8786..1eba8dff875e 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -30,24 +30,23 @@
#define PORT_PHY3 0xB0
#define PORT_PHY4 0xB4
#define PORT_PHY5 0xB8
+#define PORT_AXICC 0xBC
#define PORT_TRANS 0xC8
/* port register default value */
#define AHCI_PORT_PHY_1_CFG 0xa003fffe
#define AHCI_PORT_TRANS_CFG 0x08000029
+#define AHCI_PORT_AXICC_CFG 0x3fffffff
/* for ls1021a */
#define LS1021A_PORT_PHY2 0x28183414
#define LS1021A_PORT_PHY3 0x0e080e06
#define LS1021A_PORT_PHY4 0x064a080b
#define LS1021A_PORT_PHY5 0x2aa86470
+#define LS1021A_AXICC_ADDR 0xC0
#define SATA_ECC_DISABLE 0x00020000
-/* for ls1043a */
-#define LS1043A_PORT_PHY2 0x28184d1f
-#define LS1043A_PORT_PHY3 0x0e081509
-
enum ahci_qoriq_type {
AHCI_LS1021A,
AHCI_LS1043A,
@@ -137,7 +136,7 @@ static struct ata_port_operations ahci_qoriq_ops = {
.hardreset = ahci_qoriq_hardreset,
};
-static struct ata_port_info ahci_qoriq_port_info = {
+static const struct ata_port_info ahci_qoriq_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -162,18 +161,19 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(LS1021A_PORT_PHY4, reg_base + PORT_PHY4);
writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + LS1021A_AXICC_ADDR);
break;
case AHCI_LS1043A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
- writel(LS1043A_PORT_PHY2, reg_base + PORT_PHY2);
- writel(LS1043A_PORT_PHY3, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
case AHCI_LS2080A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
}
@@ -221,12 +221,6 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
if (rc)
goto disable_resources;
- /* Workaround for ls2080a */
- if (qoriq_priv->type == AHCI_LS2080A) {
- hpriv->flags |= AHCI_HFLAG_NO_NCQ;
- ahci_qoriq_port_info.flags &= ~ATA_FLAG_NCQ;
- }
-
rc = ahci_platform_init_host(pdev, hpriv, &ahci_qoriq_port_info,
&ahci_qoriq_sht);
if (rc)
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 8ff428fe8e0f..bc345f249555 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -147,6 +147,7 @@ static struct scsi_host_template ahci_platform_sht = {
static int st_ahci_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct st_ahci_drv_data *drv_data;
struct ahci_host_priv *hpriv;
int err;
@@ -170,6 +171,9 @@ static int st_ahci_probe(struct platform_device *pdev)
st_ahci_configure_oob(hpriv->mmio);
+ of_property_read_u32(dev->of_node,
+ "ports-implemented", &hpriv->force_port_map);
+
err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
&ahci_platform_sht);
if (err) {
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index dcf2c724fd06..0d028ead99e8 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2520,7 +2520,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host,
*/
for (i = 0; i < host->n_ports; i++) {
struct ahci_port_priv *pp = host->ports[i]->private_data;
- int irq = ahci_irq_vector(hpriv, i);
+ int irq = hpriv->get_irq_vector(host, i);
/* Do not receive interrupts sent by dummy ports */
if (!pp) {
@@ -2556,10 +2556,15 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
int irq = hpriv->irq;
int rc;
- if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
+ if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
if (hpriv->irq_handler)
dev_warn(host->dev,
"both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
+ if (!hpriv->get_irq_vector) {
+ dev_err(host->dev,
+ "AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n");
+ return -EIO;
+ }
rc = ahci_host_activate_multi_irqs(host, sht);
} else {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e207b33e4ce9..9cceb4a875a5 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1159,8 +1159,6 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
- sdev->no_report_opcodes = 1;
- sdev->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer() callback and
* it needs to see every deferred qc. Set dev_blocked to 1 to
@@ -3282,18 +3280,125 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
return 1;
}
+/**
+ * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
+ * @cmd: SCSI command being translated
+ * @trmax: Maximum number of entries that will fit in sector_size bytes.
+ * @sector: Starting sector
+ * @count: Total Range of request in logical sectors
+ *
+ * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
+ * descriptor.
+ *
+ * Upto 64 entries of the format:
+ * 63:48 Range Length
+ * 47:0 LBA
+ *
+ * Range Length of 0 is ignored.
+ * LBA's should be sorted order and not overlap.
+ *
+ * NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET
+ *
+ * Return: Number of bytes copied into sglist.
+ */
+static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax,
+ u64 sector, u32 count)
+{
+ struct scsi_device *sdp = cmd->device;
+ size_t len = sdp->sector_size;
+ size_t r;
+ __le64 *buf;
+ u32 i = 0;
+ unsigned long flags;
+
+ WARN_ON(len > ATA_SCSI_RBUF_SIZE);
+
+ if (len > ATA_SCSI_RBUF_SIZE)
+ len = ATA_SCSI_RBUF_SIZE;
+
+ spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
+ buf = ((void *)ata_scsi_rbuf);
+ memset(buf, 0, len);
+ while (i < trmax) {
+ u64 entry = sector |
+ ((u64)(count > 0xffff ? 0xffff : count) << 48);
+ buf[i++] = __cpu_to_le64(entry);
+ if (count <= 0xffff)
+ break;
+ count -= 0xffff;
+ sector += 0xffff;
+ }
+ r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
+ spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
+
+ return r;
+}
+
+/**
+ * ata_format_sct_write_same() - SATL Write Same to ATA SCT Write Same
+ * @cmd: SCSI command being translated
+ * @lba: Starting sector
+ * @num: Number of sectors to be zeroed.
+ *
+ * Rewrite the WRITE SAME payload to be an SCT Write Same formatted
+ * descriptor.
+ * NOTE: Writes a pattern (0's) in the foreground.
+ *
+ * Return: Number of bytes copied into sglist.
+ */
+static size_t ata_format_sct_write_same(struct scsi_cmnd *cmd, u64 lba, u64 num)
+{
+ struct scsi_device *sdp = cmd->device;
+ size_t len = sdp->sector_size;
+ size_t r;
+ u16 *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
+ buf = ((void *)ata_scsi_rbuf);
+
+ put_unaligned_le16(0x0002, &buf[0]); /* SCT_ACT_WRITE_SAME */
+ put_unaligned_le16(0x0101, &buf[1]); /* WRITE PTRN FG */
+ put_unaligned_le64(lba, &buf[2]);
+ put_unaligned_le64(num, &buf[6]);
+ put_unaligned_le32(0u, &buf[10]); /* pattern */
+
+ WARN_ON(len > ATA_SCSI_RBUF_SIZE);
+
+ if (len > ATA_SCSI_RBUF_SIZE)
+ len = ATA_SCSI_RBUF_SIZE;
+
+ r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
+ spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
+
+ return r;
+}
+
+/**
+ * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
+ * @qc: Command to be translated
+ *
+ * Translate a SCSI WRITE SAME command to be either a DSM TRIM command or
+ * an SCT Write Same command.
+ * Based on whether the WRITE SAME command has the UNMAP flag set:
+ *   when set, translate to DSM TRIM
+ *   when clear, translate to SCT Write Same
+ */
static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
struct scsi_cmnd *scmd = qc->scsicmd;
+ struct scsi_device *sdp = scmd->device;
+ size_t len = sdp->sector_size;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
u64 block;
u32 n_block;
+ const u32 trmax = len >> 3;
u32 size;
- void *buf;
u16 fp;
u8 bp = 0xff;
+ u8 unmap = cdb[1] & 0x8;
/* we may not issue DMA commands if no DMA mode is set */
if (unlikely(!dev->dma_mode))
@@ -3305,11 +3410,26 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
}
scsi_16_lba_len(cdb, &block, &n_block);
- /* for now we only support WRITE SAME with the unmap bit set */
- if (unlikely(!(cdb[1] & 0x8))) {
- fp = 1;
- bp = 3;
- goto invalid_fld;
+ if (unmap) {
+ /* If trim is not enabled the cmd is invalid. */
+ if ((dev->horkage & ATA_HORKAGE_NOTRIM) ||
+ !ata_id_has_trim(dev->id)) {
+ fp = 1;
+ bp = 3;
+ goto invalid_fld;
+ }
+ /* If the request is too large the cmd is invalid */
+ if (n_block > 0xffff * trmax) {
+ fp = 2;
+ goto invalid_fld;
+ }
+ } else {
+ /* If write same is not available the cmd is invalid */
+ if (!ata_id_sct_write_same(dev->id)) {
+ fp = 1;
+ bp = 3;
+ goto invalid_fld;
+ }
}
/*
@@ -3319,32 +3439,54 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
if (!scsi_sg_count(scmd))
goto invalid_param_len;
- buf = page_address(sg_page(scsi_sglist(scmd)));
-
- if (n_block <= 65535 * ATA_MAX_TRIM_RNUM) {
- size = ata_set_lba_range_entries(buf, ATA_MAX_TRIM_RNUM, block, n_block);
- } else {
- fp = 2;
- goto invalid_fld;
- }
+ /*
+ * size must match sector size in bytes
+ * For DATA SET MANAGEMENT TRIM in ACS-2 nsect (aka count)
+ * is defined as number of 512 byte blocks to be transferred.
+ */
+ if (unmap) {
+ size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block);
+ if (size != len)
+ goto invalid_param_len;
- if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
- /* Newer devices support queued TRIM commands */
- tf->protocol = ATA_PROT_NCQ;
- tf->command = ATA_CMD_FPDMA_SEND;
- tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
- tf->nsect = qc->tag << 3;
- tf->hob_feature = (size / 512) >> 8;
- tf->feature = size / 512;
+ if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
+ /* Newer devices support queued TRIM commands */
+ tf->protocol = ATA_PROT_NCQ;
+ tf->command = ATA_CMD_FPDMA_SEND;
+ tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
+ tf->nsect = qc->tag << 3;
+ tf->hob_feature = (size / 512) >> 8;
+ tf->feature = size / 512;
- tf->auxiliary = 1;
+ tf->auxiliary = 1;
+ } else {
+ tf->protocol = ATA_PROT_DMA;
+ tf->hob_feature = 0;
+ tf->feature = ATA_DSM_TRIM;
+ tf->hob_nsect = (size / 512) >> 8;
+ tf->nsect = size / 512;
+ tf->command = ATA_CMD_DSM;
+ }
} else {
- tf->protocol = ATA_PROT_DMA;
+ size = ata_format_sct_write_same(scmd, block, n_block);
+ if (size != len)
+ goto invalid_param_len;
+
tf->hob_feature = 0;
- tf->feature = ATA_DSM_TRIM;
- tf->hob_nsect = (size / 512) >> 8;
- tf->nsect = size / 512;
- tf->command = ATA_CMD_DSM;
+ tf->feature = 0;
+ tf->hob_nsect = 0;
+ tf->nsect = 1;
+ tf->lbah = 0;
+ tf->lbam = 0;
+ tf->lbal = ATA_CMD_STANDBYNOW1;
+ tf->hob_lbah = 0;
+ tf->hob_lbam = 0;
+ tf->hob_lbal = 0;
+ tf->device = ATA_CMD_STANDBYNOW1;
+ tf->protocol = ATA_PROT_DMA;
+ tf->command = ATA_CMD_WRITE_LOG_DMA_EXT;
+ if (unlikely(dev->flags & ATA_DFLAG_PIO))
+ tf->command = ATA_CMD_WRITE_LOG_EXT;
}
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |
@@ -3368,6 +3510,76 @@ invalid_opcode:
}
/**
+ * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
+ * @args: device MAINTENANCE_IN data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Yields a subset to satisfy scsi_report_opcode()
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
+{
+ struct ata_device *dev = args->dev;
+ u8 *cdb = args->cmd->cmnd;
+ u8 supported = 0;
+ unsigned int err = 0;
+
+ if (cdb[2] != 1) {
+ ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
+ err = 2;
+ goto out;
+ }
+ switch (cdb[3]) {
+ case INQUIRY:
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ case READ_CAPACITY:
+ case SERVICE_ACTION_IN_16:
+ case REPORT_LUNS:
+ case REQUEST_SENSE:
+ case SYNCHRONIZE_CACHE:
+ case REZERO_UNIT:
+ case SEEK_6:
+ case SEEK_10:
+ case TEST_UNIT_READY:
+ case SEND_DIAGNOSTIC:
+ case MAINTENANCE_IN:
+ case READ_6:
+ case READ_10:
+ case READ_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_16:
+ case ATA_12:
+ case ATA_16:
+ case VERIFY:
+ case VERIFY_16:
+ case MODE_SELECT:
+ case MODE_SELECT_10:
+ case START_STOP:
+ supported = 3;
+ break;
+ case WRITE_SAME_16:
+ if (!ata_id_sct_write_same(dev->id))
+ break;
+ /* fallthrough: if SCT ... only enable for ZBC */
+ case ZBC_IN:
+ case ZBC_OUT:
+ if (ata_id_zoned_cap(dev->id) ||
+ dev->class == ATA_DEV_ZAC)
+ supported = 3;
+ break;
+ default:
+ break;
+ }
+out:
+ rbuf[1] = supported; /* supported */
+ return err;
+}
+
+/**
* ata_scsi_report_zones_complete - convert ATA output
* @qc: command structure returning the data
*
@@ -3610,7 +3822,7 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc,
{
struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev;
- char mpage[CACHE_MPAGE_LEN];
+ u8 mpage[CACHE_MPAGE_LEN];
u8 wce;
int i;
@@ -3666,7 +3878,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
const u8 *buf, int len, u16 *fp)
{
struct ata_device *dev = qc->dev;
- char mpage[CONTROL_MPAGE_LEN];
+ u8 mpage[CONTROL_MPAGE_LEN];
u8 d_sense;
int i;
@@ -3701,8 +3913,6 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
dev->flags |= ATA_DFLAG_D_SENSE;
else
dev->flags &= ~ATA_DFLAG_D_SENSE;
- qc->scsicmd->result = SAM_STAT_GOOD;
- qc->scsicmd->scsi_done(qc->scsicmd);
return 0;
}
@@ -3829,6 +4039,8 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
if (ata_mselect_control(qc, p, pg_len, &fp) < 0) {
fp += hdr_len + bd_len;
goto invalid_param;
+ } else {
+ goto skip; /* No ATA command to send */
}
break;
default: /* invalid page code */
@@ -4147,6 +4359,13 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
ata_scsi_invalid_field(dev, cmd, 1);
break;
+ case MAINTENANCE_IN:
+ if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
+ ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
+ else
+ ata_scsi_invalid_field(dev, cmd, 1);
+ break;
+
/* all other commands */
default:
ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);
@@ -4179,7 +4398,6 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
shost->max_lun = 1;
shost->max_channel = 1;
shost->max_cmd_len = 16;
- shost->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer()
* callback and it needs to see every deferred qc.
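The ata_format_dsm_trim_descr() helper added above packs a WRITE SAME request into the TRIM payload layout its kernel-doc describes: little-endian 64-bit entries with the range length in bits 63:48 and the starting LBA in bits 47:0, up to 64 entries per 512-byte payload and at most 0xffff sectors per entry. A minimal user-space sketch of that packing, purely for illustration (htole64() is glibc's; none of the names below are kernel symbols):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <endian.h>	/* htole64(), glibc */

#define TRIM_PAYLOAD_BYTES	512
#define TRIM_MAX_ENTRIES	(TRIM_PAYLOAD_BYTES / 8)	/* 64 */
#define TRIM_MAX_RANGE		0xffffu				/* sectors per entry */

/* Pack a (sector, count) request into TRIM range entries; returns entries used. */
static unsigned int pack_trim(uint64_t *payload, uint64_t sector, uint32_t count)
{
	unsigned int i = 0;

	memset(payload, 0, TRIM_PAYLOAD_BYTES);
	while (count && i < TRIM_MAX_ENTRIES) {
		uint64_t len = count > TRIM_MAX_RANGE ? TRIM_MAX_RANGE : count;

		/* bits 63:48 = range length, bits 47:0 = starting LBA */
		payload[i++] = htole64((len << 48) | (sector & 0xffffffffffffULL));
		sector += len;
		count -= len;
	}
	return i;
}

int main(void)
{
	uint64_t payload[TRIM_MAX_ENTRIES];
	unsigned int n = pack_trim(payload, 0x1000, 0x18000);

	printf("used %u range entries\n", n);	/* prints 2 */
	return 0;
}

For a request of 0x18000 sectors this emits two entries (0xffff sectors plus the remainder), the same way the kernel helper splits counts larger than 0xffff.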
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 9f27b14009f9..1611e0e8d767 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -347,10 +347,8 @@ static int at91sam9_smc_fields_init(struct device *dev)
field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC);
fields.mode = devm_regmap_field_alloc(dev, smc, field);
- if (IS_ERR(fields.mode))
- return PTR_ERR(fields.mode);
- return 0;
+ return PTR_ERR_OR_ZERO(fields.mode);
}
static int pata_at91_probe(struct platform_device *pdev)
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 27245957eee3..475a00669427 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -152,8 +152,7 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
div = 8;
T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());
- if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
- BUG();
+ BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T));
t1 = timing.setup;
if (t1)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 745489a1c86a..efc48bf89d51 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1727,15 +1727,13 @@ static int mv_port_start(struct ata_port *ap)
return -ENOMEM;
ap->private_data = pp;
- pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
+ pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
if (!pp->crqb)
return -ENOMEM;
- memset(pp->crqb, 0, MV_CRQB_Q_SZ);
- pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
+ pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
if (!pp->crpb)
goto out_port_free_dma_mem;
- memset(pp->crpb, 0, MV_CRPB_Q_SZ);
/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index c07e725ea93d..10e1b9eee10e 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -119,4 +119,13 @@ config CFAG12864B_RATE
If you compile this as a module, you can still override this
value using the module parameters.
+config IMG_ASCII_LCD
+ tristate "Imagination Technologies ASCII LCD Display"
+ default y if MIPS_MALTA || MIPS_SEAD3
+ select SYSCON
+ help
+ Enable this to support the simple ASCII LCD displays found on
+ development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
+ from Imagination Technologies.
+
endif # AUXDISPLAY
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 8a8936a468b9..3127175c89df 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_KS0108) += ks0108.o
obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
+obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
new file mode 100644
index 000000000000..bf43b5d2aafc
--- /dev/null
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <generated/utsrelease.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+struct img_ascii_lcd_ctx;
+
+/**
+ * struct img_ascii_lcd_config - Configuration information about an LCD model
+ * @num_chars: the number of characters the LCD can display
+ * @external_regmap: true if registers are in a system controller, else false
+ * @update: function called to update the LCD
+ */
+struct img_ascii_lcd_config {
+ unsigned int num_chars;
+ bool external_regmap;
+ void (*update)(struct img_ascii_lcd_ctx *ctx);
+};
+
+/**
+ * struct img_ascii_lcd_ctx - Private data structure
+ * @pdev: the ASCII LCD platform device
+ * @base: the base address of the LCD registers
+ * @regmap: the regmap through which LCD registers are accessed
+ * @offset: the offset within regmap to the start of the LCD registers
+ * @cfg: pointer to the LCD model configuration
+ * @message: the full message to display or scroll on the LCD
+ * @message_len: the length of the @message string
+ * @scroll_pos: index of the first character of @message currently displayed
+ * @scroll_rate: scroll interval in jiffies
+ * @timer: timer used to implement scrolling
+ * @curr: the string currently displayed on the LCD
+ */
+struct img_ascii_lcd_ctx {
+ struct platform_device *pdev;
+ union {
+ void __iomem *base;
+ struct regmap *regmap;
+ };
+ u32 offset;
+ const struct img_ascii_lcd_config *cfg;
+ char *message;
+ unsigned int message_len;
+ unsigned int scroll_pos;
+ unsigned int scroll_rate;
+ struct timer_list timer;
+ char curr[] __aligned(8);
+};
+
+/*
+ * MIPS Boston development board
+ */
+
+static void boston_update(struct img_ascii_lcd_ctx *ctx)
+{
+ ulong val;
+
+#if BITS_PER_LONG == 64
+ val = *((u64 *)&ctx->curr[0]);
+ __raw_writeq(val, ctx->base);
+#elif BITS_PER_LONG == 32
+ val = *((u32 *)&ctx->curr[0]);
+ __raw_writel(val, ctx->base);
+ val = *((u32 *)&ctx->curr[4]);
+ __raw_writel(val, ctx->base + 4);
+#else
+# error Not 32 or 64 bit
+#endif
+}
+
+static struct img_ascii_lcd_config boston_config = {
+ .num_chars = 8,
+ .update = boston_update,
+};
+
+/*
+ * MIPS Malta development board
+ */
+
+static void malta_update(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ctx->cfg->num_chars; i++) {
+ err = regmap_write(ctx->regmap,
+ ctx->offset + (i * 8), ctx->curr[i]);
+ if (err)
+ break;
+ }
+
+ if (unlikely(err))
+ pr_err_ratelimited("Failed to update LCD display: %d\n", err);
+}
+
+static struct img_ascii_lcd_config malta_config = {
+ .num_chars = 8,
+ .external_regmap = true,
+ .update = malta_update,
+};
+
+/*
+ * MIPS SEAD3 development board
+ */
+
+enum {
+ SEAD3_REG_LCD_CTRL = 0x00,
+#define SEAD3_REG_LCD_CTRL_SETDRAM BIT(7)
+ SEAD3_REG_LCD_DATA = 0x08,
+ SEAD3_REG_CPLD_STATUS = 0x10,
+#define SEAD3_REG_CPLD_STATUS_BUSY BIT(0)
+ SEAD3_REG_CPLD_DATA = 0x18,
+#define SEAD3_REG_CPLD_DATA_BUSY BIT(7)
+};
+
+static int sead3_wait_sm_idle(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int status;
+ int err;
+
+ do {
+ err = regmap_read(ctx->regmap,
+ ctx->offset + SEAD3_REG_CPLD_STATUS,
+ &status);
+ if (err)
+ return err;
+ } while (status & SEAD3_REG_CPLD_STATUS_BUSY);
+
+ return 0;
+
+}
+
+static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int cpld_data;
+ int err;
+
+ err = sead3_wait_sm_idle(ctx);
+ if (err)
+ return err;
+
+ do {
+ err = regmap_read(ctx->regmap,
+ ctx->offset + SEAD3_REG_LCD_CTRL,
+ &cpld_data);
+ if (err)
+ return err;
+
+ err = sead3_wait_sm_idle(ctx);
+ if (err)
+ return err;
+
+ err = regmap_read(ctx->regmap,
+ ctx->offset + SEAD3_REG_CPLD_DATA,
+ &cpld_data);
+ if (err)
+ return err;
+ } while (cpld_data & SEAD3_REG_CPLD_DATA_BUSY);
+
+ return 0;
+}
+
+static void sead3_update(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ctx->cfg->num_chars; i++) {
+ err = sead3_wait_lcd_idle(ctx);
+ if (err)
+ break;
+
+ err = regmap_write(ctx->regmap,
+ ctx->offset + SEAD3_REG_LCD_CTRL,
+ SEAD3_REG_LCD_CTRL_SETDRAM | i);
+ if (err)
+ break;
+
+ err = sead3_wait_lcd_idle(ctx);
+ if (err)
+ break;
+
+ err = regmap_write(ctx->regmap,
+ ctx->offset + SEAD3_REG_LCD_DATA,
+ ctx->curr[i]);
+ if (err)
+ break;
+ }
+
+ if (unlikely(err))
+ pr_err_ratelimited("Failed to update LCD display: %d\n", err);
+}
+
+static struct img_ascii_lcd_config sead3_config = {
+ .num_chars = 16,
+ .external_regmap = true,
+ .update = sead3_update,
+};
+
+static const struct of_device_id img_ascii_lcd_matches[] = {
+ { .compatible = "img,boston-lcd", .data = &boston_config },
+ { .compatible = "mti,malta-lcd", .data = &malta_config },
+ { .compatible = "mti,sead3-lcd", .data = &sead3_config },
+};
+
+/**
+ * img_ascii_lcd_scroll() - scroll the display by a character
+ * @arg: really a pointer to the private data structure
+ *
+ * Scroll the current message along the LCD by one character, rearming the
+ * timer if required.
+ */
+static void img_ascii_lcd_scroll(unsigned long arg)
+{
+ struct img_ascii_lcd_ctx *ctx = (struct img_ascii_lcd_ctx *)arg;
+ unsigned int i, ch = ctx->scroll_pos;
+ unsigned int num_chars = ctx->cfg->num_chars;
+
+ /* update the current message string */
+ for (i = 0; i < num_chars;) {
+ /* copy as many characters from the string as possible */
+ for (; i < num_chars && ch < ctx->message_len; i++, ch++)
+ ctx->curr[i] = ctx->message[ch];
+
+ /* wrap around to the start of the string */
+ ch = 0;
+ }
+
+ /* update the LCD */
+ ctx->cfg->update(ctx);
+
+ /* move on to the next character */
+ ctx->scroll_pos++;
+ ctx->scroll_pos %= ctx->message_len;
+
+ /* rearm the timer */
+ if (ctx->message_len > ctx->cfg->num_chars)
+ mod_timer(&ctx->timer, jiffies + ctx->scroll_rate);
+}
+
+/**
+ * img_ascii_lcd_display() - set the message to be displayed
+ * @ctx: pointer to the private data structure
+ * @msg: the message to display
+ * @count: length of msg, or -1
+ *
+ * Display a new message @msg on the LCD. @msg can be longer than the number of
+ * characters the LCD can display, in which case it will begin scrolling across
+ * the LCD display.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation failure
+ */
+static int img_ascii_lcd_display(struct img_ascii_lcd_ctx *ctx,
+ const char *msg, ssize_t count)
+{
+ char *new_msg;
+
+ /* stop the scroll timer */
+ del_timer_sync(&ctx->timer);
+
+ if (count == -1)
+ count = strlen(msg);
+
+ /* if the string ends with a newline, trim it */
+ if (msg[count - 1] == '\n')
+ count--;
+
+ new_msg = devm_kmalloc(&ctx->pdev->dev, count + 1, GFP_KERNEL);
+ if (!new_msg)
+ return -ENOMEM;
+
+ memcpy(new_msg, msg, count);
+ new_msg[count] = 0;
+
+ if (ctx->message)
+ devm_kfree(&ctx->pdev->dev, ctx->message);
+
+ ctx->message = new_msg;
+ ctx->message_len = count;
+ ctx->scroll_pos = 0;
+
+ /* update the LCD */
+ img_ascii_lcd_scroll((unsigned long)ctx);
+
+ return 0;
+}
+
+/**
+ * message_show() - read message via sysfs
+ * @dev: the LCD device
+ * @attr: the LCD message attribute
+ * @buf: the buffer to read the message into
+ *
+ * Read the current message being displayed or scrolled across the LCD display
+ * into @buf, for reads from sysfs.
+ *
+ * Return: the number of characters written to @buf
+ */
+static ssize_t message_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", ctx->message);
+}
+
+/**
+ * message_store() - write a new message via sysfs
+ * @dev: the LCD device
+ * @attr: the LCD message attribute
+ * @buf: the buffer containing the new message
+ * @count: the size of the message in @buf
+ *
+ * Write a new message to display or scroll across the LCD display from sysfs.
+ *
+ * Return: the size of the message on success, else -ERRNO
+ */
+static ssize_t message_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev);
+ int err;
+
+ err = img_ascii_lcd_display(ctx, buf, count);
+ return err ?: count;
+}
+
+static DEVICE_ATTR_RW(message);
+
+/**
+ * img_ascii_lcd_probe() - probe an LCD display device
+ * @pdev: the LCD platform device
+ *
+ * Probe an LCD display device, ensuring that we have the required resources in
+ * order to access the LCD & setting up private data as well as sysfs files.
+ *
+ * Return: 0 on success, else -ERRNO
+ */
+static int img_ascii_lcd_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct img_ascii_lcd_config *cfg;
+ struct img_ascii_lcd_ctx *ctx;
+ struct resource *res;
+ int err;
+
+ match = of_match_device(img_ascii_lcd_matches, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ cfg = match->data;
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx) + cfg->num_chars,
+ GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (cfg->external_regmap) {
+ ctx->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(ctx->regmap))
+ return PTR_ERR(ctx->regmap);
+
+ if (of_property_read_u32(pdev->dev.of_node, "offset",
+ &ctx->offset))
+ return -EINVAL;
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->base))
+ return PTR_ERR(ctx->base);
+ }
+
+ ctx->pdev = pdev;
+ ctx->cfg = cfg;
+ ctx->message = NULL;
+ ctx->scroll_pos = 0;
+ ctx->scroll_rate = HZ / 2;
+
+ /* initialise a timer for scrolling the message */
+ init_timer(&ctx->timer);
+ ctx->timer.function = img_ascii_lcd_scroll;
+ ctx->timer.data = (unsigned long)ctx;
+
+ platform_set_drvdata(pdev, ctx);
+
+ /* display a default message */
+ err = img_ascii_lcd_display(ctx, "Linux " UTS_RELEASE " ", -1);
+ if (err)
+ goto out_del_timer;
+
+ err = device_create_file(&pdev->dev, &dev_attr_message);
+ if (err)
+ goto out_del_timer;
+
+ return 0;
+out_del_timer:
+ del_timer_sync(&ctx->timer);
+ return err;
+}
+
+/**
+ * img_ascii_lcd_remove() - remove an LCD display device
+ * @pdev: the LCD platform device
+ *
+ * Remove an LCD display device, freeing private resources & ensuring that the
+ * driver stops using the LCD display registers.
+ *
+ * Return: 0
+ */
+static int img_ascii_lcd_remove(struct platform_device *pdev)
+{
+ struct img_ascii_lcd_ctx *ctx = platform_get_drvdata(pdev);
+
+ device_remove_file(&pdev->dev, &dev_attr_message);
+ del_timer_sync(&ctx->timer);
+ return 0;
+}
+
+static struct platform_driver img_ascii_lcd_driver = {
+ .driver = {
+ .name = "img-ascii-lcd",
+ .of_match_table = img_ascii_lcd_matches,
+ },
+ .probe = img_ascii_lcd_probe,
+ .remove = img_ascii_lcd_remove,
+};
+module_platform_driver(img_ascii_lcd_driver);
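The scroll logic in img_ascii_lcd_scroll() above fills the visible window with num_chars characters starting at scroll_pos, wrapping back to the start of the message, then advances scroll_pos modulo the message length. A small stand-alone sketch of just that window computation (illustrative only, not the driver code; the empty-message guard here is an addition the driver never needs):

#include <stdio.h>
#include <string.h>

/* Fill a window of 'width' characters from 'msg' starting at 'pos', wrapping. */
static void scroll_window(char *win, unsigned int width,
			  const char *msg, unsigned int pos)
{
	unsigned int msg_len = strlen(msg);
	unsigned int i, ch = pos;

	if (!msg_len) {		/* avoid spinning on an empty message */
		win[0] = '\0';
		return;
	}

	for (i = 0; i < width;) {
		/* copy as many characters from the message as possible */
		for (; i < width && ch < msg_len; i++, ch++)
			win[i] = msg[ch];
		/* wrap around to the start of the message */
		ch = 0;
	}
	win[width] = '\0';
}

int main(void)
{
	const char *msg = "Hello from the LCD ";
	char win[8 + 1];
	unsigned int pos;

	/* each step is the 8-character window an LCD would show next */
	for (pos = 0; pos < strlen(msg); pos++) {
		scroll_window(win, 8, msg, pos);
		printf("%2u: [%s]\n", pos, win);
	}
	return 0;
}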
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fdf44cac08e6..d02e7c0f5bfd 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -213,14 +213,16 @@ config DEBUG_DEVRES
If you are unsure about this, Say N here.
config DEBUG_TEST_DRIVER_REMOVE
- bool "Test driver remove calls during probe"
+ bool "Test driver remove calls during probe (UNSTABLE)"
depends on DEBUG_KERNEL
help
Say Y here if you want the Driver core to test driver remove functions
by calling probe, remove, probe. This tests the remove path without
having to unbind the driver or unload the driver module.
- If you are unsure about this, say N here.
+ This option is expected to find errors and may render your system
+ unusable. You should say N here unless you are explicitly looking to
+ test this functionality.
config SYS_HYPERVISOR
bool
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 811e11c82f32..0809cda93cc0 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2954,7 +2954,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_PD_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)) {
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
@@ -2990,7 +2990,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_P_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)){
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ba405b55329f..19a16b2dbb91 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -164,7 +164,7 @@ static void sock_shutdown(struct nbd_device *nbd)
spin_lock(&nbd->sock_lock);
if (!nbd->sock) {
- spin_unlock_irq(&nbd->sock_lock);
+ spin_unlock(&nbd->sock_lock);
return;
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index abb71628ab61..7b274ff4632c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -415,15 +415,15 @@ struct rbd_device {
};
/*
- * Flag bits for rbd_dev->flags. If atomicity is required,
- * rbd_dev->lock is used to protect access.
- *
- * Currently, only the "removing" flag (which is coupled with the
- * "open_count" field) requires atomic access.
+ * Flag bits for rbd_dev->flags:
+ * - REMOVING (which is coupled with rbd_dev->open_count) is protected
+ * by rbd_dev->lock
+ * - BLACKLISTED is protected by rbd_dev->lock_rwsem
*/
enum rbd_dev_flags {
RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
+ RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};
static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
@@ -3926,6 +3926,7 @@ static void rbd_reregister_watch(struct work_struct *work)
struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
struct rbd_device, watch_dwork);
bool was_lock_owner = false;
+ bool need_to_wake = false;
int ret;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
@@ -3935,19 +3936,27 @@ static void rbd_reregister_watch(struct work_struct *work)
was_lock_owner = rbd_release_lock(rbd_dev);
mutex_lock(&rbd_dev->watch_mutex);
- if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR)
- goto fail_unlock;
+ if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
+ mutex_unlock(&rbd_dev->watch_mutex);
+ goto out;
+ }
ret = __rbd_register_watch(rbd_dev);
if (ret) {
rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
- if (ret != -EBLACKLISTED)
+ if (ret == -EBLACKLISTED || ret == -ENOENT) {
+ set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
+ need_to_wake = true;
+ } else {
queue_delayed_work(rbd_dev->task_wq,
&rbd_dev->watch_dwork,
RBD_RETRY_DELAY);
- goto fail_unlock;
+ }
+ mutex_unlock(&rbd_dev->watch_mutex);
+ goto out;
}
+ need_to_wake = true;
rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
mutex_unlock(&rbd_dev->watch_mutex);
@@ -3963,13 +3972,10 @@ static void rbd_reregister_watch(struct work_struct *work)
ret);
}
+out:
up_write(&rbd_dev->lock_rwsem);
- wake_requests(rbd_dev, true);
- return;
-
-fail_unlock:
- mutex_unlock(&rbd_dev->watch_mutex);
- up_write(&rbd_dev->lock_rwsem);
+ if (need_to_wake)
+ wake_requests(rbd_dev, true);
}
/*
@@ -4074,7 +4080,9 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
up_read(&rbd_dev->lock_rwsem);
schedule();
down_read(&rbd_dev->lock_rwsem);
- } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
+ } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+
finish_wait(&rbd_dev->lock_waitq, &wait);
}
@@ -4166,8 +4174,16 @@ static void rbd_queue_workfn(struct work_struct *work)
if (must_be_locked) {
down_read(&rbd_dev->lock_rwsem);
- if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
+ if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
rbd_wait_state_locked(rbd_dev);
+
+ WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+ if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+ result = -EBLACKLISTED;
+ goto err_unlock;
+ }
}
img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index ef51c9c864c5..b6bb58c41df5 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -310,7 +310,7 @@ static int bt_ti_probe(struct platform_device *pdev)
BT_DBG("HCI device registered (hdev %p)", hdev);
dev_set_drvdata(&pdev->dev, hst);
- return err;
+ return 0;
}
static int bt_ti_remove(struct platform_device *pdev)
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 5ccb90ef0146..8f6c23c20c52 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -643,6 +643,14 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
},
.driver_data = &acpi_active_low,
},
+ { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */
+ .ident = "Lenovo ThinkPad 8",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
+ },
+ .driver_data = &acpi_active_low,
+ },
{ }
};
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 7010dcac9328..78751057164a 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -111,6 +111,7 @@ config OMAP_OCP2SCP
config QCOM_EBI2
bool "Qualcomm External Bus Interface 2 (EBI2)"
depends on HAS_IOMEM
+ depends on ARCH_QCOM || COMPILE_TEST
help
Say y here to enable support for the Qualcomm External Bus
Interface 2, which can be used to connect things like NAND Flash,
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 482794526e8c..d2d2c89de5b4 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -84,14 +84,14 @@ static size_t rng_buffer_size(void)
static void add_early_randomness(struct hwrng *rng)
{
- unsigned char bytes[16];
int bytes_read;
+ size_t size = min_t(size_t, 16, rng_buffer_size());
mutex_lock(&reading_mutex);
- bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+ bytes_read = rng_get_data(rng, rng_buffer, size, 1);
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
- add_device_randomness(bytes, bytes_read);
+ add_device_randomness(rng_buffer, bytes_read);
}
static inline void cleanup_rng(struct kref *kref)
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 5a9350b1069a..7f816655cbbf 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -76,3 +76,11 @@ config IPMI_POWEROFF
the IPMI management controller is capable of this.
endif # IPMI_HANDLER
+
+config ASPEED_BT_IPMI_BMC
+ depends on ARCH_ASPEED
+ tristate "BT IPMI bmc driver"
+ help
+ Provides a driver for the BT (Block Transfer) IPMI interface
+ found on Aspeed SOCs (AST2400 and AST2500). The driver
+ implements the BMC side of the BT interface.
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index f3ffde1f5f1f..0d98cd91def1 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
+obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
new file mode 100644
index 000000000000..b49e61320952
--- /dev/null
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bt-bmc.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+/*
+ * This is a BMC device used to communicate with the host
+ */
+#define DEVICE_NAME "ipmi-bt-host"
+
+#define BT_IO_BASE 0xe4
+#define BT_IRQ 10
+
+#define BT_CR0 0x0
+#define BT_CR0_IO_BASE 16
+#define BT_CR0_IRQ 12
+#define BT_CR0_EN_CLR_SLV_RDP 0x8
+#define BT_CR0_EN_CLR_SLV_WRP 0x4
+#define BT_CR0_ENABLE_IBT 0x1
+#define BT_CR1 0x4
+#define BT_CR1_IRQ_H2B 0x01
+#define BT_CR1_IRQ_HBUSY 0x40
+#define BT_CR2 0x8
+#define BT_CR2_IRQ_H2B 0x01
+#define BT_CR2_IRQ_HBUSY 0x40
+#define BT_CR3 0xc
+#define BT_CTRL 0x10
+#define BT_CTRL_B_BUSY 0x80
+#define BT_CTRL_H_BUSY 0x40
+#define BT_CTRL_OEM0 0x20
+#define BT_CTRL_SMS_ATN 0x10
+#define BT_CTRL_B2H_ATN 0x08
+#define BT_CTRL_H2B_ATN 0x04
+#define BT_CTRL_CLR_RD_PTR 0x02
+#define BT_CTRL_CLR_WR_PTR 0x01
+#define BT_BMC2HOST 0x14
+#define BT_INTMASK 0x18
+#define BT_INTMASK_B2H_IRQEN 0x01
+#define BT_INTMASK_B2H_IRQ 0x02
+#define BT_INTMASK_BMC_HWRST 0x80
+
+#define BT_BMC_BUFFER_SIZE 256
+
+struct bt_bmc {
+ struct device dev;
+ struct miscdevice miscdev;
+ void __iomem *base;
+ int irq;
+ wait_queue_head_t queue;
+ struct timer_list poll_timer;
+ struct mutex mutex;
+};
+
+static atomic_t open_count = ATOMIC_INIT(0);
+
+static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
+{
+ return ioread8(bt_bmc->base + reg);
+}
+
+static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
+{
+ iowrite8(data, bt_bmc->base + reg);
+}
+
+static void clr_rd_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL);
+}
+
+static void clr_wr_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL);
+}
+
+static void clr_h2b_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_H2B_ATN, BT_CTRL);
+}
+
+static void set_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY))
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void clr_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void set_b2h_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL);
+}
+
+static u8 bt_read(struct bt_bmc *bt_bmc)
+{
+ return bt_inb(bt_bmc, BT_BMC2HOST);
+}
+
+static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ buf[i] = bt_read(bt_bmc);
+ return n;
+}
+
+static void bt_write(struct bt_bmc *bt_bmc, u8 c)
+{
+ bt_outb(bt_bmc, c, BT_BMC2HOST);
+}
+
+static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ bt_write(bt_bmc, buf[i]);
+ return n;
+}
+
+static void set_sms_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL);
+}
+
+static struct bt_bmc *file_bt_bmc(struct file *file)
+{
+ return container_of(file->private_data, struct bt_bmc, miscdev);
+}
+
+static int bt_bmc_open(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ if (atomic_inc_return(&open_count) == 1) {
+ clr_b_busy(bt_bmc);
+ return 0;
+ }
+
+ atomic_dec(&open_count);
+ return -EBUSY;
+}
+
+/*
+ * The BT (Block Transfer) interface means that entire messages are
+ * buffered by the host before a notification is sent to the BMC that
+ * there is data to be read. The first byte is the length and the
+ * message data follows. The read operation just tries to capture the
+ * whole message before returning it to userspace.
+ *
+ * BT Message format :
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5:N
+ * Length NetFn/LUN Seq Cmd Data
+ *
+ */
+static ssize_t bt_bmc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 len;
+ int len_byte = 1;
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nread;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ WARN_ON(*ppos);
+
+ if (wait_event_interruptible(bt_bmc->queue,
+ bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ set_b_busy(bt_bmc);
+ clr_h2b_atn(bt_bmc);
+ clr_rd_ptr(bt_bmc);
+
+ /*
+ * The BT frames start with the message length, which does not
+ * include the length byte.
+ */
+ kbuffer[0] = bt_read(bt_bmc);
+ len = kbuffer[0];
+
+ /* We pass the length back to userspace as well */
+ if (len + 1 > count)
+ len = count - 1;
+
+ while (len) {
+ nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte);
+
+ bt_readn(bt_bmc, kbuffer + len_byte, nread);
+
+ if (copy_to_user(buf, kbuffer, nread + len_byte)) {
+ ret = -EFAULT;
+ break;
+ }
+ len -= nread;
+ buf += nread + len_byte;
+ ret += nread + len_byte;
+ len_byte = 0;
+ }
+
+ clr_b_busy(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
+
+/*
+ * BT Message response format :
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5 Byte 6:N
+ * Length NetFn/LUN Seq Cmd Code Data
+ */
+static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nwritten;
+
+ /*
+	 * Require at least the minimum response size
+ */
+ if (count < 5)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ WARN_ON(*ppos);
+
+ /*
+ * There's no interrupt for clearing bmc busy so we have to
+ * poll
+ */
+ if (wait_event_interruptible(bt_bmc->queue,
+ !(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ clr_wr_ptr(bt_bmc);
+
+ while (count) {
+ nwritten = min_t(ssize_t, count, sizeof(kbuffer));
+ if (copy_from_user(&kbuffer, buf, nwritten)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ bt_writen(bt_bmc, kbuffer, nwritten);
+
+ count -= nwritten;
+ buf += nwritten;
+ ret += nwritten;
+ }
+
+ set_b2h_atn(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
+
+static long bt_bmc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long param)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ switch (cmd) {
+ case BT_BMC_IOCTL_SMS_ATN:
+ set_sms_atn(bt_bmc);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int bt_bmc_release(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ atomic_dec(&open_count);
+ set_b_busy(bt_bmc);
+ return 0;
+}
+
+static unsigned int bt_bmc_poll(struct file *file, poll_table *wait)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ unsigned int mask = 0;
+ u8 ctrl;
+
+ poll_wait(file, &bt_bmc->queue, wait);
+
+ ctrl = bt_inb(bt_bmc, BT_CTRL);
+
+ if (ctrl & BT_CTRL_H2B_ATN)
+ mask |= POLLIN;
+
+ if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))
+ mask |= POLLOUT;
+
+ return mask;
+}
+
+static const struct file_operations bt_bmc_fops = {
+ .owner = THIS_MODULE,
+ .open = bt_bmc_open,
+ .read = bt_bmc_read,
+ .write = bt_bmc_write,
+ .release = bt_bmc_release,
+ .poll = bt_bmc_poll,
+ .unlocked_ioctl = bt_bmc_ioctl,
+};
+
+static void poll_timer(unsigned long data)
+{
+ struct bt_bmc *bt_bmc = (void *)data;
+
+ bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
+ wake_up(&bt_bmc->queue);
+ add_timer(&bt_bmc->poll_timer);
+}
+
+static irqreturn_t bt_bmc_irq(int irq, void *arg)
+{
+ struct bt_bmc *bt_bmc = arg;
+ u32 reg;
+
+ reg = ioread32(bt_bmc->base + BT_CR2);
+ reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
+ if (!reg)
+ return IRQ_NONE;
+
+ /* ack pending IRQs */
+ iowrite32(reg, bt_bmc->base + BT_CR2);
+
+ wake_up(&bt_bmc->queue);
+ return IRQ_HANDLED;
+}
+
+static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ u32 reg;
+ int rc;
+
+ bt_bmc->irq = platform_get_irq(pdev, 0);
+ if (!bt_bmc->irq)
+ return -ENODEV;
+
+ rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
+ DEVICE_NAME, bt_bmc);
+ if (rc < 0) {
+ dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
+ bt_bmc->irq = 0;
+ return rc;
+ }
+
+ /*
+ * Configure IRQs on the bmc clearing the H2B and HBUSY bits;
+ * H2B will be asserted when the bmc has data for us; HBUSY
+ * will be cleared (along with B2H) when we can write the next
+ * message to the BT buffer
+ */
+ reg = ioread32(bt_bmc->base + BT_CR1);
+ reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY;
+ iowrite32(reg, bt_bmc->base + BT_CR1);
+
+ return 0;
+}
+
+static int bt_bmc_probe(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc;
+ struct device *dev;
+ struct resource *res;
+ int rc;
+
+ if (!pdev || !pdev->dev.of_node)
+ return -ENODEV;
+
+ dev = &pdev->dev;
+ dev_info(dev, "Found bt bmc device\n");
+
+ bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL);
+ if (!bt_bmc)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, bt_bmc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bt_bmc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(bt_bmc->base))
+ return PTR_ERR(bt_bmc->base);
+
+ mutex_init(&bt_bmc->mutex);
+ init_waitqueue_head(&bt_bmc->queue);
+
+ bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR,
+ bt_bmc->miscdev.name = DEVICE_NAME,
+ bt_bmc->miscdev.fops = &bt_bmc_fops,
+ bt_bmc->miscdev.parent = dev;
+ rc = misc_register(&bt_bmc->miscdev);
+ if (rc) {
+ dev_err(dev, "Unable to register misc device\n");
+ return rc;
+ }
+
+ bt_bmc_config_irq(bt_bmc, pdev);
+
+ if (bt_bmc->irq) {
+ dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
+ } else {
+ dev_info(dev, "No IRQ; using timer\n");
+ setup_timer(&bt_bmc->poll_timer, poll_timer,
+ (unsigned long)bt_bmc);
+ bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
+ add_timer(&bt_bmc->poll_timer);
+ }
+
+ iowrite32((BT_IO_BASE << BT_CR0_IO_BASE) |
+ (BT_IRQ << BT_CR0_IRQ) |
+ BT_CR0_EN_CLR_SLV_RDP |
+ BT_CR0_EN_CLR_SLV_WRP |
+ BT_CR0_ENABLE_IBT,
+ bt_bmc->base + BT_CR0);
+
+ clr_b_busy(bt_bmc);
+
+ return 0;
+}
+
+static int bt_bmc_remove(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&bt_bmc->miscdev);
+ if (!bt_bmc->irq)
+ del_timer_sync(&bt_bmc->poll_timer);
+ return 0;
+}
+
+static const struct of_device_id bt_bmc_match[] = {
+ { .compatible = "aspeed,ast2400-bt-bmc" },
+ { },
+};
+
+static struct platform_driver bt_bmc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = bt_bmc_match,
+ },
+ .probe = bt_bmc_probe,
+ .remove = bt_bmc_remove,
+};
+
+module_platform_driver(bt_bmc_driver);
+
+MODULE_DEVICE_TABLE(of, bt_bmc_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
+MODULE_DESCRIPTION("Linux device interface to the BT interface");
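bt_bmc_read() above hands BT messages to userspace in the framing spelled out in the comment: byte 0 is the length of everything that follows, then NetFn/LUN, Seq, Cmd and any data. A minimal user-space sketch of decoding one such buffer (illustrative only; the NetFn/LUN split follows the usual IPMI packing of NetFn in the top six bits, and nothing here is a kernel API):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Decode one BT request frame: [len][NetFn/LUN][Seq][Cmd][data...] */
static int decode_bt_msg(const uint8_t *buf, size_t got)
{
	uint8_t len;

	if (got < 4)
		return -1;		/* need the length byte plus a 3-byte header */

	len = buf[0];			/* length of everything after buf[0] */
	if (len < 3 || (size_t)len + 1 > got)
		return -1;		/* malformed or truncated read */

	printf("netfn=0x%02x lun=%u seq=%u cmd=0x%02x data_len=%d\n",
	       buf[1] >> 2, buf[1] & 0x3, buf[2], buf[3], len - 3);
	return 0;
}

int main(void)
{
	/* example frame: NetFn 0x06 (App), LUN 0, seq 1, cmd 0x01, no data */
	const uint8_t msg[] = { 0x03, 0x18, 0x01, 0x01 };

	return decode_bt_msg(msg, sizeof(msg)) ? 1 : 0;
}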
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index d8619998cfb5..fcdd886819f5 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2891,11 +2891,11 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
intf->curr_channel = IPMI_MAX_CHANNELS;
}
+ rv = ipmi_bmc_register(intf, i);
+
if (rv == 0)
rv = add_proc_entries(intf, i);
- rv = ipmi_bmc_register(intf, i);
-
out:
if (rv) {
if (intf->proc_dir)
@@ -2982,8 +2982,6 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
int intf_num = intf->intf_num;
ipmi_user_t user;
- ipmi_bmc_unregister(intf);
-
mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
intf->intf_num = -1;
@@ -3007,6 +3005,7 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
mutex_unlock(&ipmi_interfaces_mutex);
remove_proc_entries(intf);
+ ipmi_bmc_unregister(intf);
/*
* Call all the watcher interfaces to tell them that
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d131e152c8ce..d6876d506220 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -479,8 +479,8 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
-static __u32 input_pool_data[INPUT_POOL_WORDS];
-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 190122e64a3a..85a449cf61e3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -203,7 +203,7 @@ at91_clk_register_programmable(struct regmap *regmap,
ret = clk_hw_register(NULL, &prog->hw);
if (ret) {
kfree(prog);
- hw = &prog->hw;
+ hw = ERR_PTR(ret);
}
return hw;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index b68bf573dcfb..8c7763fd9efc 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -502,8 +502,12 @@ static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate,
static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ const struct bcm2835_pll_data *data = pll->data;
u32 ndiv, fdiv;
+ rate = clamp(rate, data->min_rate, data->max_rate);
+
bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv);
return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1);
@@ -608,13 +612,6 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
u32 ana[4];
int i;
- if (rate < data->min_rate || rate > data->max_rate) {
- dev_err(cprman->dev, "%s: rate out of spec: %lu vs (%lu, %lu)\n",
- clk_hw_get_name(hw), rate,
- data->min_rate, data->max_rate);
- return -EINVAL;
- }
-
if (rate > data->max_fb_rate) {
use_fb_prediv = true;
rate /= 2;
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index b637f5979023..eb953d3b0b69 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -216,6 +216,7 @@ static int max77686_clk_probe(struct platform_device *pdev)
return -EINVAL;
}
+ drv_data->num_clks = num_clks;
drv_data->max_clk_data = devm_kcalloc(dev, num_clks,
sizeof(*drv_data->max_clk_data),
GFP_KERNEL);
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index fe364e63f8de..c0e8e1f196aa 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -195,7 +195,7 @@ static void __init hi6220_clk_sys_init(struct device_node *np)
hi6220_clk_register_divider(hi6220_div_clks_sys,
ARRAY_SIZE(hi6220_div_clks_sys), clk_data);
}
-CLK_OF_DECLARE(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
+CLK_OF_DECLARE_DRIVER(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
/* clocks in media controller */
@@ -252,7 +252,7 @@ static void __init hi6220_clk_media_init(struct device_node *np)
hi6220_clk_register_divider(hi6220_div_clks_media,
ARRAY_SIZE(hi6220_div_clks_media), clk_data);
}
-CLK_OF_DECLARE(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
+CLK_OF_DECLARE_DRIVER(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
/* clocks in pmctrl */
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 380c372d528e..f042bd2a6a99 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -8,6 +8,7 @@ config COMMON_CLK_MEDIATEK
config COMMON_CLK_MT8135
bool "Clock driver for Mediatek MT8135"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
---help---
@@ -15,6 +16,7 @@ config COMMON_CLK_MT8135
config COMMON_CLK_MT8173
bool "Clock driver for Mediatek MT8173"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
---help---
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 45905fc0d75b..cecb0fdfaef6 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -305,7 +305,7 @@ static const struct of_device_id armada_3700_periph_clock_of_match[] = {
};
static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
void __iomem *reg, spinlock_t *lock,
- struct device *dev, struct clk_hw *hw)
+ struct device *dev, struct clk_hw **hw)
{
const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
*rate_ops = NULL;
@@ -329,6 +329,7 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
gate->lock = lock;
gate_ops = gate_hw->init->ops;
gate->reg = reg + (u64)gate->reg;
+ gate->flags = CLK_GATE_SET_TO_DISABLE;
}
if (data->rate_hw) {
@@ -353,13 +354,13 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
}
}
- hw = clk_hw_register_composite(dev, data->name, data->parent_names,
+ *hw = clk_hw_register_composite(dev, data->name, data->parent_names,
data->num_parents, mux_hw,
mux_ops, rate_hw, rate_ops,
gate_hw, gate_ops, CLK_IGNORE_UNUSED);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(*hw))
+ return PTR_ERR(*hw);
return 0;
}
@@ -400,7 +401,7 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev)
spin_lock_init(&driver_data->lock);
for (i = 0; i < num_periph; i++) {
- struct clk_hw *hw = driver_data->hw_data->hws[i];
+ struct clk_hw **hw = &driver_data->hw_data->hws[i];
if (armada_3700_add_composite_clk(&data[i], reg,
&driver_data->lock, dev, hw))
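The armada-37xx change above is a classic out-parameter fix: armada_3700_add_composite_clk() used to take struct clk_hw *hw by value, so the assignment inside the function never reached the caller's hws[i] slot; passing struct clk_hw **hw and writing through *hw does. A tiny generic illustration of the difference (plain C, unrelated to the clock framework):

#include <stdio.h>
#include <stdlib.h>

struct thing {
	int id;
};

/* Broken: 'out' is a copy of the caller's pointer; the caller sees nothing. */
static void alloc_by_value(struct thing *out)
{
	out = malloc(sizeof(*out));	/* assignment is lost on return (and leaks) */
	if (out)
		out->id = 1;
}

/* Fixed: write through a pointer-to-pointer so the caller's slot is updated. */
static int alloc_by_ref(struct thing **out)
{
	*out = malloc(sizeof(**out));
	if (!*out)
		return -1;
	(*out)->id = 2;
	return 0;
}

int main(void)
{
	struct thing *a = NULL, *b = NULL;

	alloc_by_value(a);			/* a is still NULL afterwards */
	if (alloc_by_ref(&b) == 0)		/* b now points at a real object */
		printf("a=%p b->id=%d\n", (void *)a, b->id);
	free(b);
	return 0;
}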
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 51d152f735cc..17e68a724945 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -106,6 +106,7 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, exynos_audss_clk_of_match);
static void exynos_audss_clk_teardown(void)
{
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
index 5ffb898d0839..26c53f7963a4 100644
--- a/drivers/clk/uniphier/clk-uniphier-core.c
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -79,7 +79,7 @@ static int uniphier_clk_probe(struct platform_device *pdev)
hw_data->num = clk_num;
/* avoid returning NULL for unused idx */
- for (; clk_num >= 0; clk_num--)
+ while (--clk_num >= 0)
hw_data->hws[clk_num] = ERR_PTR(-EINVAL);
for (p = data; p->name; p++) {
@@ -111,6 +111,10 @@ static int uniphier_clk_remove(struct platform_device *pdev)
static const struct of_device_id uniphier_clk_match[] = {
/* System clock */
{
+ .compatible = "socionext,uniphier-sld3-clock",
+ .data = uniphier_sld3_sys_clk_data,
+ },
+ {
.compatible = "socionext,uniphier-ld4-clock",
.data = uniphier_ld4_sys_clk_data,
},
@@ -138,7 +142,7 @@ static const struct of_device_id uniphier_clk_match[] = {
.compatible = "socionext,uniphier-ld20-clock",
.data = uniphier_ld20_sys_clk_data,
},
- /* Media I/O clock */
+ /* Media I/O clock, SD clock */
{
.compatible = "socionext,uniphier-sld3-mio-clock",
.data = uniphier_sld3_mio_clk_data,
@@ -156,20 +160,20 @@ static const struct of_device_id uniphier_clk_match[] = {
.data = uniphier_sld3_mio_clk_data,
},
{
- .compatible = "socionext,uniphier-pro5-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-pro5-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
{
- .compatible = "socionext,uniphier-pxs2-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-pxs2-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
{
.compatible = "socionext,uniphier-ld11-mio-clock",
.data = uniphier_sld3_mio_clk_data,
},
{
- .compatible = "socionext,uniphier-ld20-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-ld20-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
/* Peripheral clock */
{
diff --git a/drivers/clk/uniphier/clk-uniphier-mio.c b/drivers/clk/uniphier/clk-uniphier-mio.c
index 6aa7ec768d0b..218d20f099ce 100644
--- a/drivers/clk/uniphier/clk-uniphier-mio.c
+++ b/drivers/clk/uniphier/clk-uniphier-mio.c
@@ -93,7 +93,7 @@ const struct uniphier_clk_data uniphier_sld3_mio_clk_data[] = {
{ /* sentinel */ }
};
-const struct uniphier_clk_data uniphier_pro5_mio_clk_data[] = {
+const struct uniphier_clk_data uniphier_pro5_sd_clk_data[] = {
UNIPHIER_MIO_CLK_SD_FIXED,
UNIPHIER_MIO_CLK_SD(0, 0),
UNIPHIER_MIO_CLK_SD(1, 1),
diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
index 15a2f2cbe0d9..2c243a894f3b 100644
--- a/drivers/clk/uniphier/clk-uniphier-mux.c
+++ b/drivers/clk/uniphier/clk-uniphier-mux.c
@@ -42,7 +42,7 @@ static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
int num_parents = clk_hw_get_num_parents(hw);
int ret;
- u32 val;
+ unsigned int val;
u8 i;
ret = regmap_read(mux->regmap, mux->reg, &val);
diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h
index 3ae184062388..0244dba1f4cf 100644
--- a/drivers/clk/uniphier/clk-uniphier.h
+++ b/drivers/clk/uniphier/clk-uniphier.h
@@ -115,7 +115,7 @@ extern const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_sld3_mio_clk_data[];
-extern const struct uniphier_clk_data uniphier_pro5_mio_clk_data[];
+extern const struct uniphier_clk_data uniphier_pro5_sd_clk_data[];
extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[];
extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[];
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 245190839359..e2c6e43cf8ca 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -417,6 +417,16 @@ config SYS_SUPPORTS_SH_TMU
config SYS_SUPPORTS_EM_STI
bool
+config CLKSRC_JCORE_PIT
+ bool "J-Core PIT timer driver" if COMPILE_TEST
+ depends on OF
+ depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ help
+ This enables build of clocksource and clockevent driver for
+ the integrated PIT in the J-Core synthesizable, open source SoC.
+
config SH_TIMER_CMT
bool "Renesas CMT timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index fd9d6df0bbc0..cf87f407f1ad 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
+obj-$(CONFIG_CLKSRC_JCORE_PIT) += jcore-pit.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
new file mode 100644
index 000000000000..54e1665aa03c
--- /dev/null
+++ b/drivers/clocksource/jcore-pit.c
@@ -0,0 +1,249 @@
+/*
+ * J-Core SoC PIT/clocksource driver
+ *
+ * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define PIT_IRQ_SHIFT 12
+#define PIT_PRIO_SHIFT 20
+#define PIT_ENABLE_SHIFT 26
+#define PIT_PRIO_MASK 0xf
+
+#define REG_PITEN 0x00
+#define REG_THROT 0x10
+#define REG_COUNT 0x14
+#define REG_BUSPD 0x18
+#define REG_SECHI 0x20
+#define REG_SECLO 0x24
+#define REG_NSEC 0x28
+
+struct jcore_pit {
+ struct clock_event_device ced;
+ void __iomem *base;
+ unsigned long periodic_delta;
+ u32 enable_val;
+};
+
+static void __iomem *jcore_pit_base;
+static struct jcore_pit __percpu *jcore_pit_percpu;
+
+static notrace u64 jcore_sched_clock_read(void)
+{
+ u32 seclo, nsec, seclo0;
+ __iomem void *base = jcore_pit_base;
+
+ seclo = readl(base + REG_SECLO);
+ do {
+ seclo0 = seclo;
+ nsec = readl(base + REG_NSEC);
+ seclo = readl(base + REG_SECLO);
+ } while (seclo0 != seclo);
+
+ return seclo * NSEC_PER_SEC + nsec;
+}
+
+static cycle_t jcore_clocksource_read(struct clocksource *cs)
+{
+ return jcore_sched_clock_read();
+}
+
+static int jcore_pit_disable(struct jcore_pit *pit)
+{
+ writel(0, pit->base + REG_PITEN);
+ return 0;
+}
+
+static int jcore_pit_set(unsigned long delta, struct jcore_pit *pit)
+{
+ jcore_pit_disable(pit);
+ writel(delta, pit->base + REG_THROT);
+ writel(pit->enable_val, pit->base + REG_PITEN);
+ return 0;
+}
+
+static int jcore_pit_set_state_shutdown(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_disable(pit);
+}
+
+static int jcore_pit_set_state_oneshot(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_disable(pit);
+}
+
+static int jcore_pit_set_state_periodic(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_set(pit->periodic_delta, pit);
+}
+
+static int jcore_pit_set_next_event(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_set(delta, pit);
+}
+
+static int jcore_pit_local_init(unsigned cpu)
+{
+ struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
+ unsigned buspd, freq;
+
+ pr_info("Local J-Core PIT init on cpu %u\n", cpu);
+
+ buspd = readl(pit->base + REG_BUSPD);
+ freq = DIV_ROUND_CLOSEST(NSEC_PER_SEC, buspd);
+ pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
+
+ clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
+
+ return 0;
+}
+
+static irqreturn_t jcore_timer_interrupt(int irq, void *dev_id)
+{
+ struct jcore_pit *pit = this_cpu_ptr(dev_id);
+
+ if (clockevent_state_oneshot(&pit->ced))
+ jcore_pit_disable(pit);
+
+ pit->ced.event_handler(&pit->ced);
+
+ return IRQ_HANDLED;
+}
+
+static int __init jcore_pit_init(struct device_node *node)
+{
+ int err;
+ unsigned pit_irq, cpu;
+ unsigned long hwirq;
+ u32 irqprio, enable_val;
+
+ jcore_pit_base = of_iomap(node, 0);
+ if (!jcore_pit_base) {
+ pr_err("Error: Cannot map base address for J-Core PIT\n");
+ return -ENXIO;
+ }
+
+ pit_irq = irq_of_parse_and_map(node, 0);
+ if (!pit_irq) {
+ pr_err("Error: J-Core PIT has no IRQ\n");
+ return -ENXIO;
+ }
+
+ pr_info("Initializing J-Core PIT at %p IRQ %d\n",
+ jcore_pit_base, pit_irq);
+
+ err = clocksource_mmio_init(jcore_pit_base, "jcore_pit_cs",
+ NSEC_PER_SEC, 400, 32,
+ jcore_clocksource_read);
+ if (err) {
+ pr_err("Error registering clocksource device: %d\n", err);
+ return err;
+ }
+
+ sched_clock_register(jcore_sched_clock_read, 32, NSEC_PER_SEC);
+
+ jcore_pit_percpu = alloc_percpu(struct jcore_pit);
+ if (!jcore_pit_percpu) {
+ pr_err("Failed to allocate memory for clock event device\n");
+ return -ENOMEM;
+ }
+
+ err = request_irq(pit_irq, jcore_timer_interrupt,
+ IRQF_TIMER | IRQF_PERCPU,
+ "jcore_pit", jcore_pit_percpu);
+ if (err) {
+ pr_err("pit irq request failed: %d\n", err);
+ free_percpu(jcore_pit_percpu);
+ return err;
+ }
+
+ /*
+ * The J-Core PIT is not hard-wired to a particular IRQ, but
+ * integrated with the interrupt controller such that the IRQ it
+ * generates is programmable, as follows:
+ *
+ * The bit layout of the PIT enable register is:
+ *
+ * .....e..ppppiiiiiiii............
+ *
+ * where the .'s indicate unrelated/unused bits, e is enable,
+ * p is priority, and i is hard irq number.
+ *
+ * For the PIT included in AIC1 (obsolete but still in use),
+ * any hard irq (trap number) can be programmed via the 8
+ * iiiiiiii bits, and a priority (0-15) is programmable
+ * separately in the pppp bits.
+ *
+ * For the PIT included in AIC2 (current), the programming
+ * interface is equivalent modulo interrupt mapping. This is
+ * why a different compatible tag was not used. However only
+ * traps 64-127 (the ones actually intended to be used for
+ * interrupts, rather than syscalls/exceptions/etc.) can be
+ * programmed (the high 2 bits of i are ignored) and the
+ * priority pppp is <<2'd and or'd onto the irq number. This
+ * choice seems to have been made on the hardware engineering
+ * side under an assumption that preserving old AIC1 priority
+ * mappings was important. Future models will likely ignore
+ * the pppp field.
+ */
+ hwirq = irq_get_irq_data(pit_irq)->hwirq;
+ irqprio = (hwirq >> 2) & PIT_PRIO_MASK;
+ enable_val = (1U << PIT_ENABLE_SHIFT)
+ | (hwirq << PIT_IRQ_SHIFT)
+ | (irqprio << PIT_PRIO_SHIFT);
+
+ for_each_present_cpu(cpu) {
+ struct jcore_pit *pit = per_cpu_ptr(jcore_pit_percpu, cpu);
+
+ pit->base = of_iomap(node, cpu);
+ if (!pit->base) {
+ pr_err("Unable to map PIT for cpu %u\n", cpu);
+ continue;
+ }
+
+ pit->ced.name = "jcore_pit";
+ pit->ced.features = CLOCK_EVT_FEAT_PERIODIC
+ | CLOCK_EVT_FEAT_ONESHOT
+ | CLOCK_EVT_FEAT_PERCPU;
+ pit->ced.cpumask = cpumask_of(cpu);
+ pit->ced.rating = 400;
+ pit->ced.irq = pit_irq;
+ pit->ced.set_state_shutdown = jcore_pit_set_state_shutdown;
+ pit->ced.set_state_periodic = jcore_pit_set_state_periodic;
+ pit->ced.set_state_oneshot = jcore_pit_set_state_oneshot;
+ pit->ced.set_next_event = jcore_pit_set_next_event;
+
+ pit->enable_val = enable_val;
+ }
+
+ cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
+ "AP_JCORE_TIMER_STARTING",
+ jcore_pit_local_init, NULL);
+
+ return 0;
+}
+
+CLOCKSOURCE_OF_DECLARE(jcore_pit, "jcore,pit", jcore_pit_init);
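
The enable-word packing described in the comment above can be checked with a small stand-alone C sketch; the hwirq value is hypothetical and the macros simply repeat the definitions from jcore-pit.c:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PIT_IRQ_SHIFT    12
#define PIT_PRIO_SHIFT   20
#define PIT_ENABLE_SHIFT 26
#define PIT_PRIO_MASK    0xf

/* Pack the PIT enable word the way jcore_pit_init() does: the priority
 * field is taken from bits 2..5 of the hardware irq number. */
static uint32_t jcore_pit_enable_word(unsigned long hwirq)
{
	uint32_t prio = (hwirq >> 2) & PIT_PRIO_MASK;

	return (1U << PIT_ENABLE_SHIFT)
		| ((uint32_t)hwirq << PIT_IRQ_SHIFT)
		| (prio << PIT_PRIO_SHIFT);
}

int main(void)
{
	uint32_t val = jcore_pit_enable_word(72); /* a trap in the 64-127 range */

	assert(val & (1U << PIT_ENABLE_SHIFT));               /* e bit set */
	assert(((val >> PIT_IRQ_SHIFT) & 0xff) == 72);        /* iiiiiiii field */
	assert(((val >> PIT_PRIO_SHIFT) & PIT_PRIO_MASK) == ((72 >> 2) & 0xf)); /* pppp */
	printf("enable word: 0x%08x\n", val);
	return 0;
}
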
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index c184eb84101e..4f87f3e76d83 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -152,6 +152,13 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static cycle_t sun5i_clksrc_read(struct clocksource *clksrc)
+{
+ struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
+
+ return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1));
+}
+
static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -210,8 +217,13 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
base + TIMER_CTL_REG(1));
- ret = clocksource_mmio_init(base + TIMER_CNTVAL_LO_REG(1), node->name,
- rate, 340, 32, clocksource_mmio_readl_down);
+ cs->clksrc.name = node->name;
+ cs->clksrc.rating = 340;
+ cs->clksrc.read = sun5i_clksrc_read;
+ cs->clksrc.mask = CLOCKSOURCE_MASK(32);
+ cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ ret = clocksource_register_hz(&cs->clksrc, rate);
if (ret) {
pr_err("Couldn't register clock source.\n");
goto err_remove_notifier;
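
The new sun5i_clksrc_read() returns the bitwise complement of a down-counting register; a tiny stand-alone sketch (with made-up counter values) of why that yields a monotonically increasing clocksource value:

#include <stdint.h>
#include <stdio.h>

/* A down-counting 32-bit timer presented as an up-counting clocksource:
 * inverting the value turns "counts remaining" into "counts elapsed"
 * (modulo 2^32), which is what the timekeeping core expects. */
static uint32_t clocksource_value(uint32_t hw_down_counter)
{
	return ~hw_down_counter;
}

int main(void)
{
	uint32_t start = clocksource_value(0xfffffff0); /* shortly after reload */
	uint32_t later = clocksource_value(0xffffff00); /* counter has decreased */

	printf("elapsed cycles: %u\n", later - start);  /* 240 */
	return 0;
}
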
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 1b2f28f69a81..4852d9efe74e 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -80,11 +80,17 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
{
struct cppc_cpudata *cpu;
struct cpufreq_freqs freqs;
+ u32 desired_perf;
int ret = 0;
cpu = all_cpu_data[policy->cpu];
- cpu->perf_ctrls.desired_perf = (u64)target_freq * policy->max / cppc_dmi_max_khz;
+ desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz;
+ /* Return if it is exactly the same perf */
+ if (desired_perf == cpu->perf_ctrls.desired_perf)
+ return ret;
+
+ cpu->perf_ctrls.desired_perf = desired_perf;
freqs.old = policy->cur;
freqs.new = target_freq;
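
The corrected scaling in cppc_cpufreq_set_target() maps the requested kHz value onto the CPU's highest_perf rather than policy->max; a minimal sketch of that arithmetic with hypothetical platform numbers:

#include <stdint.h>
#include <stdio.h>

/* Scale a requested frequency (kHz) into an abstract CPPC performance
 * level.  The widening cast avoids 32-bit overflow of the product. */
static uint32_t khz_to_perf(unsigned int target_freq_khz,
			    unsigned int highest_perf,
			    unsigned int dmi_max_khz)
{
	return (uint64_t)target_freq_khz * highest_perf / dmi_max_khz;
}

int main(void)
{
	/* hypothetical platform: 2.8 GHz max, highest_perf == 280 */
	printf("%u\n", khz_to_perf(1400000, 280, 2800000)); /* 140 */
	printf("%u\n", khz_to_perf(2800000, 280, 2800000)); /* 280 */
	return 0;
}
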
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 18da4f8051d3..13475890d792 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -17,6 +17,7 @@
struct cs_policy_dbs_info {
struct policy_dbs_info policy_dbs;
unsigned int down_skip;
+ unsigned int requested_freq;
};
static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
@@ -61,6 +62,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ unsigned int requested_freq = dbs_info->requested_freq;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int load = dbs_update(policy);
@@ -72,10 +74,16 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
if (cs_tuners->freq_step == 0)
goto out;
+ /*
+ * If requested_freq is out of range, it is likely that the limits
+ * changed in the meantime, so fall back to current frequency in that
+ * case.
+ */
+ if (requested_freq > policy->max || requested_freq < policy->min)
+ requested_freq = policy->cur;
+
/* Check for frequency increase */
if (load > dbs_data->up_threshold) {
- unsigned int requested_freq = policy->cur;
-
dbs_info->down_skip = 0;
/* if we are already at full speed then break out early */
@@ -83,8 +91,11 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
goto out;
requested_freq += get_freq_target(cs_tuners, policy);
+ if (requested_freq > policy->max)
+ requested_freq = policy->max;
__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
+ dbs_info->requested_freq = requested_freq;
goto out;
}
@@ -95,7 +106,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
/* Check for frequency decrease */
if (load < cs_tuners->down_threshold) {
- unsigned int freq_target, requested_freq = policy->cur;
+ unsigned int freq_target;
/*
* if we cannot reduce the frequency anymore, break out early
*/
@@ -109,6 +120,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
requested_freq = policy->min;
__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
+ dbs_info->requested_freq = requested_freq;
}
out:
@@ -287,6 +299,7 @@ static void cs_start(struct cpufreq_policy *policy)
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
dbs_info->down_skip = 0;
+ dbs_info->requested_freq = policy->cur;
}
static struct dbs_governor cs_governor = {
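
A condensed userspace model of the governor's new bookkeeping: a stale requested_freq falls back to the current frequency before being stepped and clamped (the frequencies below are illustrative):

#include <stdio.h>

struct policy { unsigned int min, max, cur; };

/* Mirror the conservative governor after the fix: start from the
 * previously requested frequency, fall back to the current one if the
 * limits changed underneath us, then step up and clamp to max. */
static unsigned int next_freq_up(const struct policy *p,
				 unsigned int requested, unsigned int step)
{
	if (requested > p->max || requested < p->min)
		requested = p->cur;

	requested += step;
	if (requested > p->max)
		requested = p->max;

	return requested;
}

int main(void)
{
	struct policy p = { .min = 800000, .max = 2000000, .cur = 1200000 };

	/* stale request above a newly lowered max falls back to cur */
	printf("%u\n", next_freq_up(&p, 2400000, 100000)); /* 1300000 */
	/* in-range request just steps up, clamped to max */
	printf("%u\n", next_freq_up(&p, 1950000, 100000)); /* 2000000 */
	return 0;
}
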
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 806f2039571e..4737520ec823 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct _pid {
/**
* struct cpudata - Per CPU instance data storage
* @cpu: CPU number for this instance data
+ * @policy: CPUFreq policy value
* @update_util: CPUFreq utility callback information
* @update_util_set: CPUFreq utility callback is set
* @iowait_boost: iowait-related boost fraction
@@ -201,6 +202,7 @@ struct _pid {
struct cpudata {
int cpu;
+ unsigned int policy;
struct update_util_data update_util;
bool update_util_set;
@@ -225,7 +227,7 @@ struct cpudata {
static struct cpudata **all_cpu_data;
/**
- * struct pid_adjust_policy - Stores static PID configuration data
+ * struct pstate_adjust_policy - Stores static PID configuration data
* @sample_rate_ms: PID calculation sample rate in ms
* @sample_rate_ns: Sample rate calculation in ns
* @deadband: PID deadband
@@ -562,12 +564,12 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
int min, hw_min, max, hw_max, cpu, range, adj_range;
u64 value, cap;
- rdmsrl(MSR_HWP_CAPABILITIES, cap);
- hw_min = HWP_LOWEST_PERF(cap);
- hw_max = HWP_HIGHEST_PERF(cap);
- range = hw_max - hw_min;
-
for_each_cpu(cpu, cpumask) {
+ rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+ hw_min = HWP_LOWEST_PERF(cap);
+ hw_max = HWP_HIGHEST_PERF(cap);
+ range = hw_max - hw_min;
+
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
adj_range = limits->min_perf_pct * range / 100;
min = hw_min + adj_range;
@@ -1142,10 +1144,8 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
-static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
- int pstate = cpu->pstate.min_pstate;
-
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
cpu->pstate.current_pstate = pstate;
/*
@@ -1157,6 +1157,20 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
pstate_funcs.get_val(cpu, pstate));
}
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+}
+
+static void intel_pstate_max_within_limits(struct cpudata *cpu)
+{
+ int min_pstate, max_pstate;
+
+ update_turbo_state();
+ intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
+ intel_pstate_set_pstate(cpu, max_pstate);
+}
+
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -1232,6 +1246,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
int32_t busy_frac, boost;
+ int target, avg_pstate;
busy_frac = div_fp(sample->mperf, sample->tsc);
@@ -1242,7 +1257,26 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
busy_frac = boost;
sample->busy_scaled = busy_frac * 100;
- return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled);
+
+ target = limits->no_turbo || limits->turbo_disabled ?
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+ target += target >> 2;
+ target = mul_fp(target, busy_frac);
+ if (target < cpu->pstate.min_pstate)
+ target = cpu->pstate.min_pstate;
+
+ /*
+ * If the average P-state during the previous cycle was higher than the
+ * current target, add 50% of the difference to the target to reduce
+ * possible performance oscillations and offset possible performance
+ * loss related to moving the workload from one CPU to another within
+ * a package/module.
+ */
+ avg_pstate = get_avg_pstate(cpu);
+ if (avg_pstate > target)
+ target += (avg_pstate - target) >> 1;
+
+ return target;
}
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
@@ -1251,10 +1285,11 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
u64 duration_ns;
/*
- * perf_scaled is the average performance during the last sampling
- * period scaled by the ratio of the maximum P-state to the P-state
- * requested last time (in percent). That measures the system's
- * response to the previous P-state selection.
+ * perf_scaled is the ratio of the average P-state during the last
+ * sampling period to the P-state requested last time (in percent).
+ *
+ * That measures the system's response to the previous P-state
+ * selection.
*/
max_pstate = cpu->pstate.max_pstate_physical;
current_pstate = cpu->pstate.current_pstate;
@@ -1304,7 +1339,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
from = cpu->pstate.current_pstate;
- target_pstate = pstate_funcs.get_target_pstate(cpu);
+ target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
+ cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
intel_pstate_update_pstate(cpu, target_pstate);
@@ -1470,7 +1506,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
policy->cpuinfo.max_freq, policy->max);
- cpu = all_cpu_data[0];
+ cpu = all_cpu_data[policy->cpu];
+ cpu->policy = policy->policy;
+
if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&
policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
@@ -1478,7 +1516,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
policy->max = policy->cpuinfo.max_freq;
}
- if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
limits = &performance_limits;
if (policy->max >= policy->cpuinfo.max_freq) {
pr_debug("set performance\n");
@@ -1514,6 +1552,15 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
out:
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ /*
+ * NOHZ_FULL CPUs need this as the governor callback may not
+ * be invoked on them.
+ */
+ intel_pstate_clear_update_util_hook(policy->cpu);
+ intel_pstate_max_within_limits(cpu);
+ }
+
intel_pstate_set_update_util_hook(policy->cpu);
intel_pstate_hwp_set_policy(policy);
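
The 50% averaging added to get_target_pstate_use_cpu_load() can be seen in isolation with hypothetical P-state numbers:

#include <stdio.h>

/* If the average P-state of the previous cycle exceeded the freshly
 * computed target, move the target halfway toward that average to damp
 * oscillations; a lower average never pulls the target down. */
static int smooth_target(int target, int avg_pstate)
{
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;
	return target;
}

int main(void)
{
	printf("%d\n", smooth_target(12, 20)); /* 16: halfway up toward 20 */
	printf("%d\n", smooth_target(20, 12)); /* 20: unchanged */
	return 0;
}
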
diff --git a/drivers/cpuidle/Kconfig.mips b/drivers/cpuidle/Kconfig.mips
index 4102be01d06a..512ee37b374b 100644
--- a/drivers/cpuidle/Kconfig.mips
+++ b/drivers/cpuidle/Kconfig.mips
@@ -5,7 +5,7 @@ config MIPS_CPS_CPUIDLE
bool "CPU Idle driver for MIPS CPS platforms"
depends on CPU_IDLE && MIPS_CPS
depends on SYS_SUPPORTS_MIPS_CPS
- select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT
+ select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT || CPU_MIPSR6
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select MIPS_CPS_PM
default y
diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c
index 1adb6980b707..926ba9871c62 100644
--- a/drivers/cpuidle/cpuidle-cps.c
+++ b/drivers/cpuidle/cpuidle-cps.c
@@ -163,7 +163,7 @@ static int __init cps_cpuidle_init(void)
core = cpu_data[cpu].core;
device = &per_cpu(cpuidle_dev, cpu);
device->cpu = cpu;
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
#endif
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index daadd20aa936..3e2ab3b14eea 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -14,7 +14,7 @@ if DEV_DAX
config DEV_DAX_PMEM
tristate "PMEM DAX: direct access to persistent memory"
- depends on NVDIMM_DAX
+ depends on LIBNVDIMM && NVDIMM_DAX
default DEV_DAX
help
Support raw access to persistent memory. Note that this
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 9630d8837ba9..4a15fa5df98b 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -44,7 +44,6 @@ static void dax_pmem_percpu_exit(void *data)
dev_dbg(dax_pmem->dev, "%s\n", __func__);
percpu_ref_exit(ref);
- wait_for_completion(&dax_pmem->cmp);
}
static void dax_pmem_percpu_kill(void *data)
@@ -54,6 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
dev_dbg(dax_pmem->dev, "%s\n", __func__);
percpu_ref_kill(ref);
+ wait_for_completion(&dax_pmem->cmp);
}
static int dax_pmem_probe(struct device *dev)
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 478006b7764a..bf3ea7603a58 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -137,6 +137,10 @@ static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
cur_time = jiffies;
+ /* Immediately exit if previous_freq is not initialized yet. */
+ if (!devfreq->previous_freq)
+ goto out;
+
prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
if (prev_lev < 0) {
ret = prev_lev;
@@ -594,17 +598,19 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (devfreq->governor)
err = devfreq->governor->event_handler(devfreq,
DEVFREQ_GOV_START, NULL);
- mutex_unlock(&devfreq_list_lock);
if (err) {
dev_err(dev, "%s: Unable to start governor for the device\n",
__func__);
goto err_init;
}
+ mutex_unlock(&devfreq_list_lock);
return devfreq;
err_init:
list_del(&devfreq->node);
+ mutex_unlock(&devfreq_list_lock);
+
device_unregister(&devfreq->dev);
err_out:
return ERR_PTR(err);
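
The devfreq change keeps devfreq_list_lock held across the governor-start error path so list_del() runs under the same lock as list_add(); a minimal pthread analogue of that pattern (the names and failure flag are invented for illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int list_len;

static int register_device(int start_should_fail)
{
	pthread_mutex_lock(&list_lock);
	list_len++;                       /* list_add() */

	if (start_should_fail) {
		list_len--;               /* list_del() under the same lock */
		pthread_mutex_unlock(&list_lock);
		return -1;
	}

	pthread_mutex_unlock(&list_lock);
	return 0;
}

int main(void)
{
	register_device(1);
	register_device(0);
	printf("devices on list: %d\n", list_len); /* 1 */
	return 0;
}
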
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index 0fdae8608961..cd949800eed9 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -17,6 +17,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP
tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select PM_OPP
+ select REGMAP_MMIO
help
This add the devfreq-event driver for Exynos SoC. It provides NoC
(Network on Chip) Probe counters to measure the bandwidth of AXI bus.
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index a5841403bde8..49e712aca0c1 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -176,9 +176,6 @@ static int exynos_nocp_get_event(struct devfreq_event_dev *edev,
return 0;
out:
- edata->load_count = 0;
- edata->total_count = 0;
-
dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n");
return ret;
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index ca957a5f4291..b8cde096a808 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -51,7 +51,7 @@ static void qcom_usb_extcon_detect_cable(struct work_struct *work)
if (ret)
return;
- extcon_set_state(info->edev, EXTCON_USB_HOST, !id);
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, !id);
}
static irqreturn_t qcom_usb_irq_handler(int irq, void *dev_id)
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 631c977b0da5..180f0a96528c 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -566,6 +566,11 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
PCILYNX_MAX_REGISTER);
+ if (lynx->registers == NULL) {
+ dev_err(&dev->dev, "Failed to map registers\n");
+ ret = -ENOMEM;
+ goto fail_deallocate_lynx;
+ }
lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
@@ -578,7 +583,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
lynx->rcv_buffer == NULL) {
dev_err(&dev->dev, "Failed to allocate receive buffer\n");
ret = -ENOMEM;
- goto fail_deallocate;
+ goto fail_deallocate_buffers;
}
lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus);
lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID);
@@ -641,7 +646,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
dev_err(&dev->dev,
"Failed to allocate shared interrupt %d\n", dev->irq);
ret = -EIO;
- goto fail_deallocate;
+ goto fail_deallocate_buffers;
}
lynx->misc.parent = &dev->dev;
@@ -668,7 +673,7 @@ fail_free_irq:
reg_write(lynx, PCI_INT_ENABLE, 0);
free_irq(lynx->pci_device->irq, lynx);
-fail_deallocate:
+fail_deallocate_buffers:
if (lynx->rcv_start_pcl)
pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
@@ -679,6 +684,8 @@ fail_deallocate:
pci_free_consistent(lynx->pci_device, PAGE_SIZE,
lynx->rcv_buffer, lynx->rcv_buffer_bus);
iounmap(lynx->registers);
+
+fail_deallocate_lynx:
kfree(lynx);
fail_disable:
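
The nosy fix adds a dedicated unwind label so the new ioremap failure path frees only the lynx structure and never touches the not-yet-allocated DMA buffers; the general goto-cleanup shape, with placeholder resources standing in for the driver's real ones:

#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_at)
{
	char *regs, *buf;
	int ret = -1;

	regs = (fail_at == 1) ? NULL : malloc(16);
	if (!regs)
		goto fail;            /* nothing set up yet, nothing to free */

	buf = (fail_at == 2) ? NULL : malloc(16);
	if (!buf)
		goto fail_free_regs;  /* unwind only the first resource */

	ret = 0;
	free(buf);
fail_free_regs:
	free(regs);
fail:
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", setup(1), setup(2), setup(0)); /* -1 -1 0 */
	return 0;
}
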
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c06945160a41..5e23e2d305e7 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
-mno-mmx -mno-sse
cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
-cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
+cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \
-fno-builtin -fpic -mno-single-pic-base
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
@@ -79,5 +79,6 @@ quiet_cmd_stubcopy = STUBCPY $@
# decompressor. So move our .data to .data.efistub, which is preserved
# explicitly by the decompressor linker script.
#
-STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
+STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \
+ -R ___ksymtab+sort -R ___kcrctab+sort
STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 26ee00f6bd58..d011cb89d25e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -284,7 +284,7 @@ config GPIO_MM_LANTIQ
config GPIO_MOCKUP
tristate "GPIO Testing Driver"
- depends on GPIOLIB
+ depends on GPIOLIB && SYSFS
select GPIO_SYSFS
help
This enables GPIO Testing driver, which provides a way to test GPIO
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 9457e2022bf6..dc37dbe4b46d 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -219,6 +219,7 @@ static const struct of_device_id ath79_gpio_of_match[] = {
{ .compatible = "qca,ar9340-gpio" },
{},
};
+MODULE_DEVICE_TABLE(of, ath79_gpio_of_match);
static int ath79_gpio_probe(struct platform_device *pdev)
{
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 425501c39527..793518a30afe 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -239,7 +239,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(irq, h->host_data);
- irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq);
return 0;
}
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b9daa0bf32a4..ee1724806f46 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -308,8 +308,10 @@ static int mxs_gpio_probe(struct platform_device *pdev)
writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
- if (irq_base < 0)
- return irq_base;
+ if (irq_base < 0) {
+ err = irq_base;
+ goto out_iounmap;
+ }
port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
&irq_domain_simple_ops, NULL);
@@ -349,6 +351,8 @@ out_irqdomain_remove:
irq_domain_remove(port->domain);
out_irqdesc_free:
irq_free_descs(irq_base, 32);
+out_iounmap:
+ iounmap(port->base);
return err;
}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 45c8817d068c..e422568e14ad 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -794,6 +794,22 @@ static int pca953x_probe(struct i2c_client *client,
}
mutex_init(&chip->i2c_lock);
+ /*
+ * In case we have an i2c-mux controlled by a GPIO provided by an
+ * expander using the same driver higher on the device tree, read the
+ * i2c adapter nesting depth and use the retrieved value as lockdep
+ * i2c adapter nesting depth and use the retrieved value as the lockdep
+ *
+ * REVISIT: This solution is not complete. It protects us from lockdep
+ * false positives when the expander controlling the i2c-mux is on
+ * a different level on the device tree, but not when it's on the same
+ * level on a different branch (in which case the subclass number
+ * would be the same).
+ *
+ * TODO: Once a correct solution is developed, a similar fix should be
+ * applied to all other i2c-controlled GPIO expanders (and potentially
+ * regmap-i2c).
+ */
lockdep_set_subclass(&chip->i2c_lock,
i2c_adapter_depth(client->adapter));
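
A rough userspace model of the nesting depth used as the lockdep subclass above (the parent-walk is a simplification of i2c_adapter_depth(), not its real implementation):

#include <stdio.h>

struct i2c_adapter { struct i2c_adapter *parent; };

/* Count how many muxes sit between this adapter and the root bus.  That
 * depth becomes the lock subclass, so expanders at different nesting
 * levels get distinct lock classes. */
static unsigned int adapter_depth(const struct i2c_adapter *adap)
{
	unsigned int depth = 0;

	while (adap->parent) {
		adap = adap->parent;
		depth++;
	}
	return depth;
}

int main(void)
{
	struct i2c_adapter root = { NULL };
	struct i2c_adapter mux = { &root };
	struct i2c_adapter behind_mux = { &mux };

	printf("%u %u %u\n", adapter_depth(&root),
	       adapter_depth(&mux), adapter_depth(&behind_mux)); /* 0 1 2 */
	return 0;
}
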
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index e7d422a6b90b..5b0042776ec7 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -409,7 +409,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
* 801/1801/1600, bits are cleared when read.
* Edge detect register is not present on 801/1600/1801
*/
- if (stmpe->partnum != STMPE801 || stmpe->partnum != STMPE1600 ||
+ if (stmpe->partnum != STMPE801 && stmpe->partnum != STMPE1600 &&
stmpe->partnum != STMPE1801) {
stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
stmpe_reg_write(stmpe,
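
The stmpe change is a pure boolean fix: with ||, the compound condition was true for every part number, so the 801/1600/1801 special case never took effect. A stand-alone check (the enum values are illustrative):

#include <assert.h>
#include <stdbool.h>

enum part { STMPE801, STMPE1600, STMPE1801, OTHER_PART };

/* "Write back the status unless the part is one of 801/1600/1801." */
static bool needs_ack_write(enum part p)
{
	return p != STMPE801 && p != STMPE1600 && p != STMPE1801;
}

int main(void)
{
	assert(needs_ack_write(OTHER_PART));  /* other parts still ack */
	assert(!needs_ack_write(STMPE1600));  /* exception now honoured */
	return 0;
}
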
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index 99256115bea5..c2a80b4cbf32 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -66,6 +66,7 @@ static const struct of_device_id ts4800_gpio_of_match[] = {
{ .compatible = "technologic,ts4800-gpio", },
{},
};
+MODULE_DEVICE_TABLE(of, ts4800_gpio_of_match);
static struct platform_driver ts4800_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 58ece201b8e6..72a4b326fd0d 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -653,14 +653,17 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
int idx, i;
unsigned int irq_flags;
+ int ret = -ENOENT;
for (i = 0, idx = 0; idx <= index; i++) {
struct acpi_gpio_info info;
struct gpio_desc *desc;
desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
- if (IS_ERR(desc))
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
break;
+ }
if (info.gpioint && idx++ == index) {
int irq = gpiod_to_irq(desc);
@@ -679,7 +682,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
}
}
- return -ENOENT;
+ return ret;
}
EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f0fc3a0d37c8..20e09b7c2de3 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -333,6 +333,13 @@ struct linehandle_state {
u32 numdescs;
};
+#define GPIOHANDLE_REQUEST_VALID_FLAGS \
+ (GPIOHANDLE_REQUEST_INPUT | \
+ GPIOHANDLE_REQUEST_OUTPUT | \
+ GPIOHANDLE_REQUEST_ACTIVE_LOW | \
+ GPIOHANDLE_REQUEST_OPEN_DRAIN | \
+ GPIOHANDLE_REQUEST_OPEN_SOURCE)
+
static long linehandle_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
@@ -344,6 +351,8 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
int val;
+ memset(&ghd, 0, sizeof(ghd));
+
/* TODO: check if descriptors are really input */
for (i = 0; i < lh->numdescs; i++) {
val = gpiod_get_value_cansleep(lh->descs[i]);
@@ -444,6 +453,17 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
u32 lflags = handlereq.flags;
struct gpio_desc *desc;
+ if (offset >= gdev->ngpio) {
+ ret = -EINVAL;
+ goto out_free_descs;
+ }
+
+ /* Return an error if an unknown flag is set */
+ if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) {
+ ret = -EINVAL;
+ goto out_free_descs;
+ }
+
desc = &gdev->descs[offset];
ret = gpiod_request(desc, lh->label);
if (ret)
@@ -536,6 +556,10 @@ struct lineevent_state {
struct mutex read_lock;
};
+#define GPIOEVENT_REQUEST_VALID_FLAGS \
+ (GPIOEVENT_REQUEST_RISING_EDGE | \
+ GPIOEVENT_REQUEST_FALLING_EDGE)
+
static unsigned int lineevent_poll(struct file *filep,
struct poll_table_struct *wait)
{
@@ -623,6 +647,8 @@ static long lineevent_ioctl(struct file *filep, unsigned int cmd,
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
int val;
+ memset(&ghd, 0, sizeof(ghd));
+
val = gpiod_get_value_cansleep(le->desc);
if (val < 0)
return val;
@@ -726,6 +752,18 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
lflags = eventreq.handleflags;
eflags = eventreq.eventflags;
+ if (offset >= gdev->ngpio) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+
+ /* Return an error if an unknown flag is set */
+ if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
+ (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+
/* This is just wrong: we don't look for events on output lines */
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
ret = -EINVAL;
@@ -823,6 +861,8 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
struct gpiochip_info chipinfo;
+ memset(&chipinfo, 0, sizeof(chipinfo));
+
strncpy(chipinfo.name, dev_name(&gdev->dev),
sizeof(chipinfo.name));
chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
@@ -839,7 +879,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
- if (lineinfo.line_offset > gdev->ngpio)
+ if (lineinfo.line_offset >= gdev->ngpio)
return -EINVAL;
desc = &gdev->descs[lineinfo.line_offset];
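
The gpiolib hardening rejects chardev requests whose flag words contain unknown bits instead of silently ignoring them; a sketch of that mask check (the flag values here are illustrative, not the real UAPI encoding):

#include <stdint.h>
#include <stdio.h>

#define REQ_INPUT       (1 << 0)
#define REQ_OUTPUT      (1 << 1)
#define REQ_ACTIVE_LOW  (1 << 2)
#define REQ_OPEN_DRAIN  (1 << 3)
#define REQ_OPEN_SOURCE (1 << 4)

#define REQ_VALID_FLAGS (REQ_INPUT | REQ_OUTPUT | REQ_ACTIVE_LOW | \
			 REQ_OPEN_DRAIN | REQ_OPEN_SOURCE)

/* Any bit outside the known set makes the whole request invalid. */
static int validate_flags(uint32_t lflags)
{
	if (lflags & ~REQ_VALID_FLAGS)
		return -22; /* -EINVAL */
	return 0;
}

int main(void)
{
	printf("%d\n", validate_flags(REQ_INPUT | REQ_ACTIVE_LOW)); /* 0 */
	printf("%d\n", validate_flags(1u << 31));                   /* -22 */
	return 0;
}
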
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 2e3a0543760d..e3281d4e3e41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -765,7 +765,7 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
return ret;
}
-static void amdgpu_connector_destroy(struct drm_connector *connector)
+static void amdgpu_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
@@ -773,6 +773,12 @@ static void amdgpu_connector_destroy(struct drm_connector *connector)
drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
amdgpu_connector->ddc_bus->has_aux = false;
}
+}
+
+static void amdgpu_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
amdgpu_connector_free_edid(connector);
kfree(amdgpu_connector->con_priv);
drm_connector_unregister(connector);
@@ -826,6 +832,7 @@ static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.set_property = amdgpu_connector_set_lcd_property,
};
@@ -936,6 +943,7 @@ static const struct drm_connector_funcs amdgpu_connector_vga_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.set_property = amdgpu_connector_set_property,
};
@@ -1203,6 +1211,7 @@ static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = {
.detect = amdgpu_connector_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
@@ -1493,6 +1502,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
.detect = amdgpu_connector_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
@@ -1502,6 +1512,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.detect = amdgpu_connector_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_lcd_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e203e5561107..a5e2fcbef0f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,6 +43,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
ctx->rings[i].sequence = 1;
ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
}
+
+ ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7dbe85d67d26..b4f4a9239069 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1408,16 +1408,6 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
- adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
- continue;
- /* enable clockgating to save power */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_GATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
- return r;
- }
if (adev->ip_blocks[i].funcs->late_init) {
r = adev->ip_blocks[i].funcs->late_init((void *)adev);
if (r) {
@@ -1426,6 +1416,18 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
}
adev->ip_block_status[i].late_initialized = true;
}
+ /* skip CG for VCE/UVD, it's handled specially */
+ if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+ /* enable clockgating to save power */
+ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_GATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ return r;
+ }
+ }
}
return 0;
@@ -1435,6 +1437,30 @@ static int amdgpu_fini(struct amdgpu_device *adev)
{
int i, r;
+ /* need to disable SMC first */
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].hw)
+ continue;
+ if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+ /* ungate blocks before hw fini so that we can shut down the blocks safely */
+ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ return r;
+ }
+ r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+ /* XXX handle errors */
+ if (r) {
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ }
+ adev->ip_block_status[i].hw = false;
+ break;
+ }
+ }
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_status[i].hw)
continue;
@@ -2073,7 +2099,8 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
if (!adev->ip_block_status[i].valid)
continue;
if (adev->ip_blocks[i].funcs->check_soft_reset)
- adev->ip_blocks[i].funcs->check_soft_reset(adev);
+ adev->ip_block_status[i].hang =
+ adev->ip_blocks[i].funcs->check_soft_reset(adev);
if (adev->ip_block_status[i].hang) {
DRM_INFO("IP block:%d is hang!\n", i);
asic_hang = true;
@@ -2102,12 +2129,20 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
{
- if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
- DRM_INFO("Some block need full reset!\n");
- return true;
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
+ if (adev->ip_block_status[i].hang) {
+ DRM_INFO("Some block needs full reset!\n");
+ return true;
+ }
+ }
}
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index fe36caf1b7d7..14f57d9915e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
printk("\n");
}
+
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
- u32 line_time_us, vblank_lines;
+ u32 vblank_in_pixels;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
- amdgpu_crtc->hw_mode.clock;
- vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+ vblank_in_pixels =
+ amdgpu_crtc->hw_mode.crtc_htotal *
+ (amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
+ (amdgpu_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
break;
}
}
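
The reworked vblank-time formula multiplies the blanking lines by htotal before the single divide, avoiding the precision loss of rounding a per-line time to whole microseconds first; worked through with hypothetical 1080p-style timings:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t htotal = 2200, vblank_end = 1125, vdisplay = 1080, v_border = 0;
	uint32_t clock_khz = 148500;

	uint32_t vblank_in_pixels =
		htotal * (vblank_end - vdisplay + v_border * 2);
	/* one divide at the end; the cast is just overflow caution */
	uint32_t vblank_time_us = (uint64_t)vblank_in_pixels * 1000 / clock_khz;

	printf("vblank: %u pixels, %u us\n", vblank_in_pixels, vblank_time_us);
	return 0; /* 99000 pixels, 666 us */
}
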
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index aa074fac0c7f..f3efb1c5dae9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -754,6 +754,10 @@ static const char *amdgpu_vram_names[] = {
int amdgpu_bo_init(struct amdgpu_device *adev)
{
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(adev->mc.aper_base,
+ adev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
adev->mc.aper_size);
@@ -769,6 +773,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
arch_phys_wc_del(adev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e1fa8731d1e2..3cb5e903cd62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -345,8 +345,8 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
ent = debugfs_create_file(name,
S_IFREG | S_IRUGO, root,
ring, &amdgpu_debugfs_ring_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
+ if (!ent)
+ return -ENOMEM;
i_size_write(ent->d_inode, ring->ring_size + 12);
ring->ent = ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 887483b8b818..dcaf691f56b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -555,10 +555,13 @@ struct amdgpu_ttm_tt {
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+ unsigned int flags = 0;
unsigned pinned = 0;
int r;
+ if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+ flags |= FOLL_WRITE;
+
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
/* check that we only use anonymous memory
to prevent problems with writeback */
@@ -581,7 +584,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
list_add(&guptask.list, &gtt->guptasks);
spin_unlock(&gtt->guptasklock);
- r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+ r = get_user_pages(userptr, num_pages, flags, p, NULL);
spin_lock(&gtt->guptasklock);
list_del(&guptask.list);
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index f80a0834e889..3c082e143730 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1514,14 +1514,16 @@ static int cz_dpm_set_powergating_state(void *handle,
return 0;
}
-/* borrowed from KV, need future unify */
static int cz_dpm_get_temperature(struct amdgpu_device *adev)
{
int actual_temp = 0;
- uint32_t temp = RREG32_SMC(0xC0300E0C);
+ uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP);
+ uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
- if (temp)
+ if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
actual_temp = 1000 * ((temp / 8) - 49);
+ else
+ actual_temp = 1000 * (temp / 8);
return actual_temp;
}
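
The corrected cz_dpm_get_temperature() decodes THM_TCON_CUR_TMP as 1/8-degree steps with a -49 C offset selected by the range bit; the decode in isolation (the register values are made up):

#include <stdio.h>

static int decode_temp_millideg(unsigned int cur_temp, int range_sel)
{
	if (range_sel)
		return 1000 * ((cur_temp / 8) - 49); /* offset range */
	return 1000 * (cur_temp / 8);
}

int main(void)
{
	printf("%d\n", decode_temp_millideg(8 * 114, 1)); /* 65000 */
	printf("%d\n", decode_temp_millideg(8 * 42, 0));  /* 42000 */
	return 0;
}
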
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 613ebb7ed50f..4108c686aa7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3188,16 +3188,11 @@ static int dce_v10_0_wait_for_idle(void *handle)
return 0;
}
-static int dce_v10_0_check_soft_reset(void *handle)
+static bool dce_v10_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (dce_v10_0_is_display_hung(adev))
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true;
- else
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false;
-
- return 0;
+ return dce_v10_0_is_display_hung(adev);
}
static int dce_v10_0_soft_reset(void *handle)
@@ -3205,9 +3200,6 @@ static int dce_v10_0_soft_reset(void *handle)
u32 srbm_soft_reset = 0, tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
- return 0;
-
if (dce_v10_0_is_display_hung(adev))
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6c6ff57b1c95..ee6a48a09214 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4087,14 +4087,21 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
int r;
+ u32 tmp;
gfx_v8_0_rlc_stop(adev);
/* disable CG */
- WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
+ tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
+ tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
+ RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+ WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
if (adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10)
- WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
+ adev->asic_type == CHIP_POLARIS10) {
+ tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
+ tmp &= ~0x3;
+ WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
+ }
/* disable PG */
WREG32(mmRLC_PG_CNTL, 0);
@@ -5137,7 +5144,7 @@ static int gfx_v8_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v8_0_check_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -5189,16 +5196,14 @@ static int gfx_v8_0_check_soft_reset(void *handle)
SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
if (grbm_soft_reset || srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true;
adev->gfx.grbm_soft_reset = grbm_soft_reset;
adev->gfx.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false;
adev->gfx.grbm_soft_reset = 0;
adev->gfx.srbm_soft_reset = 0;
+ return false;
}
-
- return 0;
}
static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
@@ -5226,7 +5231,8 @@ static int gfx_v8_0_pre_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5264,7 +5270,8 @@ static int gfx_v8_0_soft_reset(void *handle)
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5334,7 +5341,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1b319f5bc696..c22ef140a542 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1099,7 +1099,7 @@ static int gmc_v8_0_wait_for_idle(void *handle)
}
-static int gmc_v8_0_check_soft_reset(void *handle)
+static bool gmc_v8_0_check_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1116,20 +1116,19 @@ static int gmc_v8_0_check_soft_reset(void *handle)
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true;
adev->mc.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false;
adev->mc.srbm_soft_reset = 0;
+ return false;
}
- return 0;
}
static int gmc_v8_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ if (!adev->mc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_stop(adev, &adev->mc.save);
@@ -1145,7 +1144,7 @@ static int gmc_v8_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ if (!adev->mc.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->mc.srbm_soft_reset;
@@ -1175,7 +1174,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ if (!adev->mc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_resume(adev, &adev->mc.save);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index f325fd86430b..a9d10941fb53 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1268,7 +1268,7 @@ static int sdma_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v3_0_check_soft_reset(void *handle)
+static bool sdma_v3_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -1281,14 +1281,12 @@ static int sdma_v3_0_check_soft_reset(void *handle)
}
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true;
adev->sdma.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false;
adev->sdma.srbm_soft_reset = 0;
+ return false;
}
-
- return 0;
}
static int sdma_v3_0_pre_soft_reset(void *handle)
@@ -1296,7 +1294,7 @@ static int sdma_v3_0_pre_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1315,7 +1313,7 @@ static int sdma_v3_0_post_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1335,7 +1333,7 @@ static int sdma_v3_0_soft_reset(void *handle)
u32 srbm_soft_reset = 0;
u32 tmp;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 8bd08925b370..3de7bca5854b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3499,6 +3499,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
max_sclk = 75000;
max_mclk = 80000;
}
+ /* Limit clocks for some HD8600 parts */
+ if (adev->pdev->device == 0x6660 &&
+ adev->pdev->revision == 0x83) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index d127d59f953a..b4ea229bb449 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -373,7 +373,7 @@ static int tonga_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int tonga_ih_check_soft_reset(void *handle)
+static bool tonga_ih_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -384,21 +384,19 @@ static int tonga_ih_check_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true;
adev->irq.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false;
adev->irq.srbm_soft_reset = 0;
+ return false;
}
-
- return 0;
}
static int tonga_ih_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ if (!adev->irq.srbm_soft_reset)
return 0;
return tonga_ih_hw_fini(adev);
@@ -408,7 +406,7 @@ static int tonga_ih_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ if (!adev->irq.srbm_soft_reset)
return 0;
return tonga_ih_hw_init(adev);
@@ -419,7 +417,7 @@ static int tonga_ih_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ if (!adev->irq.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->irq.srbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index e0fd9f21ed95..ab3df6d75656 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -770,7 +770,7 @@ static int uvd_v6_0_wait_for_idle(void *handle)
}
#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
-static int uvd_v6_0_check_soft_reset(void *handle)
+static bool uvd_v6_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -782,19 +782,19 @@ static int uvd_v6_0_check_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true;
adev->uvd.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false;
adev->uvd.srbm_soft_reset = 0;
+ return false;
}
- return 0;
}
+
static int uvd_v6_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ if (!adev->uvd.srbm_soft_reset)
return 0;
uvd_v6_0_stop(adev);
@@ -806,7 +806,7 @@ static int uvd_v6_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ if (!adev->uvd.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->uvd.srbm_soft_reset;
@@ -836,7 +836,7 @@ static int uvd_v6_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ if (!adev->uvd.srbm_soft_reset)
return 0;
mdelay(5);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 3f6db4ec0102..8533269ec160 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -561,7 +561,7 @@ static int vce_v3_0_wait_for_idle(void *handle)
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
-static int vce_v3_0_check_soft_reset(void *handle)
+static bool vce_v3_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -591,16 +591,15 @@ static int vce_v3_0_check_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ mutex_unlock(&adev->grbm_idx_mutex);
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
adev->vce.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
adev->vce.srbm_soft_reset = 0;
+ return false;
}
- mutex_unlock(&adev->grbm_idx_mutex);
- return 0;
}
static int vce_v3_0_soft_reset(void *handle)
@@ -608,7 +607,7 @@ static int vce_v3_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ if (!adev->vce.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->vce.srbm_soft_reset;
@@ -638,7 +637,7 @@ static int vce_v3_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
@@ -651,7 +650,7 @@ static int vce_v3_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index c934b78c9e2f..bec8125bceb0 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -165,7 +165,7 @@ struct amd_ip_funcs {
/* poll for idle */
int (*wait_for_idle)(void *handle);
/* check soft reset the IP block */
- int (*check_soft_reset)(void *handle);
+ bool (*check_soft_reset)(void *handle);
/* pre soft reset the IP block */
int (*pre_soft_reset)(void *handle);
/* soft reset the IP block */
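
With check_soft_reset() now returning bool, each IP block just reports whether it is hung and the device-level code records that per block; a compressed userspace model of the caller loop (the block names and results are invented):

#include <stdbool.h>
#include <stdio.h>

struct ip_block_status { bool valid, hang; };

static bool fake_gfx_check(void)  { return true;  }
static bool fake_sdma_check(void) { return false; }

typedef bool (*check_fn)(void);

int main(void)
{
	check_fn checks[] = { fake_gfx_check, fake_sdma_check };
	struct ip_block_status status[2] = { { true }, { true } };
	bool asic_hang = false;

	for (int i = 0; i < 2; i++) {
		if (!status[i].valid)
			continue;
		/* caller records the per-block result itself */
		status[i].hang = checks[i]();
		if (status[i].hang) {
			printf("IP block:%d is hung\n", i);
			asic_hang = true;
		}
	}
	printf("asic hang: %d\n", asic_hang); /* 1 */
	return 0;
}
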
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 92b117843875..8cee4e0f9fde 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -49,6 +49,7 @@ static const pem_event_action * const uninitialize_event[] = {
uninitialize_display_phy_access_tasks,
disable_gfx_voltage_island_power_gating_tasks,
disable_gfx_clock_gating_tasks,
+ uninitialize_thermal_controller_tasks,
set_boot_state_tasks,
adjust_power_state_tasks,
disable_dynamic_state_management_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 7e4fcbbbe086..960424913496 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1785,6 +1785,21 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
return 0;
}
+static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+ int actual_temp = 0;
+ uint32_t val = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
+ uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
+
+ if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
+ actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ else
+ actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return actual_temp;
+}
+
static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1881,6 +1896,9 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
case AMDGPU_PP_SENSOR_VCE_POWER:
*value = cz_hwmgr->vce_power_gated ? 0 : 1;
return 0;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ *value = cz_thermal_get_temperature(hwmgr);
+ return 0;
default:
return -EINVAL;
}
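
Note: the cz_hwmgr hunk above wires a GPU temperature readout into cz_read_sensor(). A minimal, compilable sketch of the same arithmetic follows; the helper name and the 1000-units-per-degree constant are assumptions for illustration, not the driver's actual symbols, and the raw field is taken to be in 1/8 degC steps with a 49 degC offset when the range-select bit is set, as in the hunk.

    #include <stdio.h>
    #include <stdint.h>

    /* Model of the conversion done by the new cz_thermal_get_temperature(). */
    static int cz_temp_model(uint32_t temp, int range_sel)
    {
        const int units_per_centigrade = 1000; /* assumed value of the powerplay unit constant */

        if (range_sel)
            return ((int)(temp / 8) - 49) * units_per_centigrade;
        return (int)(temp / 8) * units_per_centigrade;
    }

    int main(void)
    {
        /* 520 raw steps with range select set: (520/8 - 49) * 1000 = 16000 (16.0 degC) */
        printf("%d\n", cz_temp_model(520, 1));
        return 0;
    }
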
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 508245d49d33..609996c84ad5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1030,20 +1030,19 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
/* disable SCLK dpm */
- if (!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Disable) == 0),
- "Failed to disable SCLK DPM!",
- return -EINVAL);
+ if (!data->sclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable SCLK DPM when DPM is disabled",
+ return 0);
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
+ }
/* disable MCLK dpm */
if (!data->mclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Disable) == 0),
- "Failed to disable MCLK DPM!",
- return -EINVAL);
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable MCLK DPM when DPM is disabled",
+ return 0);
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
}
return 0;
@@ -1069,10 +1068,13 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
return -EINVAL);
}
- if (smu7_disable_sclk_mclk_dpm(hwmgr)) {
- printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
- return -EINVAL;
- }
+ smu7_disable_sclk_mclk_dpm(hwmgr);
+
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable voltage DPM when DPM is disabled",
+ return 0);
+
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);
return 0;
}
@@ -1226,7 +1228,7 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
- smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+ smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);
tmp_result = smu7_enable_sclk_control(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1306,6 +1308,12 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable thermal auto throttle!", result = tmp_result);
+ if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)),
+ "Failed to disable AVFS!",
+ return -EINVAL);
+ }
+
tmp_result = smu7_stop_dpm(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to stop DPM!", result = tmp_result);
@@ -1452,8 +1460,10 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
- if (table_info != NULL)
- sclk_table = table_info->vdd_dep_on_sclk;
+ if (table_info == NULL)
+ return -EINVAL;
+
+ sclk_table = table_info->vdd_dep_on_sclk;
for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
@@ -3802,13 +3812,15 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
{
- const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1);
- const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2);
+ const struct smu7_power_state *psa;
+ const struct smu7_power_state *psb;
int i;
if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
return -EINVAL;
+ psa = cast_const_phw_smu7_power_state(pstate1);
+ psb = cast_const_phw_smu7_power_state(pstate2);
/* If the two states don't even have the same number of performance levels they cannot be the same state. */
if (psa->performance_level_count != psb->performance_level_count) {
*equal = false;
@@ -4324,6 +4336,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.set_mclk_od = smu7_set_mclk_od,
.get_clock_by_type = smu7_get_clock_by_type,
.read_sensor = smu7_read_sensor,
+ .dynamic_state_management_disable = smu7_disable_dpm_tasks,
};
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index eda802bc63c8..8c889caba420 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -2458,7 +2458,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
- if (!data->is_memory_gddr5) {
+ if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
for (k = 0; k < table->num_entries; k++) {
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2f58e9e2a59c..a51f8cbcfe26 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -332,17 +332,19 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- if (dcrtc->dpms != dpms) {
- dcrtc->dpms = dpms;
- if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms))
- WARN_ON(clk_prepare_enable(dcrtc->clk));
- armada_drm_crtc_update(dcrtc);
- if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms))
- clk_disable_unprepare(dcrtc->clk);
+ if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
if (dpms_blanked(dpms))
armada_drm_vblank_off(dcrtc);
- else
+ else if (!IS_ERR(dcrtc->clk))
+ WARN_ON(clk_prepare_enable(dcrtc->clk));
+ dcrtc->dpms = dpms;
+ armada_drm_crtc_update(dcrtc);
+ if (!dpms_blanked(dpms))
drm_crtc_vblank_on(&dcrtc->crtc);
+ else if (!IS_ERR(dcrtc->clk))
+ clk_disable_unprepare(dcrtc->clk);
+ } else if (dcrtc->dpms != dpms) {
+ dcrtc->dpms = dpms;
}
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 608df4c90520..0743e65cb240 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -267,6 +267,8 @@ int ast_mm_init(struct ast_private *ast)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -275,11 +277,15 @@ int ast_mm_init(struct ast_private *ast)
void ast_mm_fini(struct ast_private *ast)
{
+ struct drm_device *dev = ast->dev;
+
ttm_bo_device_release(&ast->ttm.bdev);
ast_ttm_global_release(ast);
arch_phys_wc_del(ast->fb_mtrr);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
void ast_ttm_placement(struct ast_bo *bo, int domain)
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bb2438dd8733..5e7e63ce7bce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -267,6 +267,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -276,6 +279,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
void cirrus_mm_fini(struct cirrus_device *cirrus)
{
+ struct drm_device *dev = cirrus->dev;
+
if (!cirrus->mm_inited)
return;
@@ -285,6 +290,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
arch_phys_wc_del(cirrus->fb_mtrr);
cirrus->fb_mtrr = 0;
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 1df2d33d0b40..ffb2ab389d1d 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -54,9 +54,6 @@ int drm_name_info(struct seq_file *m, void *data)
mutex_lock(&dev->master_mutex);
master = dev->master;
- if (!master)
- goto out_unlock;
-
seq_printf(m, "%s", dev->driver->name);
if (dev->dev)
seq_printf(m, " dev=%s", dev_name(dev->dev));
@@ -65,7 +62,6 @@ int drm_name_info(struct seq_file *m, void *data)
if (dev->unique)
seq_printf(m, " unique=%s", dev->unique);
seq_printf(m, "\n");
-out_unlock:
mutex_unlock(&dev->master_mutex);
return 0;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index cb86c7e5495c..d9230132dfbc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -329,20 +329,34 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
/*
* Append a LINK to the submitted command buffer to return to
* the ring buffer. return_target is the ring target address.
- * We need three dwords: event, wait, link.
+ * We need at most 7 dwords in the return target: 2 cache flush +
+ * 2 semaphore stall + 1 event + 1 wait + 1 link.
*/
- return_dwords = 3;
+ return_dwords = 7;
return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
CMD_LINK(cmdbuf, return_dwords, return_target);
/*
- * Append event, wait and link pointing back to the wait
- * command to the ring buffer.
+ * Append a cache flush, stall, event, wait and link pointing back to
+ * the wait command to the ring buffer.
*/
+ if (gpu->exec_state == ETNA_PIPE_2D) {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+ VIVS_GL_FLUSH_CACHE_PE2D);
+ } else {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+ VIVS_GL_FLUSH_CACHE_DEPTH |
+ VIVS_GL_FLUSH_CACHE_COLOR);
+ CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+ VIVS_TS_FLUSH_CACHE_FLUSH);
+ }
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, return_target + 8);
+ CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+ buffer->user_size - 4);
if (drm_debug & DRM_UT_DRIVER)
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5ce3603e6eac..0370b842d9cc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct page **pvec;
uintptr_t ptr;
+ unsigned int flags = 0;
pvec = drm_malloc_ab(npages, sizeof(struct page *));
if (!pvec)
return ERR_PTR(-ENOMEM);
+ if (!etnaviv_obj->userptr.ro)
+ flags |= FOLL_WRITE;
+
pinned = 0;
ptr = etnaviv_obj->userptr.ptr;
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
- !etnaviv_obj->userptr.ro, 0,
- pvec + pinned, NULL);
+ flags, pvec + pinned, NULL);
if (ret < 0)
break;
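
Note: this hunk is part of a tree-wide conversion in which get_user_pages*() and related helpers take a single gup_flags word instead of separate write/force booleans; the same pattern recurs below in the exynos, i915, radeon and via hunks. A trivial sketch of how the flag word is built (the numeric FOLL_WRITE value here is illustrative only):

    #include <stdio.h>

    #define FOLL_WRITE 0x01   /* illustrative stand-in for the kernel flag */

    int main(void)
    {
        int read_only = 0;
        unsigned int flags = 0;

        /* Writable userptr mappings request write access via FOLL_WRITE. */
        if (!read_only)
            flags |= FOLL_WRITE;

        printf("gup flags = 0x%x\n", flags);
        return 0;
    }
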
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index d3796ed8d8c5..169ac96e8f08 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -330,7 +330,8 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
return (u32)buf->vram_node.start;
mutex_lock(&mmu->lock);
- ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size);
+ ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
+ buf->size + SZ_64K);
if (ret < 0) {
mutex_unlock(&mmu->lock);
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index aa92decf4233..fbd13fabdf2d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -488,7 +488,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
goto err_free;
}
- ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+ ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+ g2d_userptr->vec);
if (ret != npages) {
DRM_ERROR("failed to get user pages from userptr.\n");
if (ret < 0)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 3371635cd4d7..b2d5e188b1b8 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -51,6 +51,7 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
DCU_MODE_DCU_MODE(DCU_MODE_OFF));
regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
DCU_UPDATE_MODE_READREG);
+ clk_disable_unprepare(fsl_dev->pix_clk);
}
static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
@@ -58,6 +59,7 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ clk_prepare_enable(fsl_dev->pix_clk);
regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
DCU_MODE_DCU_MODE_MASK,
DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
@@ -116,8 +118,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
return;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0884c45aefe8..e04efbed1a54 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -267,12 +267,8 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
return ret;
}
- ret = clk_prepare_enable(fsl_dev->pix_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable pix clk\n");
- goto disable_dcu_clk;
- }
-
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_enable(fsl_dev->tcon);
fsl_dcu_drm_init_planes(fsl_dev->drm);
drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
@@ -284,10 +280,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
enable_irq(fsl_dev->irq);
return 0;
-
-disable_dcu_clk:
- clk_disable_unprepare(fsl_dev->clk);
- return ret;
}
#endif
@@ -401,18 +393,12 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
goto disable_clk;
}
- ret = clk_prepare_enable(fsl_dev->pix_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable pix clk\n");
- goto unregister_pix_clk;
- }
-
fsl_dev->tcon = fsl_tcon_init(dev);
drm = drm_dev_alloc(driver, dev);
if (IS_ERR(drm)) {
ret = PTR_ERR(drm);
- goto disable_pix_clk;
+ goto unregister_pix_clk;
}
fsl_dev->dev = dev;
@@ -433,8 +419,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
unref:
drm_dev_unref(drm);
-disable_pix_clk:
- clk_disable_unprepare(fsl_dev->pix_clk);
unregister_pix_clk:
clk_unregister(fsl_dev->pix_clk);
disable_clk:
@@ -447,7 +431,6 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
clk_disable_unprepare(fsl_dev->clk);
- clk_disable_unprepare(fsl_dev->pix_clk);
clk_unregister(fsl_dev->pix_clk);
drm_put_dev(fsl_dev->drm);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index a7e5486bd1e9..9e6f7d8112b3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -211,11 +211,6 @@ void fsl_dcu_drm_init_planes(struct drm_device *dev)
for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
}
- regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_OFF));
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
}
struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 26edcc899712..e1dd75b18118 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -20,38 +20,6 @@
#include "fsl_dcu_drm_drv.h"
#include "fsl_tcon.h"
-static int
-fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return 0;
-}
-
-static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
- if (fsl_dev->tcon)
- fsl_tcon_bypass_disable(fsl_dev->tcon);
-}
-
-static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
- if (fsl_dev->tcon)
- fsl_tcon_bypass_enable(fsl_dev->tcon);
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .atomic_check = fsl_dcu_drm_encoder_atomic_check,
- .disable = fsl_dcu_drm_encoder_disable,
- .enable = fsl_dcu_drm_encoder_enable,
-};
-
static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -68,13 +36,16 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
int ret;
encoder->possible_crtcs = 1;
+
+ /* Use bypass mode for parallel RGB/LVDS encoder */
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_enable(fsl_dev->tcon);
+
ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
DRM_MODE_ENCODER_LVDS, NULL);
if (ret < 0)
return ret;
- drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e537930c64b5..c6f780f5abc9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
+ unsigned int flags = 0;
+
+ if (!obj->userptr.read_only)
+ flags |= FOLL_WRITE;
ret = -EFAULT;
if (atomic_inc_not_zero(&mm->mm_users)) {
@@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
- !obj->userptr.read_only, 0,
+ flags,
pvec + pinned, NULL);
if (ret < 0)
break;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 919b35f2ad24..dcf7d11ac380 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -266,6 +266,9 @@ int mgag200_mm_init(struct mga_device *mdev)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -274,10 +277,14 @@ int mgag200_mm_init(struct mga_device *mdev)
void mgag200_mm_fini(struct mga_device *mdev)
{
+ struct drm_device *dev = mdev->dev;
+
ttm_bo_device_release(&mdev->ttm.bdev);
mgag200_ttm_global_release(mdev);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
mdev->fb_mtrr = 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1825dbc33192..a6dbe8258040 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -398,6 +398,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* VRAM init */
drm->gem.vram_available = drm->device.info.ram_user;
+ arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
+
ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
drm->gem.vram_available >> PAGE_SHIFT);
if (ret) {
@@ -430,6 +433,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
+ struct nvkm_device *device = nvxx_device(&drm->device);
+
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
@@ -439,4 +444,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
+ arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
+
}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 6a4b020dd0b4..5a26eb4545aa 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
struct drm_device *dev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
- u32 line_time_us, vblank_lines;
+ u32 vblank_in_pixels;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
- line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
- radeon_crtc->hw_mode.clock;
- vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
- radeon_crtc->hw_mode.crtc_vdisplay +
- (radeon_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
+ vblank_in_pixels =
+ radeon_crtc->hw_mode.crtc_htotal *
+ (radeon_crtc->hw_mode.crtc_vblank_end -
+ radeon_crtc->hw_mode.crtc_vdisplay +
+ (radeon_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
break;
}
}
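
Note: the r600_dpm hunk reorders the vblank-time arithmetic so the single integer division happens after the multiplication, instead of truncating the per-line time first. A standalone example with made-up 1080p-style timings shows the precision that the old order loses:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t htotal = 2200, vblank_lines = 45, clock_khz = 148500;

        /* Old order: per-line time in us is truncated first (2200*1000/148500 = 14 us). */
        uint32_t line_time_us = (htotal * 1000) / clock_khz;
        uint32_t old_vblank_us = vblank_lines * line_time_us;          /* 630 us */

        /* New order: count vblank in pixels, divide once at the end. */
        uint32_t vblank_in_pixels = htotal * vblank_lines;
        uint32_t new_vblank_us = vblank_in_pixels * 1000 / clock_khz;  /* 666 us */

        printf("old=%u us, new=%u us\n", old_vblank_us, new_vblank_us);
        return 0;
    }
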
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 50e96d2c593d..e18839d52e3e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -927,6 +927,16 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
return ret;
}
+static void radeon_connector_unregister(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->ddc_bus->has_aux) {
+ drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
+ radeon_connector->ddc_bus->has_aux = false;
+ }
+}
+
static void radeon_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -984,6 +994,7 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_lvds_set_property,
};
@@ -1111,6 +1122,7 @@ static const struct drm_connector_funcs radeon_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_connector_set_property,
};
@@ -1188,6 +1200,7 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_tv_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_connector_set_property,
};
@@ -1519,6 +1532,7 @@ static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
.detect = radeon_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1832,6 +1846,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1841,6 +1856,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1850,6 +1866,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b8ab30a7dd6d..cdb8cb568c15 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1675,20 +1675,20 @@ int radeon_modeset_init(struct radeon_device *rdev)
void radeon_modeset_fini(struct radeon_device *rdev)
{
- radeon_fbdev_fini(rdev);
- kfree(rdev->mode_info.bios_hardcoded_edid);
-
- /* free i2c buses */
- radeon_i2c_fini(rdev);
-
if (rdev->mode_info.mode_config_initialized) {
- radeon_afmt_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
drm_crtc_force_disable_all(rdev->ddev);
+ radeon_fbdev_fini(rdev);
+ radeon_afmt_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
+
+ kfree(rdev->mode_info.bios_hardcoded_edid);
+
+ /* free i2c buses */
+ radeon_i2c_fini(rdev);
}
static bool is_hdtv_mode(const struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 91c8f4339566..00ea0002b539 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -96,9 +96,10 @@
* 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
* 2.46.0 - Add PFP_SYNC_ME support on evergreen
* 2.47.0 - Add UVD_NO_OP register support
+ * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 47
+#define KMS_DRIVER_MINOR 48
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 021aa005623f..29f7817af821 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -982,9 +982,8 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
return;
+ WARN_ON(i2c->has_aux);
i2c_del_adapter(&i2c->adapter);
- if (i2c->has_aux)
- drm_dp_aux_unregister(&i2c->aux);
kfree(i2c);
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index be30861afae9..41b72ce6613f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -446,6 +446,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
int radeon_bo_init(struct radeon_device *rdev)
{
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(rdev->mc.aper_base,
+ rdev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
if (!rdev->fastfb_working) {
rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
@@ -463,6 +467,7 @@ void radeon_bo_fini(struct radeon_device *rdev)
{
radeon_ttm_fini(rdev);
arch_phys_wc_del(rdev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}
/* Returns how many bytes TTM can move per IB.
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 455268214b89..3de5e6e21662 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -566,7 +566,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
struct page **pages = ttm->pages + pinned;
- r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
+ r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
+ pages, NULL);
if (r < 0)
goto release_pages;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 7ee9aafbdf74..e402be8821c4 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4431,6 +4431,7 @@ static bool si_vm_reg_valid(u32 reg)
case SPI_CONFIG_CNTL:
case SPI_CONFIG_CNTL_1:
case TA_CNTL_AUX:
+ case TA_CS_BC_BASE_ADDR:
return true;
default:
DRM_ERROR("Invalid register 0x%x in CS\n", reg);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index eb220eecba78..65a911ddd509 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1145,6 +1145,7 @@
#define SPI_LB_CU_MASK 0x9354
#define TA_CNTL_AUX 0x9508
+#define TA_CS_BC_BASE_ADDR 0x950C
#define CC_RB_BACKEND_DISABLE 0x98F4
#define BACKEND_DISABLE(x) ((x) << 16)
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 7e2a12c4fed2..1a3ad769f8c8 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -241,8 +241,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
down_read(&current->mm->mmap_sem);
ret = get_user_pages((unsigned long)xfer->mem_addr,
vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE),
- 0, vsg->pages, NULL);
+ (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+ vsg->pages, NULL);
up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e8ae3dc476d1..18061a4bc2f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -241,15 +241,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr);
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
-module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
-module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
-module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
-module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
-module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 070d750af16d..1e59a486bba8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -43,7 +43,7 @@
#define VMWGFX_DRIVER_DATE "20160210"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 10
+#define VMWGFX_DRIVER_MINOR 11
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dc5beff2b4aa..c7b53d987f06 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -35,17 +35,37 @@
#define VMW_RES_HT_ORDER 12
/**
+ * enum vmw_resource_relocation_type - Relocation type for resources
+ *
+ * @vmw_res_rel_normal: Traditional relocation. The resource id in the
+ * command stream is replaced with the actual id after validation.
+ * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
+ * with a NOP.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
+ * after validation is -1, the command is replaced with a NOP. Otherwise no
+ * action.
+ */
+enum vmw_resource_relocation_type {
+ vmw_res_rel_normal,
+ vmw_res_rel_nop,
+ vmw_res_rel_cond_nop,
+ vmw_res_rel_max
+};
+
+/**
* struct vmw_resource_relocation - Relocation info for resources
*
* @head: List head for the software context's relocation list.
* @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of 4 byte entries into the command buffer where the
+ * @offset: Offset of single byte entries into the command buffer where the
* id that needs fixup is located.
+ * @rel_type: Type of relocation.
*/
struct vmw_resource_relocation {
struct list_head head;
const struct vmw_resource *res;
- unsigned long offset;
+ u32 offset:29;
+ enum vmw_resource_relocation_type rel_type:3;
};
/**
@@ -109,7 +129,18 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct vmw_dma_buffer *vbo,
bool validate_as_mob,
uint32_t *p_val_node);
-
+/**
+ * vmw_ptr_diff - Compute the offset from a to b in bytes
+ *
+ * @a: A starting pointer.
+ * @b: A pointer offset in the same address space.
+ *
+ * Returns: The offset in bytes between the two pointers.
+ */
+static size_t vmw_ptr_diff(void *a, void *b)
+{
+ return (unsigned long) b - (unsigned long) a;
+}
/**
* vmw_resources_unreserve - unreserve resources previously reserved for
@@ -409,11 +440,14 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
* @list: Pointer to head of relocation list.
* @res: The resource.
* @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is 4 bytes.
+ * id that needs fixup is located. Granularity is one byte.
+ * @rel_type: Relocation type.
*/
static int vmw_resource_relocation_add(struct list_head *list,
const struct vmw_resource *res,
- unsigned long offset)
+ unsigned long offset,
+ enum vmw_resource_relocation_type
+ rel_type)
{
struct vmw_resource_relocation *rel;
@@ -425,6 +459,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
rel->res = res;
rel->offset = offset;
+ rel->rel_type = rel_type;
list_add_tail(&rel->head, list);
return 0;
@@ -459,11 +494,24 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
{
struct vmw_resource_relocation *rel;
+ /* Validate the struct vmw_resource_relocation member size */
+ BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
+ BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
+
list_for_each_entry(rel, list, head) {
- if (likely(rel->res != NULL))
- cb[rel->offset] = rel->res->id;
- else
- cb[rel->offset] = SVGA_3D_CMD_NOP;
+ u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
+ switch (rel->rel_type) {
+ case vmw_res_rel_normal:
+ *addr = rel->res->id;
+ break;
+ case vmw_res_rel_nop:
+ *addr = SVGA_3D_CMD_NOP;
+ break;
+ default:
+ if (rel->res->id == -1)
+ *addr = SVGA_3D_CMD_NOP;
+ break;
+ }
}
}
@@ -655,7 +703,9 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
*p_val = NULL;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
- id_loc - sw_context->buf_start);
+ vmw_ptr_diff(sw_context->buf_start,
+ id_loc),
+ vmw_res_rel_normal);
if (unlikely(ret != 0))
return ret;
@@ -721,7 +771,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
- id_loc - sw_context->buf_start);
+ vmw_ptr_diff(sw_context->buf_start, id_loc),
+ vmw_res_rel_normal);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
@@ -2143,10 +2194,10 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations,
- NULL, &cmd->header.id -
- sw_context->buf_start);
-
- return 0;
+ NULL,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_nop);
}
/**
@@ -2188,10 +2239,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations,
- NULL, &cmd->header.id -
- sw_context->buf_start);
-
- return 0;
+ NULL,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_nop);
}
/**
@@ -2848,8 +2899,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
* @header: Pointer to the command header in the command stream.
*
* Check that the view exists, and if it was not created using this
- * command batch, make sure it's validated (present in the device) so that
- * the remove command will not confuse the device.
+ * command batch, conditionally make this command a NOP.
*/
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -2877,10 +2927,16 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
return ret;
/*
- * Add view to the validate list iff it was not created using this
- * command batch.
+ * If the view wasn't created during this command batch, it might
+ * have been removed due to a context swapout, so add a
+ * relocation to conditionally make this command a NOP to avoid
+ * device errors.
*/
- return vmw_view_res_val_add(sw_context, view);
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ view,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_cond_nop);
}
/**
@@ -3029,6 +3085,35 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
cmd->body.shaderResourceViewId);
}
+/**
+ * vmw_cmd_dx_transfer_from_buffer -
+ * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXTransferFromBuffer body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.srcSid, NULL);
+ if (ret != 0)
+ return ret;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.destSid, NULL);
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -3379,6 +3464,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_buffer_copy_check, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
&vmw_cmd_pred_copy_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
+ &vmw_cmd_dx_transfer_from_buffer,
+ true, false, true),
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -3848,14 +3936,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
int ret;
*header = NULL;
- if (!dev_priv->cman || kernel_commands)
- return kernel_commands;
-
if (command_size > SVGA_CB_MAX_SIZE) {
DRM_ERROR("Command buffer is too large.\n");
return ERR_PTR(-EINVAL);
}
+ if (!dev_priv->cman || kernel_commands)
+ return kernel_commands;
+
/* If possible, add a little space for fencing. */
cmdbuf_size = command_size + 512;
cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
@@ -4232,9 +4320,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
vmw_dmabuf_unreference(&dev_priv->pinned_bo);
- DRM_INFO("Dummy query bo pin count: %d\n",
- dev_priv->dummy_query_bo->pin_count);
-
out_unlock:
return;
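
Note: the vmwgfx_execbuf hunks switch resource relocations from 4-byte-granular offsets to plain byte offsets computed with the new vmw_ptr_diff() helper, with the relocation type packed into the same word. A small userspace sketch of the record-then-apply round trip (names are illustrative, not the driver's):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Same idea as vmw_ptr_diff(): byte distance from buffer start to the id. */
    static size_t ptr_diff(void *a, void *b)
    {
        return (uintptr_t)b - (uintptr_t)a;
    }

    int main(void)
    {
        uint32_t cb[8] = { 0 };
        uint32_t *id_loc = &cb[3];              /* location of the id inside the buffer */
        size_t offset = ptr_diff(cb, id_loc);   /* recorded at check time: 12 bytes */

        /* Applied later, as in vmw_resource_relocations_apply(). */
        uint32_t *addr = (uint32_t *)((char *)cb + offset);
        *addr = 42;                             /* e.g. the validated resource id */

        printf("offset=%zu cb[3]=%u\n", offset, cb[3]);
        return 0;
    }
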
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6a328d507a28..52ca1c9d070e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -574,10 +574,8 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
- if (nonblock)
- return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
-
- lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
+ lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
+ nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index c2a721a8cef9..b445ce9b9757 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -324,7 +324,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
if (res->id != -1) {
cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
return;
@@ -397,7 +397,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
submit_size = vmw_surface_define_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
ret = -ENOMEM;
@@ -446,11 +446,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
uint8_t *cmd;
struct vmw_private *dev_priv = res->dev_priv;
- BUG_ON(val_buf->bo == NULL);
-
+ BUG_ON(!val_buf->bo);
submit_size = vmw_surface_dma_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"DMA.\n");
return -ENOMEM;
@@ -538,7 +537,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
submit_size = vmw_surface_destroy_size();
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"eviction.\n");
return -ENOMEM;
@@ -578,7 +577,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
int ret;
struct vmw_resource *res = &srf->res;
- BUG_ON(res_free == NULL);
+ BUG_ON(!res_free);
if (!dev_priv->has_mob)
vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_init(dev_priv, res, true, res_free,
@@ -700,7 +699,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_create_req *req = &arg->req;
struct drm_vmw_surface_arg *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct drm_vmw_size __user *user_sizes;
int ret;
int i, j;
uint32_t cur_bo_offset;
@@ -748,7 +746,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
+ if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_no_user_srf;
}
@@ -763,29 +761,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
srf->num_sizes = num_sizes;
user_srf->size = size;
-
- srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
+ srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr,
+ sizeof(*srf->sizes) * srf->num_sizes);
+ if (IS_ERR(srf->sizes)) {
+ ret = PTR_ERR(srf->sizes);
goto out_no_sizes;
}
- srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
- GFP_KERNEL);
- if (unlikely(srf->offsets == NULL)) {
+ srf->offsets = kmalloc_array(srf->num_sizes,
+ sizeof(*srf->offsets),
+ GFP_KERNEL);
+ if (unlikely(!srf->offsets)) {
ret = -ENOMEM;
goto out_no_offsets;
}
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- req->size_addr;
-
- ret = copy_from_user(srf->sizes, user_sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- goto out_no_copy;
- }
-
srf->base_size = *srf->sizes;
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
srf->multisample_count = 0;
@@ -923,7 +913,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
ret = -EINVAL;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
- if (unlikely(base == NULL)) {
+ if (unlikely(!base)) {
DRM_ERROR("Could not find surface to reference.\n");
goto out_no_lookup;
}
@@ -1069,7 +1059,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd = vmw_fifo_reserve(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
ret = -ENOMEM;
@@ -1135,7 +1125,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd1 == NULL)) {
+ if (unlikely(!cmd1)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"binding.\n");
return -ENOMEM;
@@ -1185,7 +1175,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"unbinding.\n");
return -ENOMEM;
@@ -1244,7 +1234,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
vmw_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
mutex_unlock(&dev_priv->binding_mutex);
@@ -1410,7 +1400,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- if (srf->res.backup == NULL) {
+ if (!srf->res.backup) {
DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
goto out_bad_resource;
}
@@ -1524,7 +1514,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
}
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
+ if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_no_user_srf;
}
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index 8fd4bf77f264..818ea7d93533 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -234,58 +234,6 @@ static __u8 pid0011_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 pid0006_rdesc_fixed[] = {
- 0x05, 0x01, /* Usage Page (Generic Desktop) */
- 0x09, 0x04, /* Usage (Joystick) */
- 0xA1, 0x01, /* Collection (Application) */
- 0xA1, 0x02, /* Collection (Logical) */
- 0x75, 0x08, /* Report Size (8) */
- 0x95, 0x05, /* Report Count (5) */
- 0x15, 0x00, /* Logical Minimum (0) */
- 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
- 0x35, 0x00, /* Physical Minimum (0) */
- 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
- 0x09, 0x30, /* Usage (X) */
- 0x09, 0x33, /* Usage (Ry) */
- 0x09, 0x32, /* Usage (Z) */
- 0x09, 0x31, /* Usage (Y) */
- 0x09, 0x34, /* Usage (Ry) */
- 0x81, 0x02, /* Input (Variable) */
- 0x75, 0x04, /* Report Size (4) */
- 0x95, 0x01, /* Report Count (1) */
- 0x25, 0x07, /* Logical Maximum (7) */
- 0x46, 0x3B, 0x01, /* Physical Maximum (315) */
- 0x65, 0x14, /* Unit (Centimeter) */
- 0x09, 0x39, /* Usage (Hat switch) */
- 0x81, 0x42, /* Input (Variable) */
- 0x65, 0x00, /* Unit (None) */
- 0x75, 0x01, /* Report Size (1) */
- 0x95, 0x0C, /* Report Count (12) */
- 0x25, 0x01, /* Logical Maximum (1) */
- 0x45, 0x01, /* Physical Maximum (1) */
- 0x05, 0x09, /* Usage Page (Button) */
- 0x19, 0x01, /* Usage Minimum (0x01) */
- 0x29, 0x0C, /* Usage Maximum (0x0C) */
- 0x81, 0x02, /* Input (Variable) */
- 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined) */
- 0x75, 0x01, /* Report Size (1) */
- 0x95, 0x08, /* Report Count (8) */
- 0x25, 0x01, /* Logical Maximum (1) */
- 0x45, 0x01, /* Physical Maximum (1) */
- 0x09, 0x01, /* Usage (0x01) */
- 0x81, 0x02, /* Input (Variable) */
- 0xC0, /* End Collection */
- 0xA1, 0x02, /* Collection (Logical) */
- 0x75, 0x08, /* Report Size (8) */
- 0x95, 0x07, /* Report Count (7) */
- 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
- 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
- 0x09, 0x02, /* Usage (0x02) */
- 0x91, 0x02, /* Output (Variable) */
- 0xC0, /* End Collection */
- 0xC0 /* End Collection */
-};
-
static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -296,16 +244,34 @@ static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*rsize = sizeof(pid0011_rdesc_fixed);
}
break;
- case 0x0006:
- if (*rsize == sizeof(pid0006_rdesc_fixed)) {
- rdesc = pid0006_rdesc_fixed;
- *rsize = sizeof(pid0006_rdesc_fixed);
- }
- break;
}
return rdesc;
}
+#define map_abs(c) hid_map_usage(hi, usage, bit, max, EV_ABS, (c))
+#define map_rel(c) hid_map_usage(hi, usage, bit, max, EV_REL, (c))
+
+static int dr_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid) {
+ /*
+ * revert to the old hid-input behavior where axes
+ * can be randomly assigned when hid->usage is reused.
+ */
+ case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
+ case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ:
+ if (field->flags & HID_MAIN_ITEM_RELATIVE)
+ map_rel(usage->hid & 0xf);
+ else
+ map_abs(usage->hid & 0xf);
+ return 1;
+ }
+
+ return 0;
+}
+
static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
@@ -352,6 +318,7 @@ static struct hid_driver dr_driver = {
.id_table = dr_devices,
.report_fixup = dr_report_fixup,
.probe = dr_probe,
+ .input_mapping = dr_input_mapping,
};
module_hid_driver(dr_driver);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index cd59c79eebdd..6cfb5cacc253 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -64,6 +64,9 @@
#define USB_VENDOR_ID_AKAI 0x2011
#define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715
+#define USB_VENDOR_ID_AKAI_09E8 0x09E8
+#define USB_DEVICE_ID_AKAI_09E8_MIDIMIX 0x0031
+
#define USB_VENDOR_ID_ALCOR 0x058f
#define USB_DEVICE_ID_ALCOR_USBRS232 0x9720
diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c
index d8d55f37b4f5..d3e1ab162f7c 100644
--- a/drivers/hid/hid-led.c
+++ b/drivers/hid/hid-led.c
@@ -100,6 +100,7 @@ struct hidled_device {
const struct hidled_config *config;
struct hid_device *hdev;
struct hidled_rgb *rgb;
+ u8 *buf;
struct mutex lock;
};
@@ -118,13 +119,19 @@ static int hidled_send(struct hidled_device *ldev, __u8 *buf)
mutex_lock(&ldev->lock);
+ /*
+ * buffer provided to hid_hw_raw_request must not be on the stack
+ * and must not be part of a data structure
+ */
+ memcpy(ldev->buf, buf, ldev->config->report_size);
+
if (ldev->config->report_type == RAW_REQUEST)
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
else if (ldev->config->report_type == OUTPUT_REPORT)
- ret = hid_hw_output_report(ldev->hdev, buf,
+ ret = hid_hw_output_report(ldev->hdev, ldev->buf,
ldev->config->report_size);
else
ret = -EINVAL;
@@ -147,17 +154,21 @@ static int hidled_recv(struct hidled_device *ldev, __u8 *buf)
mutex_lock(&ldev->lock);
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ memcpy(ldev->buf, buf, ldev->config->report_size);
+
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
if (ret < 0)
goto err;
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
+
+ memcpy(buf, ldev->buf, ldev->config->report_size);
err:
mutex_unlock(&ldev->lock);
@@ -447,6 +458,10 @@ static int hidled_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!ldev)
return -ENOMEM;
+ ldev->buf = devm_kmalloc(&hdev->dev, MAX_REPORT_SIZE, GFP_KERNEL);
+ if (!ldev->buf)
+ return -ENOMEM;
+
ret = hid_parse(hdev);
if (ret)
return ret;
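
Note: the hid-led hunk allocates a dedicated heap buffer because the buffer handed to hid_hw_raw_request()/hid_hw_output_report() may be used for DMA, so it must not live on the stack or inside another structure. A userspace model of that bounce-buffer pattern (fake_raw_request() is a stand-in, not a real HID API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define REPORT_SIZE 8

    /* Stand-in for the raw-request call that needs a DMA-safe buffer. */
    static int fake_raw_request(unsigned char *dma_safe_buf, size_t len)
    {
        printf("submitting %zu-byte report, id 0x%02x\n", len, dma_safe_buf[0]);
        return 0;
    }

    int main(void)
    {
        unsigned char report[REPORT_SIZE] = { 0x01, 0xaa, 0xbb };  /* caller's stack copy */
        unsigned char *buf = malloc(REPORT_SIZE);                  /* dedicated heap buffer */

        if (!buf)
            return 1;
        memcpy(buf, report, REPORT_SIZE);     /* same bounce step the patch adds */
        fake_raw_request(buf, REPORT_SIZE);
        free(buf);
        return 0;
    }
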
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0a0eca5da47d..354d49ea36dd 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -56,6 +56,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 4aa3cb63fd41..bcd06306f3e8 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -314,10 +314,14 @@ static void heartbeat_onchannelcallback(void *context)
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
struct icmsg_negotiate *negop = NULL;
- vmbus_recvpacket(channel, hbeat_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
+ while (1) {
+
+ vmbus_recvpacket(channel, hbeat_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
+
+ if (!recvlen)
+ break;
- if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
sizeof(struct vmbuspipe_hdr)];
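
Note: the hv_util hunk turns a single vmbus_recvpacket() call into a loop that drains every queued heartbeat packet before returning. The same control-flow shape, modelled in plain C (fake_recv() is a stand-in for the VMBus call):

    #include <stdio.h>

    /* Returns queued packet lengths, then 0 when the channel is empty. */
    static int fake_recv(const int *queue, int *idx, int n)
    {
        return (*idx < n) ? queue[(*idx)++] : 0;
    }

    int main(void)
    {
        int queue[] = { 32, 32, 48 };
        int idx = 0, recvlen;

        /* Keep reading until nothing is left, as in the patched callback. */
        while (1) {
            recvlen = fake_recv(queue, &idx, 3);
            if (!recvlen)
                break;
            printf("handled %d-byte heartbeat packet\n", recvlen);
        }
        return 0;
    }
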
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 98114cef1e43..2fe1828bd10b 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -194,10 +194,10 @@ static struct adm9240_data *adm9240_update_device(struct device *dev)
* 0.5'C per two measurement cycles thus ignore possible
* but unlikely aliasing error on lsb reading. --Grant
*/
- data->temp = ((i2c_smbus_read_byte_data(client,
+ data->temp = (i2c_smbus_read_byte_data(client,
ADM9240_REG_TEMP) << 8) |
i2c_smbus_read_byte_data(client,
- ADM9240_REG_TEMP_CONF)) / 128;
+ ADM9240_REG_TEMP_CONF);
for (i = 0; i < 2; i++) { /* read fans */
data->fan[i] = i2c_smbus_read_byte_data(client,
@@ -263,7 +263,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *dummy,
char *buf)
{
struct adm9240_data *data = adm9240_update_device(dev);
- return sprintf(buf, "%d\n", data->temp * 500); /* 9-bit value */
+ return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */
}
static ssize_t show_max(struct device *dev, struct device_attribute *devattr,
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index bef84e085973..c1b9275978f9 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -268,11 +268,13 @@ static int max31790_read_pwm(struct device *dev, u32 attr, int channel,
long *val)
{
struct max31790_data *data = max31790_update_device(dev);
- u8 fan_config = data->fan_config[channel];
+ u8 fan_config;
if (IS_ERR(data))
return PTR_ERR(data);
+ fan_config = data->fan_config[channel];
+
switch (attr) {
case hwmon_pwm_input:
*val = data->pwm[channel] >> 8;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6d94e2ec5b4f..d252276feadf 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -79,12 +79,12 @@ config I2C_AMD8111
config I2C_HIX5HD2
tristate "Hix5hd2 high-speed I2C driver"
- depends on ARCH_HIX5HD2 || COMPILE_TEST
+ depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST
help
- Say Y here to include support for high-speed I2C controller in the
- Hisilicon based hix5hd2 SoCs.
+ Say Y here to include support for the high-speed I2C controller
+ used in HiSilicon hix5hd2 SoCs.
- This driver can also be built as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called i2c-hix5hd2.
config I2C_I801
@@ -589,10 +589,10 @@ config I2C_IMG
config I2C_IMX
tristate "IMX I2C interface"
- depends on ARCH_MXC || ARCH_LAYERSCAPE
+ depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE
help
Say Y here if you want to use the IIC bus controller on
- the Freescale i.MX/MXC or Layerscape processors.
+ the Freescale i.MX/MXC, Layerscape or ColdFire processors.
This driver can also be built as a module. If so, the module
will be called i2c-imx.
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 1fe93c43215c..11e866d05368 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -95,6 +95,9 @@
#define DW_IC_STATUS_TFE BIT(2)
#define DW_IC_STATUS_MST_ACTIVITY BIT(5)
+#define DW_IC_SDA_HOLD_RX_SHIFT 16
+#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT)
+
#define DW_IC_ERR_TX_ABRT 0x1
#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
@@ -420,12 +423,20 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
/* Configure SDA Hold Time if required */
reg = dw_readl(dev, DW_IC_COMP_VERSION);
if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
- if (dev->sda_hold_time) {
- dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
- } else {
+ if (!dev->sda_hold_time) {
/* Keep previous hold time setting if no one set it */
dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
}
+ /*
+ * Workaround to avoid TX arbitration loss when the I2C
+ * slave pulls SDA down "too quickly" after the falling edge of
+ * SCL, by enabling a non-zero SDA RX hold. The specification says it
+ * extends the incoming SDA low-to-high transition while SCL is
+ * high, but it appears to also help with the above issue.
+ */
+ if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
+ dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
+ dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
} else {
dev_warn(dev->dev,
"Hardware too old to adjust SDA hold time.\n");
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 9604024e0eb0..49f2084f7bb5 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -368,6 +368,7 @@ static const struct of_device_id dc_i2c_match[] = {
{ .compatible = "cnxt,cx92755-i2c" },
{ },
};
+MODULE_DEVICE_TABLE(of, dc_i2c_match);
static struct platform_driver dc_i2c_driver = {
.probe = dc_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 08847e8b8998..eb3627f35d12 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -146,6 +146,7 @@
#define SMBHSTCFG_HST_EN 1
#define SMBHSTCFG_SMB_SMI_EN 2
#define SMBHSTCFG_I2C_EN 4
+#define SMBHSTCFG_SPD_WD 0x10
/* TCO configuration bits for TCOCTL */
#define TCOCTL_EN 0x0100
@@ -865,9 +866,16 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
block = 1;
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
- /* NB: page 240 of ICH5 datasheet shows that the R/#W
- * bit should be cleared here, even when reading */
- outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));
+ /*
+ * NB: page 240 of ICH5 datasheet shows that the R/#W
+ * bit should be cleared here, even when reading.
+ * However if SPD Write Disable is set (Lynx Point and later),
+ * the read will fail if we don't set the R/#W bit.
+ */
+ outb_p(((addr & 0x7f) << 1) |
+ ((priv->original_hstcfg & SMBHSTCFG_SPD_WD) ?
+ (read_write & 0x01) : 0),
+ SMBHSTADD(priv));
if (read_write == I2C_SMBUS_READ) {
/* NB: page 240 of ICH5 datasheet also shows
* that DATA1 is the cmd field when reading */
@@ -1573,6 +1581,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Disable SMBus interrupt feature if SMBus using SMI# */
priv->features &= ~FEATURE_IRQ;
}
+ if (temp & SMBHSTCFG_SPD_WD)
+ dev_info(&dev->dev, "SPD Write Disable is set\n");
/* Clear special mode bits */
if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
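
With SPD Write Disable set, the host-address byte for I2C block transfers has to carry the read bit, while the default behaviour keeps it cleared as the ICH5 datasheet describes. A standalone sketch of how that byte is composed, reusing the SMBHSTCFG_SPD_WD value and the read/write test from the hunk (the address and helper name are illustrative):

#include <stdio.h>
#include <stdint.h>

#define SMBHSTCFG_SPD_WD	0x10
#define I2C_SMBUS_READ		1	/* same encoding the SMBus core uses */

/* Compose the SMBHSTADD byte: 7-bit address shifted left, with the R/#W
 * bit only set when SPD Write Disable forces us to advertise a read. */
static uint8_t hstadd_byte(uint8_t addr, int read_write, uint8_t hstcfg)
{
	return ((addr & 0x7f) << 1) |
	       ((hstcfg & SMBHSTCFG_SPD_WD) ? (read_write & 0x01) : 0);
}

int main(void)
{
	printf("no SPD_WD,  read: 0x%02x\n", hstadd_byte(0x50, I2C_SMBUS_READ, 0));
	printf("SPD_WD set, read: 0x%02x\n", hstadd_byte(0x50, I2C_SMBUS_READ, SMBHSTCFG_SPD_WD));
	return 0;
}
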
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 592a8f26a708..47fc1f1acff7 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1009,10 +1009,13 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
rinfo->sda_gpio = of_get_named_gpio(pdev->dev.of_node, "sda-gpios", 0);
rinfo->scl_gpio = of_get_named_gpio(pdev->dev.of_node, "scl-gpios", 0);
- if (!gpio_is_valid(rinfo->sda_gpio) ||
- !gpio_is_valid(rinfo->scl_gpio) ||
- IS_ERR(i2c_imx->pinctrl_pins_default) ||
- IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
+ if (rinfo->sda_gpio == -EPROBE_DEFER ||
+ rinfo->scl_gpio == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (!gpio_is_valid(rinfo->sda_gpio) ||
+ !gpio_is_valid(rinfo->scl_gpio) ||
+ IS_ERR(i2c_imx->pinctrl_pins_default) ||
+ IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
dev_dbg(&pdev->dev, "recovery information incomplete\n");
return 0;
}
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index b8ea62105f42..30132c3957cd 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -729,6 +729,7 @@ static const struct of_device_id jz4780_i2c_of_matches[] = {
{ .compatible = "ingenic,jz4780-i2c", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, jz4780_i2c_of_matches);
static int jz4780_i2c_probe(struct platform_device *pdev)
{
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 50702c7bb244..df220666d627 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -694,6 +694,8 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
t_calc->div_low--;
t_calc->div_high--;
+ /* Set the tuning value to 0 so that the con register is not updated */
+ t_calc->tuning = 0;
/* Maximum divider supported by hw is 0xffff */
if (t_calc->div_low > 0xffff) {
t_calc->div_low = 0xffff;
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 263685c7a512..05cf192ef1ac 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -105,7 +105,7 @@ struct slimpro_i2c_dev {
struct mbox_chan *mbox_chan;
struct mbox_client mbox_client;
struct completion rd_complete;
- u8 dma_buffer[I2C_SMBUS_BLOCK_MAX];
+ u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
u32 *resp_msg;
};
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 2a972ed7aa0d..e29ff37a43bd 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -426,6 +426,7 @@ static const struct of_device_id xlp9xx_i2c_of_match[] = {
{ .compatible = "netlogic,xlp980-i2c", },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, xlp9xx_i2c_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = {
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 0968f59b6df5..ad17d88d8573 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -358,6 +358,7 @@ static const struct of_device_id xlr_i2c_dt_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, xlr_i2c_dt_ids);
static int xlr_i2c_probe(struct platform_device *pdev)
{
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 98fffa3a09f7..1704fc84d647 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1680,7 +1680,8 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
static void of_i2c_register_devices(struct i2c_adapter *adap)
{
- struct device_node *node;
+ struct device_node *bus, *node;
+ struct i2c_client *client;
/* Only register child devices if the adapter has a node pointer set */
if (!adap->dev.of_node)
@@ -1688,11 +1689,24 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
- for_each_available_child_of_node(adap->dev.of_node, node) {
+ bus = of_get_child_by_name(adap->dev.of_node, "i2c-bus");
+ if (!bus)
+ bus = of_node_get(adap->dev.of_node);
+
+ for_each_available_child_of_node(bus, node) {
if (of_node_test_and_set_flag(node, OF_POPULATED))
continue;
- of_i2c_register_device(adap, node);
+
+ client = of_i2c_register_device(adap, node);
+ if (IS_ERR(client)) {
+ dev_warn(&adap->dev,
+ "Failed to create I2C device for %s\n",
+ node->full_name);
+ of_node_clear_flag(node, OF_POPULATED);
+ }
}
+
+ of_node_put(bus);
}
static int of_dev_node_match(struct device *dev, void *data)
@@ -2293,6 +2307,7 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
if (IS_ERR(client)) {
dev_err(&adap->dev, "failed to create client for '%s'\n",
rd->dn->full_name);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
return notifier_from_errno(PTR_ERR(client));
}
break;
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7edcf3238620..99c051490eff 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -437,6 +437,8 @@ config STX104
config TI_ADC081C
tristate "Texas Instruments ADC081C/ADC101C/ADC121C family"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for Texas Instruments ADC081C,
ADC101C and ADC121C ADC chips.
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index bd321b305a0a..ef761a508630 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -213,13 +213,14 @@ static int atlas_check_ec_calibration(struct atlas_data *data)
struct device *dev = &data->client->dev;
int ret;
unsigned int val;
+ __be16 rval;
- ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2);
+ ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2);
if (ret)
return ret;
- dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100,
- be16_to_cpu(val) % 100);
+ val = be16_to_cpu(rval);
+ dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100);
ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
if (ret)
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 39dd2026ccc9..066161a4bccd 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -123,22 +123,24 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
{
unsigned int storage_bytes = data->chip->read_size;
unsigned int shift = chan->scan_type.shift + (chan->address * 8);
- unsigned int buf;
+ __be16 buf16;
+ __be32 buf32;
int ret;
- ret = spi_read(data->spi, (void *) &buf, storage_bytes);
- if (ret)
- return ret;
-
switch (storage_bytes) {
case 2:
- *val = be16_to_cpu(buf);
+ ret = spi_read(data->spi, (void *)&buf16, storage_bytes);
+ *val = be16_to_cpu(buf16);
break;
case 4:
- *val = be32_to_cpu(buf);
+ ret = spi_read(data->spi, (void *)&buf32, storage_bytes);
+ *val = be32_to_cpu(buf32);
break;
}
+ if (ret)
+ return ret;
+
/* check to be sure this is a valid reading */
if (*val & data->chip->status_bit)
return -EINVAL;
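
The thermocouple fix reads into a buffer whose type matches the transfer size (__be16 or __be32) and converts with the matching be*_to_cpu() helper, instead of funnelling both sizes through one unsigned int. The same idea in portable user-space C, with spi_read_raw() as a hypothetical raw transfer and ntohs()/ntohl() standing in for the kernel conversions:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohs()/ntohl() as stand-ins for be16/be32_to_cpu() */

/* Hypothetical raw SPI transfer: fills dst with 'len' big-endian bytes. */
static void spi_read_raw(void *dst, size_t len)
{
	static const uint8_t wire[] = { 0x01, 0x90, 0x00, 0x00 };
	memcpy(dst, wire, len);
}

int main(void)
{
	uint16_t buf16;
	uint32_t buf32;
	unsigned int val = 0;
	size_t storage_bytes = 2;	/* per-chip read size */

	switch (storage_bytes) {
	case 2:
		spi_read_raw(&buf16, sizeof(buf16));
		val = ntohs(buf16);	/* be16_to_cpu() */
		break;
	case 4:
		spi_read_raw(&buf32, sizeof(buf32));
		val = ntohl(buf32);	/* be32_to_cpu() */
		break;
	}

	printf("val = 0x%x\n", val);	/* 0x190 */
	return 0;
}
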
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 19a418a1b631..fb3fb89640e5 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -89,4 +89,6 @@ source "drivers/infiniband/sw/rxe/Kconfig"
source "drivers/infiniband/hw/hfi1/Kconfig"
+source "drivers/infiniband/hw/qedr/Kconfig"
+
endif # INFINIBAND
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index c68746ce6624..224ad274ea0b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -94,6 +94,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
unsigned long dma_attrs = 0;
struct scatterlist *sg, *sg_list_start;
int need_release = 0;
+ unsigned int gup_flags = FOLL_WRITE;
if (dmasync)
dma_attrs |= DMA_ATTR_WRITE_BARRIER;
@@ -183,6 +184,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (ret)
goto out;
+ if (!umem->writable)
+ gup_flags |= FOLL_FORCE;
+
need_release = 1;
sg_list_start = umem->sg_head.sgl;
@@ -190,7 +194,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
- 1, !umem->writable, page_list, vma_list);
+ gup_flags, page_list, vma_list);
if (ret < 0)
goto out;
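
Both umem hunks convert get_user_pages*() callers from separate write/force integer arguments to a single gup_flags word: start from FOLL_WRITE and OR in FOLL_FORCE when the mapping is not writable (or add FOLL_WRITE only when ODP write access is allowed). A tiny sketch of building such a flag word; the FOLL_* values below are illustrative, not the kernel's:

#include <stdio.h>

/* Illustrative flag values; the real FOLL_* constants live in linux/mm.h. */
#define FOLL_WRITE	0x01
#define FOLL_FORCE	0x10

static unsigned int umem_gup_flags(int writable)
{
	unsigned int gup_flags = FOLL_WRITE;

	/* Read-only userspace mappings still need the pages pinned, so the
	 * conversion adds FOLL_FORCE instead of passing force=1 separately. */
	if (!writable)
		gup_flags |= FOLL_FORCE;

	return gup_flags;
}

int main(void)
{
	printf("writable:  0x%x\n", umem_gup_flags(1));
	printf("read-only: 0x%x\n", umem_gup_flags(0));
	return 0;
}
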
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 75077a018675..1f0fe3217f23 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
u64 off;
int j, k, ret = 0, start_idx, npages = 0;
u64 base_virt_addr;
+ unsigned int flags = 0;
if (access_mask == 0)
return -EINVAL;
@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
goto out_put_task;
}
+ if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ flags |= FOLL_WRITE;
+
start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
k = start_idx;
@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
*/
npages = get_user_pages_remote(owning_process, owning_mm,
user_virt, gup_num_pages,
- access_mask & ODP_WRITE_ALLOWED_BIT,
- 0, local_page_list, NULL);
+ flags, local_page_list, NULL);
up_read(&owning_mm->mmap_sem);
if (npages < 0)
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 21fe401ff178..e7a5ed9f6f3f 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_HNS) += hns/
+obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 875597b0e69c..097365932b09 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -83,8 +83,7 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
struct hns_roce_mtt *hr_mtt,
struct hns_roce_uar *hr_uar,
- struct hns_roce_cq *hr_cq, int vector,
- int collapsed)
+ struct hns_roce_cq *hr_cq, int vector)
{
struct hns_roce_cmd_mailbox *mailbox = NULL;
struct hns_roce_cq_table *cq_table = NULL;
@@ -153,6 +152,9 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
hr_cq->cons_index = 0;
hr_cq->uar = hr_uar;
+ atomic_set(&hr_cq->refcount, 1);
+ init_completion(&hr_cq->free);
+
return 0;
err_radix:
@@ -192,6 +194,11 @@ static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
/* Waiting interrupt process procedure carried out */
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+ /* wait for all interrupt processed */
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
+ wait_for_completion(&hr_cq->free);
+
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, hr_cq->cqn);
spin_unlock_irq(&cq_table->lock);
@@ -300,10 +307,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
hr_cq->ib_cq.cqe = cq_entries - 1;
- mutex_init(&hr_cq->resize_mutex);
spin_lock_init(&hr_cq->lock);
- hr_cq->hr_resize_buf = NULL;
- hr_cq->resize_umem = NULL;
if (context) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
@@ -338,8 +342,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
}
/* Allocate cq index, fill cq_context */
- ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
- uar, hr_cq, vector, 0);
+ ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
+ hr_cq, vector);
if (ret) {
dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
goto err_mtt;
@@ -353,12 +357,15 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
if (context) {
if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
ret = -EFAULT;
- goto err_mtt;
+ goto err_cqc;
}
}
return &hr_cq->ib_cq;
+err_cqc:
+ hns_roce_free_cq(hr_dev, hr_cq);
+
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
if (context)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index ea735800eb18..341731553a60 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -62,7 +62,7 @@
#define HNS_ROCE_AEQE_OF_VEC_NUM 1
/* 4G/4K = 1M */
-#define HNS_ROCE_SL_SHIFT 29
+#define HNS_ROCE_SL_SHIFT 28
#define HNS_ROCE_TCLASS_SHIFT 20
#define HNS_ROCE_FLOW_LABLE_MASK 0xfffff
@@ -74,7 +74,9 @@
#define MR_TYPE_DMA 0x03
#define PKEY_ID 0xffff
+#define GUID_LEN 8
#define NODE_DESC_SIZE 64
+#define DB_REG_OFFSET 0x1000
#define SERV_TYPE_RC 0
#define SERV_TYPE_RD 1
@@ -282,20 +284,11 @@ struct hns_roce_cq_buf {
struct hns_roce_mtt hr_mtt;
};
-struct hns_roce_cq_resize {
- struct hns_roce_cq_buf hr_buf;
- int cqe;
-};
-
struct hns_roce_cq {
struct ib_cq ib_cq;
struct hns_roce_cq_buf hr_buf;
- /* pointer to store information after resize*/
- struct hns_roce_cq_resize *hr_resize_buf;
spinlock_t lock;
- struct mutex resize_mutex;
struct ib_umem *umem;
- struct ib_umem *resize_umem;
void (*comp)(struct hns_roce_cq *);
void (*event)(struct hns_roce_cq *, enum hns_roce_event);
@@ -408,6 +401,7 @@ struct hns_roce_qp {
u32 buff_size;
struct mutex mutex;
u8 port;
+ u8 phy_port;
u8 sl;
u8 resp_depth;
u8 state;
@@ -471,7 +465,6 @@ struct hns_roce_caps {
u32 max_rq_desc_sz; /* 64 */
int max_qp_init_rdma;
int max_qp_dest_rdma;
- int sqp_start;
int num_cqs;
int max_cqes;
int reserved_cqs;
@@ -512,6 +505,8 @@ struct hns_roce_hw {
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle, int nent, u32 vector);
+ int (*clear_hem)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj);
int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
@@ -533,7 +528,6 @@ struct hns_roce_dev {
struct hns_roce_uar priv_uar;
const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
spinlock_t sm_lock;
- spinlock_t cq_db_lock;
spinlock_t bt_cmd_lock;
struct hns_roce_ib_iboe iboe;
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
index 98af7fecf2f1..21e21b03cfb5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -66,9 +66,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
{
struct device *dev = &hr_dev->pdev->dev;
- qpn = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -96,13 +93,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
default:
break;
}
-
- hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
}
static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
@@ -111,9 +101,6 @@ static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
{
struct device *dev = &hr_dev->pdev->dev;
- qpn = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
dev_warn(dev, "Local Access Violation Work Queue Error.\n");
switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -141,13 +128,69 @@ static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
default:
break;
}
+}
+
+static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ int phy_port;
+ int qpn;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+ phy_port = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
+ if (qpn <= 1)
+ qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
+ "QP %d, phy_port %d.\n", qpn, phy_port);
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 cqn;
+
+ cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
+ break;
+ default:
+ break;
+ }
- hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ hns_roce_cq_event(hr_dev, cqn, event_type);
}
static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
@@ -185,7 +228,7 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_aeqe *aeqe;
int aeqes_found = 0;
- int qpn = 0;
+ int event_type;
while ((aeqe = next_aeqe_sw(eq))) {
dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
@@ -195,9 +238,10 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
/* Memory barrier */
rmb();
- switch (roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)) {
+ event_type = roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
+ switch (event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
dev_warn(dev, "PATH MIG not supported\n");
break;
@@ -211,23 +255,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
dev_warn(dev, "PATH MIG failed\n");
break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_warn(dev, "qpn = 0x%lx\n",
- roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S));
- hns_roce_qp_event(hr_dev,
- roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
- break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
@@ -235,40 +265,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
dev_warn(dev, "SRQ not support!\n");
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_warn(dev, "CQ 0x%lx access err.\n",
- roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
- hns_roce_cq_event(hr_dev,
- le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- break;
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%lx overflow\n",
- roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
- hns_roce_cq_event(hr_dev,
- le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- break;
case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- dev_warn(dev, "CQ ID invalid.\n");
- hns_roce_cq_event(hr_dev,
- le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
break;
case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
dev_warn(dev, "port change.\n");
@@ -290,11 +289,8 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
break;
default:
- dev_warn(dev, "Unhandled event 0x%lx on EQ %d at index %u\n",
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S),
- eq->eqn, eq->cons_index);
+ dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
+ event_type, eq->eqn, eq->cons_index);
break;
};
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.h b/drivers/infiniband/hw/hns/hns_roce_eq.h
index fe4388191a3c..c6d212d12e03 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.h
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.h
@@ -107,6 +107,10 @@ struct hns_roce_aeqe {
#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M \
(((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M \
+ (((1UL << 3) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S)
+
#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M \
(((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index d53d64362389..250d8f280390 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -36,14 +36,10 @@
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
-#define HW_SYNC_TIMEOUT_MSECS 500
-#define HW_SYNC_SLEEP_TIME_INTERVAL 20
-
#define HNS_ROCE_HEM_ALLOC_SIZE (1 << 17)
#define HNS_ROCE_TABLE_CHUNK_SIZE (1 << 17)
#define DMA_ADDR_T_SHIFT 12
-#define BT_CMD_SYNC_SHIFT 31
#define BT_BA_SHIFT 32
struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
@@ -213,74 +209,6 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
return ret;
}
-static int hns_roce_clear_hem(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long obj)
-{
- struct device *dev = &hr_dev->pdev->dev;
- unsigned long end = 0;
- unsigned long flags;
- void __iomem *bt_cmd;
- uint32_t bt_cmd_val[2];
- u32 bt_cmd_h_val = 0;
- int ret = 0;
-
- switch (table->type) {
- case HEM_TYPE_QPC:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
- break;
- case HEM_TYPE_MTPT:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
- HEM_TYPE_MTPT);
- break;
- case HEM_TYPE_CQC:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
- break;
- case HEM_TYPE_SRQC:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
- HEM_TYPE_SRQC);
- break;
- default:
- return ret;
- }
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
- roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
- roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, 0);
-
- spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
-
- bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
-
- end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
- while (1) {
- if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
- if (!(time_before(jiffies, end))) {
- dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
- spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
- flags);
- return -EBUSY;
- }
- } else {
- break;
- }
- msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
- }
-
- bt_cmd_val[0] = 0;
- bt_cmd_val[1] = bt_cmd_h_val;
- hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
- spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
-
- return ret;
-}
-
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
@@ -333,7 +261,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
if (--table->hem[i]->refcount == 0) {
/* Clear HEM base address */
- if (hns_roce_clear_hem(hr_dev, table, obj))
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj))
dev_warn(dev, "Clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -456,7 +384,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
for (i = 0; i < table->num_hem; ++i)
if (table->hem[i]) {
- if (hns_roce_clear_hem(hr_dev, table,
+ if (hr_dev->hw->clear_hem(hr_dev, table,
i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
dev_err(dev, "Clear HEM base address failed.\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index ad6617588fba..435748858252 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -34,6 +34,10 @@
#ifndef _HNS_ROCE_HEM_H
#define _HNS_ROCE_HEM_H
+#define HW_SYNC_TIMEOUT_MSECS 500
+#define HW_SYNC_SLEEP_TIME_INTERVAL 20
+#define BT_CMD_SYNC_SHIFT 31
+
enum {
/* MAP HEM(Hardware Entry Memory) */
HEM_TYPE_QPC = 0,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 399f5dedaf2d..71232e5fabf6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -73,8 +73,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
u32 ind = 0;
int ret = 0;
- spin_lock_irqsave(&qp->sq.lock, flags);
+ if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
+ ibqp->qp_type != IB_QPT_RC)) {
+ dev_err(dev, "un-supported QP type\n");
+ *bad_wr = NULL;
+ return -EOPNOTSUPP;
+ }
+ spin_lock_irqsave(&qp->sq.lock, flags);
ind = qp->sq_next_wqe;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
@@ -162,7 +168,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
roce_set_field(ud_sq_wqe->u32_36,
UD_SEND_WQE_U32_36_SGID_INDEX_M,
UD_SEND_WQE_U32_36_SGID_INDEX_S,
- hns_get_gid_index(hr_dev, qp->port,
+ hns_get_gid_index(hr_dev, qp->phy_port,
ah->av.gid_index));
roce_set_field(ud_sq_wqe->u32_40,
@@ -205,8 +211,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
(wr->send_flags & IB_SEND_FENCE ?
(cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);
- wqe = (struct hns_roce_wqe_ctrl_seg *)wqe +
- sizeof(struct hns_roce_wqe_ctrl_seg);
+ wqe += sizeof(struct hns_roce_wqe_ctrl_seg);
switch (wr->opcode) {
case IB_WR_RDMA_READ:
@@ -235,8 +240,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
}
ctrl->flag |= cpu_to_le32(ps_opcode);
- wqe = (struct hns_roce_wqe_raddr_seg *)wqe +
- sizeof(struct hns_roce_wqe_raddr_seg);
+ wqe += sizeof(struct hns_roce_wqe_raddr_seg);
dseg = wqe;
if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
@@ -253,8 +257,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
memcpy(wqe, ((void *) (uintptr_t)
wr->sg_list[i].addr),
wr->sg_list[i].length);
- wqe = (struct hns_roce_wqe_raddr_seg *)
- wqe + wr->sg_list[i].length;
+ wqe += wr->sg_list[i].length;
}
ctrl->flag |= HNS_ROCE_WQE_INLINE;
} else {
@@ -266,9 +269,6 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
HNS_ROCE_WQE_SGE_NUM_BIT);
}
ind++;
- } else {
- dev_dbg(dev, "unSupported QP type\n");
- break;
}
}
@@ -285,7 +285,7 @@ out:
SQ_DOORBELL_U32_4_SQ_HEAD_S,
(qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
- SQ_DOORBELL_U32_4_PORT_S, qp->port);
+ SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
@@ -365,14 +365,14 @@ out:
/* SW update GSI rq header */
reg_val = roce_read(to_hr_dev(ibqp->device),
ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->port);
+ QP1C_CFGN_OFFSET * hr_qp->phy_port);
roce_set_field(reg_val,
ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
hr_qp->rq.head);
roce_write(to_hr_dev(ibqp->device),
ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
+ QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
} else {
rq_db.u32_4 = 0;
rq_db.u32_8 = 0;
@@ -789,6 +789,66 @@ static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
}
}
+static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ int ret;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+ priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
+ HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
+ GFP_KERNEL);
+ if (!priv->bt_table.qpc_buf.buf)
+ return -ENOMEM;
+
+ priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
+ HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
+ GFP_KERNEL);
+ if (!priv->bt_table.mtpt_buf.buf) {
+ ret = -ENOMEM;
+ goto err_failed_alloc_mtpt_buf;
+ }
+
+ priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
+ HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
+ GFP_KERNEL);
+ if (!priv->bt_table.cqc_buf.buf) {
+ ret = -ENOMEM;
+ goto err_failed_alloc_cqc_buf;
+ }
+
+ return 0;
+
+err_failed_alloc_cqc_buf:
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+err_failed_alloc_mtpt_buf:
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+
+ return ret;
+}
+
+static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
+
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+}
+
/**
* hns_roce_v1_reset - reset RoCE
* @hr_dev: RoCE device struct pointer
@@ -879,7 +939,6 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
- caps->sqp_start = 0;
caps->reserved_lkey = 0;
caps->reserved_pds = 0;
caps->reserved_mrws = 1;
@@ -944,8 +1003,18 @@ int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
+ ret = hns_roce_bt_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "bt init failed!\n");
+ goto error_failed_bt_init;
+ }
+
return 0;
+error_failed_bt_init:
+ hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
+ hns_roce_raq_free(hr_dev);
+
error_failed_raq_init:
hns_roce_db_free(hr_dev);
return ret;
@@ -953,6 +1022,7 @@ error_failed_raq_init:
void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
+ hns_roce_bt_free(hr_dev);
hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
hns_roce_raq_free(hr_dev);
hns_roce_db_free(hr_dev);
@@ -1192,9 +1262,7 @@ static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
return get_sw_cqe(hr_cq, hr_cq->cons_index);
}
-void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index,
- spinlock_t *doorbell_lock)
-
+void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
u32 doorbell[2];
@@ -1254,8 +1322,7 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
*/
wmb();
- hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
- &to_hr_dev(hr_cq->ib_cq.device)->cq_db_lock);
+ hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
}
}
@@ -1485,7 +1552,8 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
/* SQ corresponds to CQE */
sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
CQE_BYTE_4_WQE_INDEX_M,
- CQE_BYTE_4_WQE_INDEX_S));
+ CQE_BYTE_4_WQE_INDEX_S)&
+ ((*cur_qp)->sq.wqe_cnt-1));
switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) {
case HNS_ROCE_WQE_OPCODE_SEND:
wc->opcode = IB_WC_SEND;
@@ -1591,10 +1659,8 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
break;
}
- if (npolled) {
- hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
- &to_hr_dev(ibcq->device)->cq_db_lock);
- }
+ if (npolled)
+ hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
spin_unlock_irqrestore(&hr_cq->lock, flags);
@@ -1604,6 +1670,74 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return ret;
}
+int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ unsigned long end = 0, flags = 0;
+ uint32_t bt_cmd_val[2] = {0};
+ void __iomem *bt_cmd;
+ u64 bt_ba = 0;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
+ bt_ba = priv->bt_table.qpc_buf.map >> 12;
+ break;
+ case HEM_TYPE_MTPT:
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
+ bt_ba = priv->bt_table.mtpt_buf.map >> 12;
+ break;
+ case HEM_TYPE_CQC:
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
+ bt_ba = priv->bt_table.cqc_buf.map >> 12;
+ break;
+ case HEM_TYPE_SRQC:
+ dev_dbg(dev, "HEM_TYPE_SRQC not support.\n");
+ return -EINVAL;
+ default:
+ return 0;
+ }
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
+ roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
+ roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
+
+ spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+ end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
+ while (1) {
+ if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+ if (!(time_before(jiffies, end))) {
+ dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+ spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
+ flags);
+ return -EBUSY;
+ }
+ } else {
+ break;
+ }
+ msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
+ }
+
+ bt_cmd_val[0] = (uint32_t)bt_ba;
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
+ hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
+
+ spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
+
+ return 0;
+}
+
static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt,
enum hns_roce_qp_state cur_state,
@@ -1733,13 +1867,10 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
- QP1C_BYTES_16_PORT_NUM_S, hr_qp->port);
+ QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
roce_set_bit(context->qp1c_bytes_16,
QP1C_BYTES_16_SIGNALING_TYPE_S,
hr_qp->sq_signal_bits);
- roce_set_bit(context->qp1c_bytes_16,
- QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S,
- hr_qp->sq_signal_bits);
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
1);
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
@@ -1784,7 +1915,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
/* Copy context to QP1C register */
addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG +
- hr_qp->port * sizeof(*context));
+ hr_qp->phy_port * sizeof(*context));
writel(context->qp1c_bytes_4, addr);
writel(context->sq_rq_bt_l, addr + 1);
@@ -1795,15 +1926,16 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
writel(context->qp1c_bytes_28, addr + 6);
writel(context->qp1c_bytes_32, addr + 7);
writel(context->cur_sq_wqe_ba_l, addr + 8);
+ writel(context->qp1c_bytes_40, addr + 9);
}
/* Modify QP1C status */
reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
- hr_qp->port * sizeof(*context));
+ hr_qp->phy_port * sizeof(*context));
roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
- hr_qp->port * sizeof(*context), reg_val);
+ hr_qp->phy_port * sizeof(*context), reg_val);
hr_qp->state = new_state;
if (new_state == IB_QPS_RESET) {
@@ -1836,12 +1968,10 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_qp_context *context;
- struct hns_roce_rq_db rq_db;
dma_addr_t dma_handle_2 = 0;
dma_addr_t dma_handle = 0;
uint32_t doorbell[2] = {0};
int rq_pa_start = 0;
- u32 reg_val = 0;
u64 *mtts_2 = NULL;
int ret = -EINVAL;
u64 *mtts = NULL;
@@ -2119,7 +2249,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qpc_bytes_68,
QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
- QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, 0);
+ QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
+ hr_qp->rq.head);
roce_set_field(context->qpc_bytes_68,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
@@ -2186,7 +2317,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
- hr_qp->port);
+ hr_qp->phy_port);
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_SL_M,
QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
@@ -2257,20 +2388,17 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_bit(context->qpc_bytes_140,
QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
- roce_set_field(context->qpc_bytes_144,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
- attr->qp_state);
-
roce_set_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
roce_set_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, 0);
+ QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
+ attr->retry_cnt);
roce_set_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, 0);
+ QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
+ attr->rnr_retry);
roce_set_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_LSN_M,
QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
@@ -2281,10 +2409,19 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
attr->retry_cnt);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
- attr->timeout);
+ if (attr->timeout < 0x12) {
+ dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n",
+ attr->timeout);
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+ 0x12);
+ } else {
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+ attr->timeout);
+ }
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
@@ -2292,7 +2429,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
- hr_qp->port);
+ hr_qp->phy_port);
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_SL_M,
QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
@@ -2357,21 +2494,15 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
0);
- } else if ((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
+ } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
- roce_set_field(context->qpc_bytes_144,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
- attr->qp_state);
-
- } else {
- dev_err(dev, "not support this modify\n");
+ (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
+ dev_err(dev, "not support this status migration\n");
goto out;
}
@@ -2397,43 +2528,32 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
/* Memory barrier */
wmb();
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
- /* SW update GSI rq header */
- reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->port);
- roce_set_field(reg_val,
- ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
- ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
- hr_qp->rq.head);
- roce_write(hr_dev, ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
- } else {
- rq_db.u32_4 = 0;
- rq_db.u32_8 = 0;
-
- roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
- RQ_DOORBELL_U32_4_RQ_HEAD_S,
- hr_qp->rq.head);
- roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
- RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
- roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
- RQ_DOORBELL_U32_8_CMD_S, 1);
- roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
- 1);
- doorbell[0] = rq_db.u32_4;
- doorbell[1] = rq_db.u32_8;
-
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+ roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
+ RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
+ roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
+ RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
+ roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
+ RQ_DOORBELL_U32_8_CMD_S, 1);
+ roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
+
+ if (ibqp->uobject) {
+ hr_qp->rq.db_reg_l = hr_dev->reg_base +
+ ROCEE_DB_OTHERS_L_0_REG +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
}
+
+ hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
}
hr_qp->state = new_state;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
hr_qp->resp_depth = attr->max_dest_rd_atomic;
- if (attr_mask & IB_QP_PORT)
- hr_qp->port = (attr->port_num - 1);
+ if (attr_mask & IB_QP_PORT) {
+ hr_qp->port = attr->port_num - 1;
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ }
if (new_state == IB_QPS_RESET && !ibqp->uobject) {
hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
@@ -2789,6 +2909,7 @@ struct hns_roce_hw hns_roce_hw_v1 = {
.set_mtu = hns_roce_v1_set_mtu,
.write_mtpt = hns_roce_v1_write_mtpt,
.write_cqc = hns_roce_v1_write_cqc,
+ .clear_hem = hns_roce_v1_clear_hem,
.modify_qp = hns_roce_v1_modify_qp,
.query_qp = hns_roce_v1_query_qp,
.destroy_qp = hns_roce_v1_destroy_qp,
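
hns_roce_v1_clear_hem() polls the BT_CMD register until the hardware-sync bit clears, sleeping between reads and bailing out with -EBUSY once HW_SYNC_TIMEOUT_MSECS have elapsed. A minimal user-space sketch of that bounded-poll structure, with read_bt_cmd() as a hypothetical register read and a monotonic-clock deadline in place of jiffies:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define HW_SYNC_TIMEOUT_MSECS		500
#define HW_SYNC_SLEEP_TIME_INTERVAL	20
#define BT_CMD_SYNC_SHIFT		31

/* Hypothetical register read: pretend the sync bit clears on the 3rd poll. */
static uint32_t read_bt_cmd(void)
{
	static int polls;
	return (++polls < 3) ? (1u << BT_CMD_SYNC_SHIFT) : 0;
}

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

int main(void)
{
	long end = now_ms() + HW_SYNC_TIMEOUT_MSECS;
	struct timespec delay = { .tv_nsec = HW_SYNC_SLEEP_TIME_INTERVAL * 1000000L };

	/* Bounded poll: wait for the hardware-sync bit to clear, give up
	 * with an error once the deadline passes. */
	while (read_bt_cmd() >> BT_CMD_SYNC_SHIFT) {
		if (now_ms() >= end) {
			fprintf(stderr, "hw_sync never cleared\n");
			return 1;
		}
		nanosleep(&delay, NULL);
	}
	printf("hw_sync cleared, safe to issue BT_CMD\n");
	return 0;
}
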
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 316b592b1636..539b0a3b92b0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -102,6 +102,8 @@
#define HNS_ROCE_V1_EXT_ODB_ALFUL \
(HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17)
+
#define HNS_ROCE_ODB_POLL_MODE 0
#define HNS_ROCE_SDB_NORMAL_MODE 0
@@ -971,9 +973,16 @@ struct hns_roce_db_table {
struct hns_roce_ext_db *ext_db;
};
+struct hns_roce_bt_table {
+ struct hns_roce_buf_list qpc_buf;
+ struct hns_roce_buf_list mtpt_buf;
+ struct hns_roce_buf_list cqc_buf;
+};
+
struct hns_roce_v1_priv {
struct hns_roce_db_table db_table;
struct hns_roce_raq_table raq_table;
+ struct hns_roce_bt_table bt_table;
};
int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index f64f0dde9a88..764e35a54457 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -355,8 +355,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_qp = hr_dev->caps.num_qps;
props->max_qp_wr = hr_dev->caps.max_wqes;
props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
- IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_LOCAL_DMA_LKEY;
+ IB_DEVICE_RC_RNR_NAK_GEN;
props->max_sge = hr_dev->caps.max_sq_sg;
props->max_sge_rd = 1;
props->max_cq = hr_dev->caps.num_cqs;
@@ -372,6 +371,25 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
return 0;
}
+static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
+ u8 port_num)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct net_device *ndev;
+
+ if (port_num < 1 || port_num > hr_dev->caps.num_ports)
+ return NULL;
+
+ rcu_read_lock();
+
+ ndev = hr_dev->iboe.netdevs[port_num - 1];
+ if (ndev)
+ dev_hold(ndev);
+
+ rcu_read_unlock();
+ return ndev;
+}
+
static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
struct ib_port_attr *props)
{
@@ -584,6 +602,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
struct device *dev = &hr_dev->pdev->dev;
iboe = &hr_dev->iboe;
+ spin_lock_init(&iboe->lock);
ib_dev = &hr_dev->ib_dev;
strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);
@@ -618,6 +637,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->query_port = hns_roce_query_port;
ib_dev->modify_port = hns_roce_modify_port;
ib_dev->get_link_layer = hns_roce_get_link_layer;
+ ib_dev->get_netdev = hns_roce_get_netdev;
ib_dev->query_gid = hns_roce_query_gid;
ib_dev->query_pkey = hns_roce_query_pkey;
ib_dev->alloc_ucontext = hns_roce_alloc_ucontext;
@@ -667,8 +687,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
goto error_failed_setup_mtu_gids;
}
- spin_lock_init(&iboe->lock);
-
iboe->nb.notifier_call = hns_roce_netdev_event;
ret = register_netdevice_notifier(&iboe->nb);
if (ret) {
@@ -777,6 +795,15 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
if (IS_ERR(hr_dev->reg_base))
return PTR_ERR(hr_dev->reg_base);
+ /* read the node_guid of IB device from the DT or ACPI */
+ ret = device_property_read_u8_array(dev, "node-guid",
+ (u8 *)&hr_dev->ib_dev.node_guid,
+ GUID_LEN);
+ if (ret) {
+ dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
+ return ret;
+ }
+
/* get the RoCE associated ethernet ports or netdevices */
for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
if (dev_of_node(dev)) {
@@ -923,7 +950,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
struct device *dev = &hr_dev->pdev->dev;
spin_lock_init(&hr_dev->sm_lock);
- spin_lock_init(&hr_dev->cq_db_lock);
spin_lock_init(&hr_dev->bt_cmd_lock);
ret = hns_roce_init_uar_table(hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 59f5e2be046b..fb87883ead34 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -564,11 +564,14 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
dev_err(dev, "Just support 4K page size but is 0x%x now!\n",
mr->umem->page_size);
+ ret = -EINVAL;
+ goto err_umem;
}
if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
length);
+ ret = -EINVAL;
goto err_umem;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 16271b5bd170..05db7d59812a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -35,19 +35,7 @@
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
{
- struct device *dev = &hr_dev->pdev->dev;
- unsigned long pd_number;
- int ret = 0;
-
- ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number);
- if (ret == -1) {
- dev_err(dev, "alloc pdn from pdbitmap failed\n");
- return -ENOMEM;
- }
-
- *pdn = pd_number;
-
- return 0;
+ return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
}
static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
@@ -117,9 +105,15 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
if (ret == -1)
return -ENOMEM;
- uar->index = (uar->index - 1) % hr_dev->caps.phy_num_uars + 1;
+ if (uar->index > 0)
+ uar->index = (uar->index - 1) %
+ (hr_dev->caps.phy_num_uars - 1) + 1;
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
+ return -EINVAL;
+ }
uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 645c18d809a5..e86dd8d06777 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -32,14 +32,14 @@
*/
#include <linux/platform_device.h>
+#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"
-#define DB_REG_OFFSET 0x1000
-#define SQP_NUM 12
+#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
@@ -113,16 +113,8 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
int align, unsigned long *base)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- int ret = 0;
- unsigned long qpn;
-
- ret = hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, &qpn);
- if (ret == -1)
- return -ENOMEM;
-
- *base = qpn;
- return 0;
+ return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
@@ -255,7 +247,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- if (base_qpn < (hr_dev->caps.sqp_start + 2 * hr_dev->caps.num_ports))
+ if (base_qpn < SQP_NUM)
return;
hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
@@ -345,12 +337,10 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap,
- enum ib_qp_type type,
struct hns_roce_qp *hr_qp)
{
struct device *dev = &hr_dev->pdev->dev;
u32 max_cnt;
- (void)type;
if (cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
@@ -476,7 +466,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
/* Set SQ size */
ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
- init_attr->qp_type, hr_qp);
+ hr_qp);
if (ret) {
dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
goto err_out;
@@ -617,21 +607,19 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
return ERR_PTR(-ENOMEM);
hr_qp = &hr_sqp->hr_qp;
+ hr_qp->port = init_attr->port_num - 1;
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
+ hr_dev->iboe.phy_port[hr_qp->port];
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
- hr_dev->caps.sqp_start +
- hr_dev->caps.num_ports +
- init_attr->port_num - 1, hr_qp);
+ hr_qp->ibqp.qp_num, hr_qp);
if (ret) {
dev_err(dev, "Create GSI QP failed!\n");
kfree(hr_sqp);
return ERR_PTR(ret);
}
- hr_qp->port = (init_attr->port_num - 1);
- hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start +
- hr_dev->caps.num_ports +
- init_attr->port_num - 1;
break;
}
default:{
@@ -670,6 +658,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct device *dev = &hr_dev->pdev->dev;
int ret = -EINVAL;
int p;
+ enum ib_mtu active_mtu;
mutex_lock(&hr_qp->mutex);
@@ -700,6 +689,19 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
}
+ if (attr_mask & IB_QP_PATH_MTU) {
+ p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+ active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
+
+ if (attr->path_mtu > IB_MTU_2048 ||
+ attr->path_mtu < IB_MTU_256 ||
+ attr->path_mtu > active_mtu) {
+ dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
+ attr->path_mtu);
+ goto out;
+ }
+ }
+
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
@@ -782,29 +784,11 @@ static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
- struct ib_qp *ibqp = &hr_qp->ibqp;
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
- if ((n < 0) || (n > hr_qp->rq.wqe_cnt)) {
- dev_err(&hr_dev->pdev->dev, "rq wqe index:%d,rq wqe cnt:%d\r\n",
- n, hr_qp->rq.wqe_cnt);
- return NULL;
- }
-
return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
- struct ib_qp *ibqp = &hr_qp->ibqp;
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
- if ((n < 0) || (n > hr_qp->sq.wqe_cnt)) {
- dev_err(&hr_dev->pdev->dev, "sq wqe index:%d,sq wqe cnt:%d\r\n",
- n, hr_qp->sq.wqe_cnt);
- return NULL;
- }
-
return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
@@ -837,8 +821,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
/* A port include two SQP, six port total 12 */
ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
- hr_dev->caps.num_qps - 1,
- hr_dev->caps.sqp_start + SQP_NUM,
+ hr_dev->caps.num_qps - 1, SQP_NUM,
reserved_from_top);
if (ret) {
dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 22174774dbb8..63036c731626 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1019,7 +1019,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
- resp.cache_line_size = L1_CACHE_BYTES;
+ resp.cache_line_size = cache_line_size();
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
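
The mlx5 change above reports the cache line size probed at runtime (cache_line_size()) to userspace instead of the compile-time L1_CACHE_BYTES constant, which matters on architectures where the two can differ. A rough userspace analogue of the same idea, assuming a glibc/Linux system that exposes the L1 D-cache line size via sysconf():

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Compile-time guess, analogous to L1_CACHE_BYTES. */
	enum { ASSUMED_CACHE_LINE = 64 };

	/* Runtime value, analogous to cache_line_size(); may be 0 or -1
	 * if the platform does not report it. */
	long runtime = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);

	printf("compile-time assumption: %d bytes\n", ASSUMED_CACHE_LINE);
	printf("runtime-reported size:   %ld bytes\n", runtime);
	return 0;
}
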
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 41f4c2afbcdd..7ce97daf26c6 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -52,7 +52,6 @@ enum {
enum {
MLX5_IB_SQ_STRIDE = 6,
- MLX5_IB_CACHE_LINE_SIZE = 64,
};
static const u32 mlx5_ib_opcode[] = {
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 6c00d04b8b28..c6fe89d79248 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
+ ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
new file mode 100644
index 000000000000..6c9f3923e838
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/Kconfig
@@ -0,0 +1,8 @@
+config INFINIBAND_QEDR
+ tristate "QLogic RoCE driver"
+ depends on 64BIT && QEDE
+ select QED_LL2
+ select QED_RDMA
+ ---help---
+ This driver provides low-level InfiniBand over Ethernet
+ support for QLogic QED host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/qedr/Makefile b/drivers/infiniband/hw/qedr/Makefile
new file mode 100644
index 000000000000..ba7067c77f2f
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o
+
+qedr-y := main.o verbs.o qedr_cm.o
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
new file mode 100644
index 000000000000..7b74d09a8217
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -0,0 +1,914 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/netdevice.h>
+#include <linux/iommu.h>
+#include <net/addrconf.h>
+#include <linux/qed/qede_roce.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+
+MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(QEDR_MODULE_VERSION);
+
+#define QEDR_WQ_MULTIPLIER_DFT (3)
+
+void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
+ enum ib_event_type type)
+{
+ struct ib_event ibev;
+
+ ibev.device = &dev->ibdev;
+ ibev.element.port_num = port_num;
+ ibev.event = type;
+
+ ib_dispatch_event(&ibev);
+}
+
+static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
+ u8 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str,
+ size_t str_len)
+{
+ struct qedr_dev *qedr = get_qedr_dev(ibdev);
+ u32 fw_ver = (u32)qedr->attr.fw_ver;
+
+ snprintf(str, str_len, "%d. %d. %d. %d",
+ (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
+ (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
+}
+
+static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
+{
+ struct qedr_dev *qdev;
+
+ qdev = get_qedr_dev(dev);
+ dev_hold(qdev->ndev);
+
+ /* The HW vendor's device driver must guarantee
+ * that this function returns NULL before the net device reaches
+ * NETDEV_UNREGISTER_FINAL state.
+ */
+ return qdev->ndev;
+}
+
+static int qedr_register_device(struct qedr_dev *dev)
+{
+ strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);
+
+ dev->ibdev.node_guid = dev->attr.node_guid;
+ memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
+ dev->ibdev.owner = THIS_MODULE;
+ dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;
+
+ dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
+ QEDR_UVERBS(QUERY_DEVICE) |
+ QEDR_UVERBS(QUERY_PORT) |
+ QEDR_UVERBS(ALLOC_PD) |
+ QEDR_UVERBS(DEALLOC_PD) |
+ QEDR_UVERBS(CREATE_COMP_CHANNEL) |
+ QEDR_UVERBS(CREATE_CQ) |
+ QEDR_UVERBS(RESIZE_CQ) |
+ QEDR_UVERBS(DESTROY_CQ) |
+ QEDR_UVERBS(REQ_NOTIFY_CQ) |
+ QEDR_UVERBS(CREATE_QP) |
+ QEDR_UVERBS(MODIFY_QP) |
+ QEDR_UVERBS(QUERY_QP) |
+ QEDR_UVERBS(DESTROY_QP) |
+ QEDR_UVERBS(REG_MR) |
+ QEDR_UVERBS(DEREG_MR) |
+ QEDR_UVERBS(POLL_CQ) |
+ QEDR_UVERBS(POST_SEND) |
+ QEDR_UVERBS(POST_RECV);
+
+ dev->ibdev.phys_port_cnt = 1;
+ dev->ibdev.num_comp_vectors = dev->num_cnq;
+ dev->ibdev.node_type = RDMA_NODE_IB_CA;
+
+ dev->ibdev.query_device = qedr_query_device;
+ dev->ibdev.query_port = qedr_query_port;
+ dev->ibdev.modify_port = qedr_modify_port;
+
+ dev->ibdev.query_gid = qedr_query_gid;
+ dev->ibdev.add_gid = qedr_add_gid;
+ dev->ibdev.del_gid = qedr_del_gid;
+
+ dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
+ dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
+ dev->ibdev.mmap = qedr_mmap;
+
+ dev->ibdev.alloc_pd = qedr_alloc_pd;
+ dev->ibdev.dealloc_pd = qedr_dealloc_pd;
+
+ dev->ibdev.create_cq = qedr_create_cq;
+ dev->ibdev.destroy_cq = qedr_destroy_cq;
+ dev->ibdev.resize_cq = qedr_resize_cq;
+ dev->ibdev.req_notify_cq = qedr_arm_cq;
+
+ dev->ibdev.create_qp = qedr_create_qp;
+ dev->ibdev.modify_qp = qedr_modify_qp;
+ dev->ibdev.query_qp = qedr_query_qp;
+ dev->ibdev.destroy_qp = qedr_destroy_qp;
+
+ dev->ibdev.query_pkey = qedr_query_pkey;
+
+ dev->ibdev.create_ah = qedr_create_ah;
+ dev->ibdev.destroy_ah = qedr_destroy_ah;
+
+ dev->ibdev.get_dma_mr = qedr_get_dma_mr;
+ dev->ibdev.dereg_mr = qedr_dereg_mr;
+ dev->ibdev.reg_user_mr = qedr_reg_user_mr;
+ dev->ibdev.alloc_mr = qedr_alloc_mr;
+ dev->ibdev.map_mr_sg = qedr_map_mr_sg;
+
+ dev->ibdev.poll_cq = qedr_poll_cq;
+ dev->ibdev.post_send = qedr_post_send;
+ dev->ibdev.post_recv = qedr_post_recv;
+
+ dev->ibdev.process_mad = qedr_process_mad;
+ dev->ibdev.get_port_immutable = qedr_port_immutable;
+ dev->ibdev.get_netdev = qedr_get_netdev;
+
+ dev->ibdev.dma_device = &dev->pdev->dev;
+
+ dev->ibdev.get_link_layer = qedr_link_layer;
+ dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;
+
+ return ib_register_device(&dev->ibdev, NULL);
+}
+
+/* This function allocates fast-path status block memory */
+static int qedr_alloc_mem_sb(struct qedr_dev *dev,
+ struct qed_sb_info *sb_info, u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int rc;
+
+ sb_virt = dma_alloc_coherent(&dev->pdev->dev,
+ sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
+ if (!sb_virt)
+ return -ENOMEM;
+
+ rc = dev->ops->common->sb_init(dev->cdev, sb_info,
+ sb_virt, sb_phys, sb_id,
+ QED_SB_TYPE_CNQ);
+ if (rc) {
+ pr_err("Status block initialization failed\n");
+ dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
+ sb_virt, sb_phys);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qedr_free_mem_sb(struct qedr_dev *dev,
+ struct qed_sb_info *sb_info, int sb_id)
+{
+ if (sb_info->sb_virt) {
+ dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
+ dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt, sb_info->sb_phys);
+ }
+}
+
+static void qedr_free_resources(struct qedr_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->num_cnq; i++) {
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+ dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+ }
+
+ kfree(dev->cnq_array);
+ kfree(dev->sb_array);
+ kfree(dev->sgid_tbl);
+}
+
+static int qedr_alloc_resources(struct qedr_dev *dev)
+{
+ struct qedr_cnq *cnq;
+ __le16 *cons_pi;
+ u16 n_entries;
+ int i, rc;
+
+ dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
+ QEDR_MAX_SGID, GFP_KERNEL);
+ if (!dev->sgid_tbl)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->sgid_lock);
+
+ /* Allocate Status blocks for CNQ */
+ dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
+ GFP_KERNEL);
+ if (!dev->sb_array) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ dev->cnq_array = kcalloc(dev->num_cnq,
+ sizeof(*dev->cnq_array), GFP_KERNEL);
+ if (!dev->cnq_array) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);
+
+ /* Allocate CNQ PBLs */
+ n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
+ for (i = 0; i < dev->num_cnq; i++) {
+ cnq = &dev->cnq_array[i];
+
+ rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
+ dev->sb_start + i);
+ if (rc)
+ goto err3;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ n_entries,
+ sizeof(struct regpair *),
+ &cnq->pbl);
+ if (rc)
+ goto err4;
+
+ cnq->dev = dev;
+ cnq->sb = &dev->sb_array[i];
+ cons_pi = dev->sb_array[i].sb_virt->pi_array;
+ cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
+ cnq->index = i;
+ sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
+ i, qed_chain_get_cons_idx(&cnq->pbl));
+ }
+
+ return 0;
+err4:
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+err3:
+ for (--i; i >= 0; i--) {
+ dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+ }
+ kfree(dev->cnq_array);
+err2:
+ kfree(dev->sb_array);
+err1:
+ kfree(dev->sgid_tbl);
+ return rc;
+}
+
+/* QEDR sysfs interface */
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qedr_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+}
+
+static ssize_t show_hca_type(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
+
+static struct device_attribute *qedr_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_hca_type
+};
+
+static void qedr_remove_sysfiles(struct qedr_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
+ device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
+}
+
+static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
+{
+ struct pci_dev *bridge;
+ u32 val;
+
+ dev->atomic_cap = IB_ATOMIC_NONE;
+
+ bridge = pdev->bus->self;
+ if (!bridge)
+ return;
+
+ /* Check whether we are connected directly or via a switch */
+ while (bridge && bridge->bus->parent) {
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n",
+ bridge->bus->number, bridge->bus->primary);
+ /* Need to check Atomic Op Routing Supported all the way to
+ * root complex.
+ */
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
+ if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) {
+ pcie_capability_clear_word(pdev,
+ PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ return;
+ }
+ bridge = bridge->bus->parent->self;
+ }
+ bridge = pdev->bus->self;
+
+ /* according to bridge capability */
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
+ if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) {
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ dev->atomic_cap = IB_ATOMIC_GLOB;
+ } else {
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ }
+}
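
qedr_pci_set_atomic() above walks every bridge between the adapter and the root complex and only leaves AtomicOp requests enabled when routing is supported along the whole path and the top-level bridge is a 64-bit completer. A standalone sketch that decodes just the relevant Device Capabilities 2 bits; the mask values follow the PCIe DevCap2 register layout (bit 6 AtomicOp routing, bit 8 64-bit completer), and the register words fed in below are made-up sample values.

#include <stdio.h>
#include <stdint.h>

#define DEVCAP2_ATOMIC_ROUTE   0x00000040u	/* AtomicOp routing supported */
#define DEVCAP2_ATOMIC_COMP64  0x00000100u	/* 64-bit AtomicOp completer  */

/* Decide whether a requester behind these capability words may issue
 * 64-bit atomics, mirroring the decision in qedr_pci_set_atomic(). */
static int atomics_usable(uint32_t bridge_devcap2, uint32_t root_devcap2)
{
	if (!(bridge_devcap2 & DEVCAP2_ATOMIC_ROUTE))
		return 0;	/* a switch on the path cannot route them */
	return !!(root_devcap2 & DEVCAP2_ATOMIC_COMP64);
}

int main(void)
{
	printf("routing + completer: %d\n", atomics_usable(0x40, 0x100));
	printf("no routing:          %d\n", atomics_usable(0x00, 0x100));
	return 0;
}
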
+
+static const struct qed_rdma_ops *qed_ops;
+
+#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
+
+static irqreturn_t qedr_irq_handler(int irq, void *handle)
+{
+ u16 hw_comp_cons, sw_comp_cons;
+ struct qedr_cnq *cnq = handle;
+ struct regpair *cq_handle;
+ struct qedr_cq *cq;
+
+ qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);
+
+ qed_sb_update_sb_idx(cnq->sb);
+
+ hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+ /* Align protocol-index and chain reads */
+ rmb();
+
+ while (sw_comp_cons != hw_comp_cons) {
+ cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
+ cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
+ cq_handle->lo);
+
+ if (cq == NULL) {
+ DP_ERR(cnq->dev,
+ "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
+ cq_handle->hi, cq_handle->lo, sw_comp_cons,
+ hw_comp_cons);
+
+ break;
+ }
+
+ if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
+ DP_ERR(cnq->dev,
+ "Problem with cq signature, cq_handle->hi=%d ch_handle->lo=%d cq=%p\n",
+ cq_handle->hi, cq_handle->lo, cq);
+ break;
+ }
+
+ cq->arm_flags = 0;
+
+ if (cq->ibcq.comp_handler)
+ (*cq->ibcq.comp_handler)
+ (&cq->ibcq, cq->ibcq.cq_context);
+
+ sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+ cnq->n_comp++;
+
+ }
+
+ qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
+ sw_comp_cons);
+
+ qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);
+
+ return IRQ_HANDLED;
+}
+
+static void qedr_sync_free_irqs(struct qedr_dev *dev)
+{
+ u32 vector;
+ int i;
+
+ for (i = 0; i < dev->int_info.used_cnt; i++) {
+ if (dev->int_info.msix_cnt) {
+ vector = dev->int_info.msix[i * dev->num_hwfns].vector;
+ synchronize_irq(vector);
+ free_irq(vector, &dev->cnq_array[i]);
+ }
+ }
+
+ dev->int_info.used_cnt = 0;
+}
+
+static int qedr_req_msix_irqs(struct qedr_dev *dev)
+{
+ int i, rc = 0;
+
+ if (dev->num_cnq > dev->int_info.msix_cnt) {
+ DP_ERR(dev,
+ "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
+ dev->num_cnq, dev->int_info.msix_cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev->num_cnq; i++) {
+ rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
+ qedr_irq_handler, 0, dev->cnq_array[i].name,
+ &dev->cnq_array[i]);
+ if (rc) {
+ DP_ERR(dev, "Request cnq %d irq failed\n", i);
+ qedr_sync_free_irqs(dev);
+ } else {
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
+ dev->cnq_array[i].name, i,
+ &dev->cnq_array[i]);
+ dev->int_info.used_cnt++;
+ }
+ }
+
+ return rc;
+}
+
+static int qedr_setup_irqs(struct qedr_dev *dev)
+{
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");
+
+ /* Learn Interrupt configuration */
+ rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
+ if (rc < 0)
+ return rc;
+
+ rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
+ if (rc) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
+ return rc;
+ }
+
+ if (dev->int_info.msix_cnt) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
+ dev->int_info.msix_cnt);
+ rc = qedr_req_msix_irqs(dev);
+ if (rc)
+ return rc;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");
+
+ return 0;
+}
+
+static int qedr_set_device_attr(struct qedr_dev *dev)
+{
+ struct qed_rdma_device *qed_attr;
+ struct qedr_device_attr *attr;
+ u32 page_size;
+
+ /* Part 1 - query core capabilities */
+ qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
+
+ /* Part 2 - check capabilities */
+ page_size = ~dev->attr.page_size_caps + 1;
+ if (page_size > PAGE_SIZE) {
+ DP_ERR(dev,
+ "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
+ PAGE_SIZE, page_size);
+ return -ENODEV;
+ }
+
+ /* Part 3 - copy and update capabilities */
+ attr = &dev->attr;
+ attr->vendor_id = qed_attr->vendor_id;
+ attr->vendor_part_id = qed_attr->vendor_part_id;
+ attr->hw_ver = qed_attr->hw_ver;
+ attr->fw_ver = qed_attr->fw_ver;
+ attr->node_guid = qed_attr->node_guid;
+ attr->sys_image_guid = qed_attr->sys_image_guid;
+ attr->max_cnq = qed_attr->max_cnq;
+ attr->max_sge = qed_attr->max_sge;
+ attr->max_inline = qed_attr->max_inline;
+ attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
+ attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
+ attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
+ attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
+ attr->max_dev_resp_rd_atomic_resc =
+ qed_attr->max_dev_resp_rd_atomic_resc;
+ attr->max_cq = qed_attr->max_cq;
+ attr->max_qp = qed_attr->max_qp;
+ attr->max_mr = qed_attr->max_mr;
+ attr->max_mr_size = qed_attr->max_mr_size;
+ attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
+ attr->max_mw = qed_attr->max_mw;
+ attr->max_fmr = qed_attr->max_fmr;
+ attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
+ attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
+ attr->max_pd = qed_attr->max_pd;
+ attr->max_ah = qed_attr->max_ah;
+ attr->max_pkey = qed_attr->max_pkey;
+ attr->max_srq = qed_attr->max_srq;
+ attr->max_srq_wr = qed_attr->max_srq_wr;
+ attr->dev_caps = qed_attr->dev_caps;
+ attr->page_size_caps = qed_attr->page_size_caps;
+ attr->dev_ack_delay = qed_attr->dev_ack_delay;
+ attr->reserved_lkey = qed_attr->reserved_lkey;
+ attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
+ attr->max_stats_queues = qed_attr->max_stats_queues;
+
+ return 0;
+}
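
In qedr_set_device_attr() above, "~page_size_caps + 1" is the two's-complement negation of the capability mask; when the mask has every bit set from the minimum supported page size upward, that expression recovers the minimum, and the check rejects devices whose minimum exceeds the kernel's PAGE_SIZE. A small worked example with a made-up mask:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Made-up capability mask: every page size from 4 KiB upward. */
	uint64_t page_size_caps = ~0xFFFULL;		/* 0xFFFF...F000 */

	/* Same expression as in qedr_set_device_attr(): ~mask + 1 == -mask,
	 * which for a mask of this shape is the smallest supported size. */
	uint64_t min_page = ~page_size_caps + 1;

	printf("minimum supported page size: %llu bytes\n",
	       (unsigned long long)min_page);		/* 4096 */
	return 0;
}
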
+
+void qedr_unaffiliated_event(void *context,
+ u8 event_code)
+{
+ pr_err("unaffiliated event not implemented yet\n");
+}
+
+void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
+{
+#define EVENT_TYPE_NOT_DEFINED 0
+#define EVENT_TYPE_CQ 1
+#define EVENT_TYPE_QP 2
+ struct qedr_dev *dev = (struct qedr_dev *)context;
+ union event_ring_data *data = fw_handle;
+ u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
+ data->roce_handle.lo;
+ u8 event_type = EVENT_TYPE_NOT_DEFINED;
+ struct ib_event event;
+ struct ib_cq *ibcq;
+ struct ib_qp *ibqp;
+ struct qedr_cq *cq;
+ struct qedr_qp *qp;
+
+ switch (e_code) {
+ case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
+ event.event = IB_EVENT_CQ_ERR;
+ event_type = EVENT_TYPE_CQ;
+ break;
+ case ROCE_ASYNC_EVENT_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
+ event.event = IB_EVENT_QP_FATAL;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ default:
+ DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
+ roce_handle64);
+ }
+
+ switch (event_type) {
+ case EVENT_TYPE_CQ:
+ cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
+ if (cq) {
+ ibcq = &cq->ibcq;
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.element.cq = ibcq;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
+ } else {
+ WARN(1,
+ "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
+ roce_handle64);
+ }
+ DP_ERR(dev, "CQ event %d on hanlde %p\n", e_code, cq);
+ break;
+ case EVENT_TYPE_QP:
+ qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
+ if (qp) {
+ ibqp = &qp->ibqp;
+ if (ibqp->event_handler) {
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+ } else {
+ WARN(1,
+ "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
+ roce_handle64);
+ }
+ DP_ERR(dev, "QP event %d on hanlde %p\n", e_code, qp);
+ break;
+ default:
+ break;
+ }
+}
+
+static int qedr_init_hw(struct qedr_dev *dev)
+{
+ struct qed_rdma_add_user_out_params out_params;
+ struct qed_rdma_start_in_params *in_params;
+ struct qed_rdma_cnq_params *cur_pbl;
+ struct qed_rdma_events events;
+ dma_addr_t p_phys_table;
+ u32 page_cnt;
+ int rc = 0;
+ int i;
+
+ in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
+ if (!in_params) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ in_params->desired_cnq = dev->num_cnq;
+ for (i = 0; i < dev->num_cnq; i++) {
+ cur_pbl = &in_params->cnq_pbl_list[i];
+
+ page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
+ cur_pbl->num_pbl_pages = page_cnt;
+
+ p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
+ cur_pbl->pbl_ptr = (u64)p_phys_table;
+ }
+
+ events.affiliated_event = qedr_affiliated_event;
+ events.unaffiliated_event = qedr_unaffiliated_event;
+ events.context = dev;
+
+ in_params->events = &events;
+ in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
+ in_params->max_mtu = dev->ndev->mtu;
+ ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);
+
+ rc = dev->ops->rdma_init(dev->cdev, in_params);
+ if (rc)
+ goto out;
+
+ rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
+ if (rc)
+ goto out;
+
+ dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
+ dev->db_phys_addr = out_params.dpi_phys_addr;
+ dev->db_size = out_params.dpi_size;
+ dev->dpi = out_params.dpi;
+
+ rc = qedr_set_device_attr(dev);
+out:
+ kfree(in_params);
+ if (rc)
+ DP_ERR(dev, "Init HW Failed rc = %d\n", rc);
+
+ return rc;
+}
+
+void qedr_stop_hw(struct qedr_dev *dev)
+{
+ dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
+ dev->ops->rdma_stop(dev->rdma_ctx);
+}
+
+static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
+ struct net_device *ndev)
+{
+ struct qed_dev_rdma_info dev_info;
+ struct qedr_dev *dev;
+ int rc = 0, i;
+
+ dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
+ if (!dev) {
+ pr_err("Unable to allocate ib device\n");
+ return NULL;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");
+
+ dev->pdev = pdev;
+ dev->ndev = ndev;
+ dev->cdev = cdev;
+
+ qed_ops = qed_get_rdma_ops();
+ if (!qed_ops) {
+ DP_ERR(dev, "Failed to get qed roce operations\n");
+ goto init_err;
+ }
+
+ dev->ops = qed_ops;
+ rc = qed_ops->fill_dev_info(cdev, &dev_info);
+ if (rc)
+ goto init_err;
+
+ dev->num_hwfns = dev_info.common.num_hwfns;
+ dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
+
+ dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
+ if (!dev->num_cnq) {
+ DP_ERR(dev, "not enough CNQ resources.\n");
+ goto init_err;
+ }
+
+ dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;
+
+ qedr_pci_set_atomic(dev, pdev);
+
+ rc = qedr_alloc_resources(dev);
+ if (rc)
+ goto init_err;
+
+ rc = qedr_init_hw(dev);
+ if (rc)
+ goto alloc_err;
+
+ rc = qedr_setup_irqs(dev);
+ if (rc)
+ goto irq_err;
+
+ rc = qedr_register_device(dev);
+ if (rc) {
+ DP_ERR(dev, "Unable to allocate register device\n");
+ goto reg_err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
+ if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
+ goto sysfs_err;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
+ return dev;
+
+sysfs_err:
+ ib_unregister_device(&dev->ibdev);
+reg_err:
+ qedr_sync_free_irqs(dev);
+irq_err:
+ qedr_stop_hw(dev);
+alloc_err:
+ qedr_free_resources(dev);
+init_err:
+ ib_dealloc_device(&dev->ibdev);
+ DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
+
+ return NULL;
+}
+
+static void qedr_remove(struct qedr_dev *dev)
+{
+ /* First unregister with stack to stop all the active traffic
+ * of the registered clients.
+ */
+ qedr_remove_sysfiles(dev);
+ ib_unregister_device(&dev->ibdev);
+
+ qedr_stop_hw(dev);
+ qedr_sync_free_irqs(dev);
+ qedr_free_resources(dev);
+ ib_dealloc_device(&dev->ibdev);
+}
+
+static int qedr_close(struct qedr_dev *dev)
+{
+ qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+
+ return 0;
+}
+
+static void qedr_shutdown(struct qedr_dev *dev)
+{
+ qedr_close(dev);
+ qedr_remove(dev);
+}
+
+static void qedr_mac_address_change(struct qedr_dev *dev)
+{
+ union ib_gid *sgid = &dev->sgid_tbl[0];
+ u8 guid[8], mac_addr[6];
+ int rc;
+
+ /* Update SGID */
+ ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
+ guid[0] = mac_addr[0] ^ 2;
+ guid[1] = mac_addr[1];
+ guid[2] = mac_addr[2];
+ guid[3] = 0xff;
+ guid[4] = 0xfe;
+ guid[5] = mac_addr[3];
+ guid[6] = mac_addr[4];
+ guid[7] = mac_addr[5];
+ sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ memcpy(&sgid->raw[8], guid, sizeof(guid));
+
+ /* Update LL2 */
+ rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev,
+ dev->gsi_ll2_mac_address,
+ dev->ndev->dev_addr);
+
+ ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+ qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+
+ if (rc)
+ DP_ERR(dev, "Error updating mac filter\n");
+}
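
qedr_mac_address_change() above rebuilds the default GID from the new MAC address using the usual modified EUI-64 derivation: flip the universal/local bit of the first octet, splice 0xff,0xfe into the middle, and pair the result with the fe80::/64 link-local prefix. A standalone sketch of that derivation; the MAC address below is an arbitrary example.

#include <stdio.h>
#include <stdint.h>

/* Build the interface-identifier half of the default GID from a MAC,
 * mirroring the guid[] assignments in qedr_mac_address_change(). */
static void mac_to_eui64(const uint8_t mac[6], uint8_t guid[8])
{
	guid[0] = mac[0] ^ 2;	/* flip the universal/local bit */
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x12, 0x34, 0x56 };
	uint8_t guid[8];
	int i;

	mac_to_eui64(mac, guid);
	printf("GID = fe80::");		/* subnet prefix set by the driver */
	for (i = 0; i < 8; i += 2)
		printf("%02x%02x%s", guid[i], guid[i + 1], i < 6 ? ":" : "\n");
	return 0;			/* fe80::020e:1eff:fe12:3456 */
}
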
+
+/* Event handling via the NIC driver ensures that all NIC-specific
+ * initialization is done before the RoCE driver notifies the
+ * event to the stack.
+ */
+static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
+{
+ switch (event) {
+ case QEDE_UP:
+ qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ break;
+ case QEDE_DOWN:
+ qedr_close(dev);
+ break;
+ case QEDE_CLOSE:
+ qedr_shutdown(dev);
+ break;
+ case QEDE_CHANGE_ADDR:
+ qedr_mac_address_change(dev);
+ break;
+ default:
+ pr_err("Event not supported\n");
+ }
+}
+
+static struct qedr_driver qedr_drv = {
+ .name = "qedr_driver",
+ .add = qedr_add,
+ .remove = qedr_remove,
+ .notify = qedr_notify,
+};
+
+static int __init qedr_init_module(void)
+{
+ return qede_roce_register_driver(&qedr_drv);
+}
+
+static void __exit qedr_exit_module(void)
+{
+ qede_roce_unregister_driver(&qedr_drv);
+}
+
+module_init(qedr_init_module);
+module_exit(qedr_exit_module);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
new file mode 100644
index 000000000000..620badd7d4fb
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -0,0 +1,495 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_H__
+#define __QEDR_H__
+
+#include <linux/pci.h>
+#include <rdma/ib_addr.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_roce_if.h>
+#include <linux/qed/qede_roce.h>
+#include "qedr_hsi.h"
+
+#define QEDR_MODULE_VERSION "8.10.10.0"
+#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
+#define DP_NAME(dev) ((dev)->ibdev.name)
+
+#define DP_DEBUG(dev, module, fmt, ...) \
+ pr_debug("(%s) " module ": " fmt, \
+ DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__)
+
+#define QEDR_MSG_INIT "INIT"
+#define QEDR_MSG_MISC "MISC"
+#define QEDR_MSG_CQ " CQ"
+#define QEDR_MSG_MR " MR"
+#define QEDR_MSG_RQ " RQ"
+#define QEDR_MSG_SQ " SQ"
+#define QEDR_MSG_QP " QP"
+#define QEDR_MSG_GSI " GSI"
+
+#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
+
+struct qedr_dev;
+
+struct qedr_cnq {
+ struct qedr_dev *dev;
+ struct qed_chain pbl;
+ struct qed_sb_info *sb;
+ char name[32];
+ u64 n_comp;
+ __le16 *hw_cons_ptr;
+ u8 index;
+};
+
+#define QEDR_MAX_SGID 128
+
+struct qedr_device_attr {
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ u64 fw_ver;
+ u64 node_guid;
+ u64 sys_image_guid;
+ u8 max_cnq;
+ u8 max_sge;
+ u16 max_inline;
+ u32 max_sqe;
+ u32 max_rqe;
+ u8 max_qp_resp_rd_atomic_resc;
+ u8 max_qp_req_rd_atomic_resc;
+ u64 max_dev_resp_rd_atomic_resc;
+ u32 max_cq;
+ u32 max_qp;
+ u32 max_mr;
+ u64 max_mr_size;
+ u32 max_cqe;
+ u32 max_mw;
+ u32 max_fmr;
+ u32 max_mr_mw_fmr_pbl;
+ u64 max_mr_mw_fmr_size;
+ u32 max_pd;
+ u32 max_ah;
+ u8 max_pkey;
+ u32 max_srq;
+ u32 max_srq_wr;
+ u8 max_srq_sge;
+ u8 max_stats_queues;
+ u32 dev_caps;
+
+ u64 page_size_caps;
+ u8 dev_ack_delay;
+ u32 reserved_lkey;
+ u32 bad_pkey_counter;
+ struct qed_rdma_events events;
+};
+
+struct qedr_dev {
+ struct ib_device ibdev;
+ struct qed_dev *cdev;
+ struct pci_dev *pdev;
+ struct net_device *ndev;
+
+ enum ib_atomic_cap atomic_cap;
+
+ void *rdma_ctx;
+ struct qedr_device_attr attr;
+
+ const struct qed_rdma_ops *ops;
+ struct qed_int_info int_info;
+
+ struct qed_sb_info *sb_array;
+ struct qedr_cnq *cnq_array;
+ int num_cnq;
+ int sb_start;
+
+ void __iomem *db_addr;
+ u64 db_phys_addr;
+ u32 db_size;
+ u16 dpi;
+
+ union ib_gid *sgid_tbl;
+
+ /* Lock for sgid table */
+ spinlock_t sgid_lock;
+
+ u64 guid;
+
+ u32 dp_module;
+ u8 dp_level;
+ u8 num_hwfns;
+ uint wq_multiplier;
+ u8 gsi_ll2_mac_address[ETH_ALEN];
+ int gsi_qp_created;
+ struct qedr_cq *gsi_sqcq;
+ struct qedr_cq *gsi_rqcq;
+ struct qedr_qp *gsi_qp;
+};
+
+#define QEDR_MAX_SQ_PBL (0x8000)
+#define QEDR_MAX_SQ_PBL_ENTRIES (0x10000 / sizeof(void *))
+#define QEDR_SQE_ELEMENT_SIZE (sizeof(struct rdma_sq_sge))
+#define QEDR_MAX_SQE_ELEMENTS_PER_SQE (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
+ QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
+ QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE ((QEDR_MAX_SQ_PBL_ENTRIES) *\
+ (RDMA_RING_PAGE_SIZE) / \
+ (QEDR_SQE_ELEMENT_SIZE) /\
+ (QEDR_MAX_SQE_ELEMENTS_PER_SQE))
+/* RQ */
+#define QEDR_MAX_RQ_PBL (0x2000)
+#define QEDR_MAX_RQ_PBL_ENTRIES (0x10000 / sizeof(void *))
+#define QEDR_RQE_ELEMENT_SIZE (sizeof(struct rdma_rq_sge))
+#define QEDR_MAX_RQE_ELEMENTS_PER_RQE (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
+ QEDR_RQE_ELEMENT_SIZE)
+#define QEDR_MAX_RQE ((QEDR_MAX_RQ_PBL_ENTRIES) *\
+ (RDMA_RING_PAGE_SIZE) / \
+ (QEDR_RQE_ELEMENT_SIZE) /\
+ (QEDR_MAX_RQE_ELEMENTS_PER_RQE))
+
+#define QEDR_CQE_SIZE (sizeof(union rdma_cqe))
+#define QEDR_MAX_CQE_PBL_SIZE (512 * 1024)
+#define QEDR_MAX_CQE_PBL_ENTRIES (((QEDR_MAX_CQE_PBL_SIZE) / \
+ sizeof(u64)) - 1)
+#define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \
+ (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE))
+
+#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
+
+#define QEDR_MAX_PORT (1)
+
+#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+
+#define QEDR_ROCE_PKEY_MAX 1
+#define QEDR_ROCE_PKEY_TABLE_LEN 1
+#define QEDR_ROCE_PKEY_DEFAULT 0xffff
+
+struct qedr_pbl {
+ struct list_head list_entry;
+ void *va;
+ dma_addr_t pa;
+};
+
+struct qedr_ucontext {
+ struct ib_ucontext ibucontext;
+ struct qedr_dev *dev;
+ struct qedr_pd *pd;
+ u64 dpi_addr;
+ u64 dpi_phys_addr;
+ u32 dpi_size;
+ u16 dpi;
+
+ struct list_head mm_head;
+
+ /* Lock to protect mm list */
+ struct mutex mm_list_lock;
+};
+
+union db_prod64 {
+ struct rdma_pwm_val32_data data;
+ u64 raw;
+};
+
+enum qedr_cq_type {
+ QEDR_CQ_TYPE_GSI,
+ QEDR_CQ_TYPE_KERNEL,
+ QEDR_CQ_TYPE_USER,
+};
+
+struct qedr_pbl_info {
+ u32 num_pbls;
+ u32 num_pbes;
+ u32 pbl_size;
+ u32 pbe_size;
+ bool two_layered;
+};
+
+struct qedr_userq {
+ struct ib_umem *umem;
+ struct qedr_pbl_info pbl_info;
+ struct qedr_pbl *pbl_tbl;
+ u64 buf_addr;
+ size_t buf_len;
+};
+
+struct qedr_cq {
+ struct ib_cq ibcq;
+
+ enum qedr_cq_type cq_type;
+ u32 sig;
+
+ u16 icid;
+
+ /* Lock to protect completion handler */
+ spinlock_t comp_handler_lock;
+
+ /* Lock to protect multiple CQs */
+ spinlock_t cq_lock;
+ u8 arm_flags;
+ struct qed_chain pbl;
+
+ void __iomem *db_addr;
+ union db_prod64 db;
+
+ u8 pbl_toggle;
+ union rdma_cqe *latest_cqe;
+ union rdma_cqe *toggle_cqe;
+
+ u32 cq_cons;
+
+ struct qedr_userq q;
+};
+
+struct qedr_pd {
+ struct ib_pd ibpd;
+ u32 pd_id;
+ struct qedr_ucontext *uctx;
+};
+
+struct qedr_mm {
+ struct {
+ u64 phy_addr;
+ unsigned long len;
+ } key;
+ struct list_head entry;
+};
+
+union db_prod32 {
+ struct rdma_pwm_val16_data data;
+ u32 raw;
+};
+
+struct qedr_qp_hwq_info {
+ /* WQE Elements */
+ struct qed_chain pbl;
+ u64 p_phys_addr_tbl;
+ u32 max_sges;
+
+ /* WQE */
+ u16 prod;
+ u16 cons;
+ u16 wqe_cons;
+ u16 gsi_cons;
+ u16 max_wr;
+
+ /* DB */
+ void __iomem *db;
+ union db_prod32 db_data;
+};
+
+#define QEDR_INC_SW_IDX(p_info, index) \
+ do { \
+ p_info->index = (p_info->index + 1) & \
+ qed_chain_get_capacity(p_info->pbl) \
+ } while (0)
+
+enum qedr_qp_err_bitmap {
+ QEDR_QP_ERR_SQ_FULL = 1,
+ QEDR_QP_ERR_RQ_FULL = 2,
+ QEDR_QP_ERR_BAD_SR = 4,
+ QEDR_QP_ERR_BAD_RR = 8,
+ QEDR_QP_ERR_SQ_PBL_FULL = 16,
+ QEDR_QP_ERR_RQ_PBL_FULL = 32,
+};
+
+struct qedr_qp {
+ struct ib_qp ibqp; /* must be first */
+ struct qedr_dev *dev;
+
+ struct qedr_qp_hwq_info sq;
+ struct qedr_qp_hwq_info rq;
+
+ u32 max_inline_data;
+
+ /* Lock for QP's */
+ spinlock_t q_lock;
+ struct qedr_cq *sq_cq;
+ struct qedr_cq *rq_cq;
+ struct qedr_srq *srq;
+ enum qed_roce_qp_state state;
+ u32 id;
+ struct qedr_pd *pd;
+ enum ib_qp_type qp_type;
+ struct qed_rdma_qp *qed_qp;
+ u32 qp_id;
+ u16 icid;
+ u16 mtu;
+ int sgid_idx;
+ u32 rq_psn;
+ u32 sq_psn;
+ u32 qkey;
+ u32 dest_qp_num;
+
+ /* Relevant to qps created from kernel space only (ULPs) */
+ u8 prev_wqe_size;
+ u16 wqe_cons;
+ u32 err_bitmap;
+ bool signaled;
+
+ /* SQ shadow */
+ struct {
+ u64 wr_id;
+ enum ib_wc_opcode opcode;
+ u32 bytes_len;
+ u8 wqe_size;
+ bool signaled;
+ dma_addr_t icrc_mapping;
+ u32 *icrc;
+ struct qedr_mr *mr;
+ } *wqe_wr_id;
+
+ /* RQ shadow */
+ struct {
+ u64 wr_id;
+ struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
+ u8 wqe_size;
+
+ u8 smac[ETH_ALEN];
+ u16 vlan_id;
+ int rc;
+ } *rqe_wr_id;
+
+ /* Relevant to qps created from user space only (applications) */
+ struct qedr_userq usq;
+ struct qedr_userq urq;
+};
+
+struct qedr_ah {
+ struct ib_ah ibah;
+ struct ib_ah_attr attr;
+};
+
+enum qedr_mr_type {
+ QEDR_MR_USER,
+ QEDR_MR_KERNEL,
+ QEDR_MR_DMA,
+ QEDR_MR_FRMR,
+};
+
+struct mr_info {
+ struct qedr_pbl *pbl_table;
+ struct qedr_pbl_info pbl_info;
+ struct list_head free_pbl_list;
+ struct list_head inuse_pbl_list;
+ u32 completed;
+ u32 completed_handled;
+};
+
+struct qedr_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+
+ struct qed_rdma_register_tid_in_params hw_mr;
+ enum qedr_mr_type type;
+
+ struct qedr_dev *dev;
+ struct mr_info info;
+
+ u64 *pages;
+ u32 npages;
+};
+
+#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))
+
+#define QEDR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
+ RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
+#define QEDR_RESP_RDMA (RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
+ RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
+#define QEDR_RESP_RDMA_IMM (QEDR_RESP_IMM | QEDR_RESP_RDMA)
+
+static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info)
+{
+ info->cons = (info->cons + 1) % info->max_wr;
+ info->wqe_cons++;
+}
+
+static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info)
+{
+ info->prod = (info->prod + 1) % info->max_wr;
+}
+
+static inline int qedr_get_dmac(struct qedr_dev *dev,
+ struct ib_ah_attr *ah_attr, u8 *mac_addr)
+{
+ union ib_gid zero_sgid = { { 0 } };
+ struct in6_addr in6;
+
+ if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
+ DP_ERR(dev, "Local port GID not supported\n");
+ eth_zero_addr(mac_addr);
+ return -EINVAL;
+ }
+
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+ ether_addr_copy(mac_addr, ah_attr->dmac);
+
+ return 0;
+}
+
+static inline
+struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct qedr_ucontext, ibucontext);
+}
+
+static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct qedr_dev, ibdev);
+}
+
+static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct qedr_pd, ibpd);
+}
+
+static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct qedr_cq, ibcq);
+}
+
+static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct qedr_qp, ibqp);
+}
+
+static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct qedr_ah, ibah);
+}
+
+static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct qedr_mr, ibmr);
+}
+#endif
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
new file mode 100644
index 000000000000..63890ebb72bd
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -0,0 +1,622 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include "qedr_hsi.h"
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qedr.h"
+#include "qedr_hsi.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_hsi.h"
+#include "qedr_cm.h"
+
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
+{
+ info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
+}
+
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ dev->gsi_qp_created = 1;
+ dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
+ dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
+ dev->gsi_qp = qp;
+}
+
+void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
+{
+ struct qedr_dev *dev = (struct qedr_dev *)_qdev;
+ struct qedr_cq *cq = dev->gsi_sqcq;
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+
+ DP_DEBUG(dev, QEDR_MSG_GSI,
+ "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
+ dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
+ cq->ibcq.comp_handler ? "Yes" : "No");
+
+ dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
+ pkt->header.baddr);
+ kfree(pkt);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+ qedr_inc_sw_gsi_cons(&qp->sq);
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (cq->ibcq.comp_handler) {
+ spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+ spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+ }
+}
+
+void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
+ struct qed_roce_ll2_rx_params *params)
+{
+ struct qedr_dev *dev = (struct qedr_dev *)_dev;
+ struct qedr_cq *cq = dev->gsi_rqcq;
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc;
+ qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id;
+ qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;
+ ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac);
+
+ qedr_inc_sw_gsi_cons(&qp->rq);
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (cq->ibcq.comp_handler) {
+ spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+ spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+ }
+}
+
+static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ struct qed_rdma_destroy_cq_in_params iparams;
+ struct qed_rdma_destroy_cq_out_params oparams;
+ struct qedr_cq *cq;
+
+ cq = get_qedr_cq(attrs->send_cq);
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+
+ cq = get_qedr_cq(attrs->recv_cq);
+ /* if a dedicated recv_cq was used, delete it too */
+ if (iparams.icid != cq->icid) {
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
+}
+
+static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_recv_sge is larger the max %d>%d\n",
+ attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_recv_wr is too large %d>%d\n",
+ attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_send_wr is too large %d>%d\n",
+ attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp)
+{
+ struct qed_roce_ll2_params ll2_params;
+ int rc;
+
+ rc = qedr_check_gsi_qp_attrs(dev, attrs);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* configure and start LL2 */
+ memset(&ll2_params, 0, sizeof(ll2_params));
+ ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
+ ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
+ ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
+ ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
+ ll2_params.cb_cookie = (void *)dev;
+ ll2_params.mtu = dev->ndev->mtu;
+ ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
+ rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
+ if (rc) {
+ DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
+ return ERR_PTR(rc);
+ }
+
+ /* create QP */
+ qp->ibqp.qp_num = 1;
+ qp->rq.max_wr = attrs->cap.max_recv_wr;
+ qp->sq.max_wr = attrs->cap.max_send_wr;
+
+ qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->rqe_wr_id)
+ goto err;
+ qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->wqe_wr_id)
+ goto err;
+
+ qedr_store_gsi_qp_cq(dev, qp, attrs);
+ ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+ /* the GSI CQ is handled by the driver so remove it from the FW */
+ qedr_destroy_gsi_cq(dev, attrs);
+ dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
+ dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
+
+ DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
+
+ return &qp->ibqp;
+
+err:
+ kfree(qp->rqe_wr_id);
+
+ rc = dev->ops->roce_ll2_stop(dev->cdev);
+ if (rc)
+ DP_ERR(dev, "create gsi qp: failed destroy on create\n");
+
+ return ERR_PTR(-ENOMEM);
+}
+
+int qedr_destroy_gsi_qp(struct qedr_dev *dev)
+{
+ int rc;
+
+ rc = dev->ops->roce_ll2_stop(dev->cdev);
+ if (rc)
+ DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
+ else
+ DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");
+
+ return rc;
+}
+
+#define QEDR_MAX_UD_HEADER_SIZE (100)
+#define QEDR_GSI_QPN (1)
+static inline int qedr_gsi_build_header(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_send_wr *swr,
+ struct ib_ud_header *udh,
+ int *roce_mode)
+{
+ bool has_vlan = false, has_grh_ipv6 = true;
+ struct ib_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
+ struct ib_global_route *grh = &ah_attr->grh;
+ union ib_gid sgid;
+ int send_size = 0;
+ u16 vlan_id = 0;
+ u16 ether_type;
+ struct ib_gid_attr sgid_attr;
+ int rc;
+ int ip_ver = 0;
+
+ bool has_udp = false;
+ int i;
+
+ send_size = 0;
+ for (i = 0; i < swr->num_sge; ++i)
+ send_size += swr->sg_list[i].length;
+
+ rc = ib_get_cached_gid(qp->ibqp.device, ah_attr->port_num,
+ grh->sgid_index, &sgid, &sgid_attr);
+ if (rc) {
+ DP_ERR(dev,
+ "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
+ ah_attr->port_num, grh->sgid_index);
+ return rc;
+ }
+
+ vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+ if (vlan_id < VLAN_CFI_MASK)
+ has_vlan = true;
+ if (sgid_attr.ndev)
+ dev_put(sgid_attr.ndev);
+
+ if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
+ DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
+ ah_attr->grh.sgid_index);
+ return -ENOENT;
+ }
+
+ has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
+ if (!has_udp) {
+ /* RoCE v1 */
+ ether_type = ETH_P_ROCE;
+ *roce_mode = ROCE_V1;
+ } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
+ /* RoCE v2 IPv4 */
+ ip_ver = 4;
+ ether_type = ETH_P_IP;
+ has_grh_ipv6 = false;
+ *roce_mode = ROCE_V2_IPV4;
+ } else {
+ /* RoCE v2 IPv6 */
+ ip_ver = 6;
+ ether_type = ETH_P_IPV6;
+ *roce_mode = ROCE_V2_IPV6;
+ }
+
+ rc = ib_ud_header_init(send_size, false, true, has_vlan,
+ has_grh_ipv6, ip_ver, has_udp, 0, udh);
+ if (rc) {
+ DP_ERR(dev, "gsi post send: failed to init header\n");
+ return rc;
+ }
+
+ /* ENET + VLAN headers */
+ ether_addr_copy(udh->eth.dmac_h, ah_attr->dmac);
+ ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
+ if (has_vlan) {
+ udh->eth.type = htons(ETH_P_8021Q);
+ udh->vlan.tag = htons(vlan_id);
+ udh->vlan.type = htons(ether_type);
+ } else {
+ udh->eth.type = htons(ether_type);
+ }
+
+ /* BTH */
+ udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
+ udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
+ udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
+ udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
+ udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+
+ /* DETH */
+ udh->deth.qkey = htonl(0x80010000);
+ udh->deth.source_qpn = htonl(QEDR_GSI_QPN);
+
+ if (has_grh_ipv6) {
+ /* GRH / IPv6 header */
+ udh->grh.traffic_class = grh->traffic_class;
+ udh->grh.flow_label = grh->flow_label;
+ udh->grh.hop_limit = grh->hop_limit;
+ udh->grh.destination_gid = grh->dgid;
+ memcpy(&udh->grh.source_gid.raw, &sgid.raw,
+ sizeof(udh->grh.source_gid.raw));
+ } else {
+ /* IPv4 header */
+ u32 ipv4_addr;
+
+ udh->ip4.protocol = IPPROTO_UDP;
+ udh->ip4.tos = htonl(ah_attr->grh.flow_label);
+ udh->ip4.frag_off = htons(IP_DF);
+ udh->ip4.ttl = ah_attr->grh.hop_limit;
+
+ ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
+ udh->ip4.saddr = ipv4_addr;
+ ipv4_addr = qedr_get_ipv4_from_gid(ah_attr->grh.dgid.raw);
+ udh->ip4.daddr = ipv4_addr;
+ /* note: checksum is calculated by the device */
+ }
+
+ /* UDP */
+ if (has_udp) {
+ udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
+ udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
+ udh->udp.csum = 0;
+ /* UDP length is untouched hence is zero */
+ }
+ return 0;
+}
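
qedr_gsi_build_header() above picks the wire format from the source GID: a plain RoCE v1 Ethertype when the GID is not UDP-encapsulated, otherwise RoCE v2 over IPv4 or IPv6 depending on whether the GID is an IPv4-mapped address. A condensed standalone sketch of just that selection; the Ethertype constants are the standard ones (the RoCE v1 value matches the ETH_P_ROCE define in qedr_cm.h below), and the sample GID is arbitrary.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

enum roce_mode { ROCE_V1, ROCE_V2_IPV4, ROCE_V2_IPV6 };

/* Is this 16-byte GID an IPv4-mapped IPv6 address (::ffff:a.b.c.d)? */
static int gid_is_v4_mapped(const uint8_t gid[16])
{
	static const uint8_t prefix[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };
	return memcmp(gid, prefix, sizeof(prefix)) == 0;
}

/* Same decision as qedr_gsi_build_header(); gid_is_udp corresponds to
 * the IB_GID_TYPE_ROCE_UDP_ENCAP check in the driver. */
static enum roce_mode pick_mode(int gid_is_udp, const uint8_t gid[16],
				uint16_t *ether_type)
{
	if (!gid_is_udp) {
		*ether_type = 0x8915;			/* RoCE v1 */
		return ROCE_V1;
	}
	if (gid_is_v4_mapped(gid)) {
		*ether_type = 0x0800;			/* IPv4 */
		return ROCE_V2_IPV4;
	}
	*ether_type = 0x86dd;				/* IPv6 */
	return ROCE_V2_IPV6;
}

int main(void)
{
	uint8_t v4gid[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 0, 2, 1 };
	uint16_t et;

	enum roce_mode m = pick_mode(1, v4gid, &et);
	printf("mode=%d ethertype=0x%04x\n", m, et);	/* RoCE v2 over IPv4 */
	return 0;
}
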
+
+static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_send_wr *swr,
+ struct qed_roce_ll2_packet **p_packet)
+{
+ u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
+ struct qed_roce_ll2_packet *packet;
+ struct pci_dev *pdev = dev->pdev;
+ int roce_mode, header_size;
+ struct ib_ud_header udh;
+ int i, rc;
+
+ *p_packet = NULL;
+
+ rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
+ if (rc)
+ return rc;
+
+ header_size = ib_ud_header_pack(&udh, &ud_header_buffer);
+
+ packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+ if (!packet)
+ return -ENOMEM;
+
+ packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
+ &packet->header.baddr,
+ GFP_ATOMIC);
+ if (!packet->header.vaddr) {
+ kfree(packet);
+ return -ENOMEM;
+ }
+
+ if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
+ packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+ else
+ packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+
+ packet->roce_mode = roce_mode;
+ memcpy(packet->header.vaddr, ud_header_buffer, header_size);
+ packet->header.len = header_size;
+ packet->n_seg = swr->num_sge;
+ for (i = 0; i < packet->n_seg; i++) {
+ packet->payload[i].baddr = swr->sg_list[i].addr;
+ packet->payload[i].len = swr->sg_list[i].length;
+ }
+
+ *p_packet = packet;
+
+ return 0;
+}
+
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qed_roce_ll2_packet *pkt = NULL;
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qed_roce_ll2_tx_params params;
+ struct qedr_dev *dev = qp->dev;
+ unsigned long flags;
+ int rc;
+
+ if (qp->state != QED_ROCE_QP_STATE_RTS) {
+ *bad_wr = wr;
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTS\n",
+ qp->state);
+ return -EINVAL;
+ }
+
+ if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
+ DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
+ wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (wr->opcode != IB_WR_SEND) {
+ DP_ERR(dev,
+ "gsi post send: failed due to unsupported opcode %d\n",
+ wr->opcode);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ memset(&params, 0, sizeof(params));
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
+ if (rc) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ goto err;
+ }
+
+ rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params);
+ if (!rc) {
+ qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+ qedr_inc_sw_prod(&qp->sq);
+ DP_DEBUG(qp->dev, QEDR_MSG_GSI,
+ "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
+ wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
+ } else {
+ if (rc == QED_ROCE_TX_HEAD_FAILURE) {
+ /* TX failed while posting header - release resources */
+ dma_free_coherent(&dev->pdev->dev, pkt->header.len,
+ pkt->header.vaddr, pkt->header.baddr);
+ kfree(pkt);
+ } else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
+ /* NTD since TX failed while posting a fragment. We will
+ * release the resources on TX callback
+ */
+ }
+
+ DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
+ rc = -EAGAIN;
+ *bad_wr = wr;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (wr->next) {
+ DP_ERR(dev,
+ "gsi post send: failed second WR. Only one WR may be passed at a time\n");
+ *bad_wr = wr->next;
+ rc = -EINVAL;
+ }
+
+ return rc;
+
+err:
+ *bad_wr = wr;
+ return rc;
+}
+
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qed_roce_ll2_buffer buf;
+ unsigned long flags;
+ int status = 0;
+ int rc;
+
+ if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
+ (qp->state != QED_ROCE_QP_STATE_RTS)) {
+ *bad_wr = wr;
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
+ qp->state);
+ return -EINVAL;
+ }
+
+ memset(&buf, 0, sizeof(buf));
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ while (wr) {
+ if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
+ wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
+ goto err;
+ }
+
+ buf.baddr = wr->sg_list[0].addr;
+ buf.len = wr->sg_list[0].length;
+
+ rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1);
+ if (rc) {
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer (rc=%d)\n",
+ rc);
+ goto err;
+ }
+
+ memset(&qp->rqe_wr_id[qp->rq.prod], 0,
+ sizeof(qp->rqe_wr_id[qp->rq.prod]));
+ qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
+ qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+
+ qedr_inc_sw_prod(&qp->rq);
+
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return status;
+err:
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ return -ENOMEM;
+}
+
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+ int i = 0;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+
+ while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
+ memset(&wc[i], 0, sizeof(*wc));
+
+ wc[i].qp = &qp->ibqp;
+ wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+ wc[i].opcode = IB_WC_RECV;
+ wc[i].pkey_index = 0;
+ wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
+ IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
+ /* sg_list[0]: currently only one recv SGE is supported */
+ wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
+ wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
+ ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
+ wc[i].wc_flags |= IB_WC_WITH_SMAC;
+ if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+ wc[i].wc_flags |= IB_WC_WITH_VLAN;
+ wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+ }
+
+ qedr_inc_sw_cons(&qp->rq);
+ i++;
+ }
+
+ while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
+ memset(&wc[i], 0, sizeof(*wc));
+
+ wc[i].qp = &qp->ibqp;
+ wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+ wc[i].opcode = IB_WC_SEND;
+ wc[i].status = IB_WC_SUCCESS;
+
+ qedr_inc_sw_cons(&qp->sq);
+ i++;
+ }
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ DP_DEBUG(dev, QEDR_MSG_GSI,
+ "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
+ num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
+ qp->sq.gsi_cons, qp->ibqp.qp_num);
+
+ return i;
+}
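
The GSI data path above keeps its own software indices: qedr_gsi_post_send() and qedr_gsi_post_recv() advance the producer side with qedr_inc_sw_prod(), the LL2 completion callbacks advance the gsi_cons counters, and qedr_gsi_poll_cq() reports the gap and advances the consumer side with qedr_inc_sw_cons(). A minimal sketch of the wraparound arithmetic such helpers typically use follows; the structure and field names (prod, cons, max_wr) are assumptions for illustration only, not the driver's exact qedr_qp_hwq_info layout.

struct sw_ring {
	u16 prod;	/* next slot the post path will fill */
	u16 cons;	/* next slot the poll path will report */
	u16 max_wr;	/* number of slots in the ring */
};

/* Advance an index by one, wrapping at the ring size. */
static void sw_ring_inc(u16 *idx, u16 max_wr)
{
	*idx = (*idx + 1) % max_wr;
}
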
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_cm.h
new file mode 100644
index 000000000000..9ba6e15cd93f
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_cm.h
@@ -0,0 +1,61 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef LINUX_QEDR_CM_H_
+#define LINUX_QEDR_CM_H_
+
+#define QEDR_GSI_MAX_RECV_WR (4096)
+#define QEDR_GSI_MAX_SEND_WR (4096)
+
+#define QEDR_GSI_MAX_RECV_SGE (1) /* LL2 FW limitation */
+
+#define ETH_P_ROCE (0x8915)
+#define QEDR_ROCE_V2_UDP_SPORT (0000)
+
+static inline u32 qedr_get_ipv4_from_gid(u8 *gid)
+{
+ return *(u32 *)(void *)&gid[12];
+}
+
+/* RDMA CM */
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp);
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_qp_init_attr *attrs);
+int qedr_destroy_gsi_qp(struct qedr_dev *dev);
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info);
+#endif
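
qedr_get_ipv4_from_gid() above relies on the RoCEv2 convention that an IPv4 GID is the IPv4-mapped IPv6 address ::ffff:a.b.c.d, so the address occupies bytes 12-15 of the 16-byte GID. A short usage sketch, with a made-up GID value for illustration:

/* Illustrative only: GID for ::ffff:192.0.2.1 */
u8 gid[16] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xff, 0xff, 192, 0, 2, 1
};
u32 addr = qedr_get_ipv4_from_gid(gid);
/* addr holds the four address bytes exactly as stored in the GID, i.e. in
 * network byte order; get_gid_info_from_table() in verbs.c converts the
 * result with ntohl() before handing it to the firmware.
 */
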
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi.h b/drivers/infiniband/hw/qedr/qedr_hsi.h
new file mode 100644
index 000000000000..66d27521373f
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_hsi.h
@@ -0,0 +1,56 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QED_HSI_ROCE__
+#define __QED_HSI_ROCE__
+
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/roce_common.h>
+#include "qedr_hsi_rdma.h"
+
+/* Affiliated asynchronous events / errors enumeration */
+enum roce_async_events_type {
+ ROCE_ASYNC_EVENT_NONE = 0,
+ ROCE_ASYNC_EVENT_COMM_EST = 1,
+ ROCE_ASYNC_EVENT_SQ_DRAINED,
+ ROCE_ASYNC_EVENT_SRQ_LIMIT,
+ ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
+ ROCE_ASYNC_EVENT_CQ_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
+ ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
+ ROCE_ASYNC_EVENT_SRQ_EMPTY,
+ MAX_ROCE_ASYNC_EVENTS_TYPE
+};
+
+#endif /* __QED_HSI_ROCE__ */
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
new file mode 100644
index 000000000000..5c98d2055cad
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -0,0 +1,748 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QED_HSI_RDMA__
+#define __QED_HSI_RDMA__
+
+#include <linux/qed/rdma_common.h>
+
+/* rdma completion notification queue element */
+struct rdma_cnqe {
+ struct regpair cq_handle;
+};
+
+struct rdma_cqe_responder {
+ struct regpair srq_wr_id;
+ struct regpair qp_handle;
+ __le32 imm_data_or_inv_r_Key;
+ __le32 length;
+ __le32 imm_data_hi;
+ __le16 rq_cons;
+ u8 flags;
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_RESPONDER_TYPE_MASK 0x3
+#define RDMA_CQE_RESPONDER_TYPE_SHIFT 1
+#define RDMA_CQE_RESPONDER_INV_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT 3
+#define RDMA_CQE_RESPONDER_IMM_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT 4
+#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT 5
+#define RDMA_CQE_RESPONDER_RESERVED2_MASK 0x3
+#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT 6
+ u8 status;
+};
+
+struct rdma_cqe_requester {
+ __le16 sq_cons;
+ __le16 reserved0;
+ __le32 reserved1;
+ struct regpair qp_handle;
+ struct regpair reserved2;
+ __le32 reserved3;
+ __le16 reserved4;
+ u8 flags;
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_REQUESTER_TYPE_MASK 0x3
+#define RDMA_CQE_REQUESTER_TYPE_SHIFT 1
+#define RDMA_CQE_REQUESTER_RESERVED5_MASK 0x1F
+#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT 3
+ u8 status;
+};
+
+struct rdma_cqe_common {
+ struct regpair reserved0;
+ struct regpair qp_handle;
+ __le16 reserved1[7];
+ u8 flags;
+#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_COMMON_TYPE_MASK 0x3
+#define RDMA_CQE_COMMON_TYPE_SHIFT 1
+#define RDMA_CQE_COMMON_RESERVED2_MASK 0x1F
+#define RDMA_CQE_COMMON_RESERVED2_SHIFT 3
+ u8 status;
+};
+
+/* rdma completion queue element */
+union rdma_cqe {
+ struct rdma_cqe_responder resp;
+ struct rdma_cqe_requester req;
+ struct rdma_cqe_common cmn;
+};
+
+/* CQE requester status enumeration */
+enum rdma_cqe_requester_status_enum {
+ RDMA_CQE_REQ_STS_OK,
+ RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
+ RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
+ RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
+ RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
+ MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
+};
+
+/* CQE responder status enumeration */
+enum rdma_cqe_responder_status_enum {
+ RDMA_CQE_RESP_STS_OK,
+ RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
+ RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
+ RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
+ RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
+ MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
+};
+
+/* CQE type enumeration */
+enum rdma_cqe_type {
+ RDMA_CQE_TYPE_REQUESTER,
+ RDMA_CQE_TYPE_RESPONDER_RQ,
+ RDMA_CQE_TYPE_RESPONDER_SRQ,
+ RDMA_CQE_TYPE_INVALID,
+ MAX_RDMA_CQE_TYPE
+};
+
+struct rdma_sq_sge {
+ __le32 length;
+ struct regpair addr;
+ __le32 l_key;
+};
+
+struct rdma_rq_sge {
+ struct regpair addr;
+ __le32 length;
+ __le32 flags;
+#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF
+#define RDMA_RQ_SGE_L_KEY_SHIFT 0
+#define RDMA_RQ_SGE_NUM_SGES_MASK 0x7
+#define RDMA_RQ_SGE_NUM_SGES_SHIFT 26
+#define RDMA_RQ_SGE_RESERVED0_MASK 0x7
+#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
+};
+
+struct rdma_srq_sge {
+ struct regpair addr;
+ __le32 length;
+ __le32 l_key;
+};
+
+/* Rdma doorbell data for SQ and RQ */
+struct rdma_pwm_val16_data {
+ __le16 icid;
+ __le16 value;
+};
+
+union rdma_pwm_val16_data_union {
+ struct rdma_pwm_val16_data as_struct;
+ __le32 as_dword;
+};
+
+/* Rdma doorbell data for CQ */
+struct rdma_pwm_val32_data {
+ __le16 icid;
+ u8 agg_flags;
+ u8 params;
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3
+ __le32 value;
+};
+
+/* DIF Block size options */
+enum rdma_dif_block_size {
+ RDMA_DIF_BLOCK_512 = 0,
+ RDMA_DIF_BLOCK_4096 = 1,
+ MAX_RDMA_DIF_BLOCK_SIZE
+};
+
+/* DIF CRC initial value */
+enum rdma_dif_crc_seed {
+ RDMA_DIF_CRC_SEED_0000 = 0,
+ RDMA_DIF_CRC_SEED_FFFF = 1,
+ MAX_RDMA_DIF_CRC_SEED
+};
+
+/* RDMA DIF Error Result Structure */
+struct rdma_dif_error_result {
+ __le32 error_intervals;
+ __le32 dif_error_1st_interval;
+ u8 flags;
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT 0
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK 0xF
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT 3
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT 7
+ u8 reserved1[55];
+};
+
+/* DIF IO direction */
+enum rdma_dif_io_direction_flg {
+ RDMA_DIF_DIR_RX = 0,
+ RDMA_DIF_DIR_TX = 1,
+ MAX_RDMA_DIF_IO_DIRECTION_FLG
+};
+
+/* RDMA DIF Runt Result Structure */
+struct rdma_dif_runt_result {
+ __le16 guard_tag;
+ __le16 reserved[3];
+};
+
+/* Memory window type enumeration */
+enum rdma_mw_type {
+ RDMA_MW_TYPE_1,
+ RDMA_MW_TYPE_2A,
+ MAX_RDMA_MW_TYPE
+};
+
+struct rdma_sq_atomic_wqe {
+ __le32 reserved1;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ struct regpair remote_va;
+ __le32 r_key;
+ __le32 reserved2;
+ struct regpair cmp_data;
+ struct regpair swap_data;
+};
+
+/* First element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_1st {
+ __le32 reserved1;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_2nd {
+ struct regpair remote_va;
+ __le32 r_key;
+ __le32 reserved2;
+};
+
+/* Third element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_3rd {
+ struct regpair cmp_data;
+ struct regpair swap_data;
+};
+
+struct rdma_sq_bind_wqe {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7
+#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0
+#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F
+#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2
+ u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_BIND_WQE_RESERVED2_MASK 0x7
+#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ __le32 parent_l_key;
+ __le32 reserved4;
+};
+
+/* First element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_1st {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_2nd {
+ u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0
+#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2
+ u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK 0x7
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ __le32 parent_l_key;
+ __le32 reserved4;
+};
+
+/* Structure with only the SQ WQE common
+ * fields. Size is of one SQ element (16B)
+ */
+struct rdma_sq_common_wqe {
+ __le32 reserved1[3];
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK 0x7
+#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+struct rdma_sq_fmr_wqe {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT 5
+#define RDMA_SQ_FMR_WQE_BIND_EN_MASK 0x1
+#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT 6
+#define RDMA_SQ_FMR_WQE_RESERVED1_MASK 0x1
+#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT 7
+ u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_FMR_WQE_RESERVED2_MASK 0x7
+#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ struct regpair pbl_addr;
+ __le32 dif_base_ref_tag;
+ __le16 dif_app_tag;
+ __le16 dif_app_tag_mask;
+ __le16 dif_runt_crc_value;
+ __le16 dif_flags;
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7
+ __le32 Reserved5;
+};
+
+/* First element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_1st {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK 0x3
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_2nd {
+ u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT 5
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT 6
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT 7
+ u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK 0x7
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ struct regpair pbl_addr;
+};
+
+/* Third element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_3rd {
+ __le32 dif_base_ref_tag;
+ __le16 dif_app_tag;
+ __le16 dif_app_tag_mask;
+ __le16 dif_runt_crc_value;
+ __le16 dif_flags;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7
+ __le32 Reserved5;
+};
+
+struct rdma_sq_local_inv_wqe {
+ struct regpair reserved;
+ __le32 inv_l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+struct rdma_sq_rdma_wqe {
+ __le32 imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ struct regpair remote_va;
+ __le32 r_key;
+ u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F
+#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3
+ u8 reserved2[3];
+};
+
+/* First element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_1st {
+ __le32 imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x3
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_2nd {
+ struct regpair remote_va;
+ __le32 r_key;
+ u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK 0x1F
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT 3
+ u8 reserved2[3];
+};
+
+/* SQ WQE req type enumeration */
+enum rdma_sq_req_type {
+ RDMA_SQ_REQ_TYPE_SEND,
+ RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
+ RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
+ RDMA_SQ_REQ_TYPE_RDMA_WR,
+ RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
+ RDMA_SQ_REQ_TYPE_RDMA_RD,
+ RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
+ RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
+ RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
+ RDMA_SQ_REQ_TYPE_FAST_MR,
+ RDMA_SQ_REQ_TYPE_BIND,
+ RDMA_SQ_REQ_TYPE_INVALID,
+ MAX_RDMA_SQ_REQ_TYPE
+};
+
+struct rdma_sq_send_wqe {
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_SEND_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_SEND_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ __le32 reserved1[4];
+};
+
+struct rdma_sq_send_wqe_1st {
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+struct rdma_sq_send_wqe_2st {
+ __le32 reserved1[4];
+};
+
+#endif /* __QED_HSI_RDMA__ */
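
Every flags byte in this header comes with paired *_MASK/*_SHIFT defines: a field is read by shifting right and masking, and written by OR-ing a shifted value in. The qed common headers wrap this pattern in GET_FIELD()/SET_FIELD() helpers (SET_FIELD() is used below in verbs.c when building the modify-QP parameters). A small sketch for reading the requester CQE type:

static inline u8 rdma_cqe_req_type(const struct rdma_cqe_requester *req)
{
	/* Equivalent to GET_FIELD(req->flags, RDMA_CQE_REQUESTER_TYPE) */
	return (req->flags >> RDMA_CQE_REQUESTER_TYPE_SHIFT) &
	       RDMA_CQE_REQUESTER_TYPE_MASK;
}
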
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
new file mode 100644
index 000000000000..a61514296767
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -0,0 +1,3547 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+#include <linux/iommu.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include "qedr_hsi.h"
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_cm.h"
+
+#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+
+int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+ if (index > QEDR_ROCE_PKEY_TABLE_LEN)
+ return -EINVAL;
+
+ *pkey = QEDR_ROCE_PKEY_DEFAULT;
+ return 0;
+}
+
+int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *sgid)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ int rc = 0;
+
+ if (!rdma_cap_roce_gid_table(ibdev, port))
+ return -ENODEV;
+
+ rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
+ if (rc == -EAGAIN) {
+ memcpy(sgid, &zgid, sizeof(*sgid));
+ return 0;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
+ sgid->global.interface_id, sgid->global.subnet_prefix);
+
+ return rc;
+}
+
+int qedr_add_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr, void **context)
+{
+ if (!rdma_cap_roce_gid_table(device, port_num))
+ return -EINVAL;
+
+ if (port_num > QEDR_MAX_PORT)
+ return -EINVAL;
+
+ if (!context)
+ return -EINVAL;
+
+ return 0;
+}
+
+int qedr_del_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, void **context)
+{
+ if (!rdma_cap_roce_gid_table(device, port_num))
+ return -EINVAL;
+
+ if (port_num > QEDR_MAX_PORT)
+ return -EINVAL;
+
+ if (!context)
+ return -EINVAL;
+
+ return 0;
+}
+
+int qedr_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qedr_device_attr *qattr = &dev->attr;
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev,
+ "qedr_query_device called with invalid params rdma_ctx=%p\n",
+ dev->rdma_ctx);
+ return -EINVAL;
+ }
+
+ memset(attr, 0, sizeof(*attr));
+
+ attr->fw_ver = qattr->fw_ver;
+ attr->sys_image_guid = qattr->sys_image_guid;
+ attr->max_mr_size = qattr->max_mr_size;
+ attr->page_size_cap = qattr->page_size_caps;
+ attr->vendor_id = qattr->vendor_id;
+ attr->vendor_part_id = qattr->vendor_part_id;
+ attr->hw_ver = qattr->hw_ver;
+ attr->max_qp = qattr->max_qp;
+ attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
+ attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
+ IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
+
+ attr->max_sge = qattr->max_sge;
+ attr->max_sge_rd = qattr->max_sge;
+ attr->max_cq = qattr->max_cq;
+ attr->max_cqe = qattr->max_cqe;
+ attr->max_mr = qattr->max_mr;
+ attr->max_mw = qattr->max_mw;
+ attr->max_pd = qattr->max_pd;
+ attr->atomic_cap = dev->atomic_cap;
+ attr->max_fmr = qattr->max_fmr;
+ attr->max_map_per_fmr = 16;
+ attr->max_qp_init_rd_atom =
+ 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
+ attr->max_qp_rd_atom =
+ min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
+ attr->max_qp_init_rd_atom);
+
+ attr->max_srq = qattr->max_srq;
+ attr->max_srq_sge = qattr->max_srq_sge;
+ attr->max_srq_wr = qattr->max_srq_wr;
+
+ attr->local_ca_ack_delay = qattr->dev_ack_delay;
+ attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
+ attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
+ attr->max_ah = qattr->max_ah;
+
+ return 0;
+}
+
+#define QEDR_SPEED_SDR (1)
+#define QEDR_SPEED_DDR (2)
+#define QEDR_SPEED_QDR (4)
+#define QEDR_SPEED_FDR10 (8)
+#define QEDR_SPEED_FDR (16)
+#define QEDR_SPEED_EDR (32)
+
+static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
+ u8 *ib_width)
+{
+ switch (speed) {
+ case 1000:
+ *ib_speed = QEDR_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+ case 10000:
+ *ib_speed = QEDR_SPEED_QDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 20000:
+ *ib_speed = QEDR_SPEED_DDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 25000:
+ *ib_speed = QEDR_SPEED_EDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 40000:
+ *ib_speed = QEDR_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 50000:
+ *ib_speed = QEDR_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 100000:
+ *ib_speed = QEDR_SPEED_EDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ default:
+ /* Unsupported */
+ *ib_speed = QEDR_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ }
+}
+
+int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
+{
+ struct qedr_dev *dev;
+ struct qed_rdma_port *rdma_port;
+
+ dev = get_qedr_dev(ibdev);
+ if (port > 1) {
+ DP_ERR(dev, "invalid_port=0x%x\n", port);
+ return -EINVAL;
+ }
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev, "rdma_ctx is NULL\n");
+ return -EINVAL;
+ }
+
+ rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
+ memset(attr, 0, sizeof(*attr));
+
+ if (rdma_port->port_state == QED_RDMA_PORT_UP) {
+ attr->state = IB_PORT_ACTIVE;
+ attr->phys_state = 5;
+ } else {
+ attr->state = IB_PORT_DOWN;
+ attr->phys_state = 3;
+ }
+ attr->max_mtu = IB_MTU_4096;
+ attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
+ attr->lid = 0;
+ attr->lmc = 0;
+ attr->sm_lid = 0;
+ attr->sm_sl = 0;
+ attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
+ attr->gid_tbl_len = QEDR_MAX_SGID;
+ attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+ attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
+ attr->qkey_viol_cntr = 0;
+ get_link_speed_and_width(rdma_port->link_speed,
+ &attr->active_speed, &attr->active_width);
+ attr->max_msg_sz = rdma_port->max_msg_size;
+ attr->max_vl_num = 4;
+
+ return 0;
+}
+
+int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
+ struct ib_port_modify *props)
+{
+ struct qedr_dev *dev;
+
+ dev = get_qedr_dev(ibdev);
+ if (port > 1) {
+ DP_ERR(dev, "invalid_port=0x%x\n", port);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
+ unsigned long len)
+{
+ struct qedr_mm *mm;
+
+ mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+ if (!mm)
+ return -ENOMEM;
+
+ mm->key.phy_addr = phy_addr;
+ /* This function might be called with a length which is not a multiple
+ * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
+ * forces this granularity by increasing the requested size if needed.
+ * When qedr_mmap is called, it will search the list with the updated
+ * length as a key. To prevent search failures, the length is rounded up
+ * in advance to PAGE_SIZE.
+ */
+ mm->key.len = roundup(len, PAGE_SIZE);
+ INIT_LIST_HEAD(&mm->entry);
+
+ mutex_lock(&uctx->mm_list_lock);
+ list_add(&mm->entry, &uctx->mm_head);
+ mutex_unlock(&uctx->mm_list_lock);
+
+ DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+ "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
+ (unsigned long long)mm->key.phy_addr,
+ (unsigned long)mm->key.len, uctx);
+
+ return 0;
+}
+
+static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
+ unsigned long len)
+{
+ bool found = false;
+ struct qedr_mm *mm;
+
+ mutex_lock(&uctx->mm_list_lock);
+ list_for_each_entry(mm, &uctx->mm_head, entry) {
+ if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+ continue;
+
+ found = true;
+ break;
+ }
+ mutex_unlock(&uctx->mm_list_lock);
+ DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+ "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
+ mm->key.phy_addr, mm->key.len, uctx, found);
+
+ return found;
+}
+
+struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ int rc;
+ struct qedr_ucontext *ctx;
+ struct qedr_alloc_ucontext_resp uresp;
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qed_rdma_add_user_out_params oparams;
+
+ if (!udata)
+ return ERR_PTR(-EFAULT);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
+ if (rc) {
+ DP_ERR(dev,
+ "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
+ rc);
+ goto err;
+ }
+
+ ctx->dpi = oparams.dpi;
+ ctx->dpi_addr = oparams.dpi_addr;
+ ctx->dpi_phys_addr = oparams.dpi_phys_addr;
+ ctx->dpi_size = oparams.dpi_size;
+ INIT_LIST_HEAD(&ctx->mm_head);
+ mutex_init(&ctx->mm_list_lock);
+
+ memset(&uresp, 0, sizeof(uresp));
+
+ uresp.db_pa = ctx->dpi_phys_addr;
+ uresp.db_size = ctx->dpi_size;
+ uresp.max_send_wr = dev->attr.max_sqe;
+ uresp.max_recv_wr = dev->attr.max_rqe;
+ uresp.max_srq_wr = dev->attr.max_srq_wr;
+ uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+ uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
+ uresp.max_cqes = QEDR_MAX_CQES;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ goto err;
+
+ ctx->dev = dev;
+
+ rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
+ if (rc)
+ goto err;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
+ &ctx->ibucontext);
+ return &ctx->ibucontext;
+
+err:
+ kfree(ctx);
+ return ERR_PTR(rc);
+}
+
+int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
+ struct qedr_mm *mm, *tmp;
+ int status = 0;
+
+ DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
+ uctx);
+ uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
+
+ list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
+ DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+ "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
+ mm->key.phy_addr, mm->key.len, uctx);
+ list_del(&mm->entry);
+ kfree(mm);
+ }
+
+ kfree(uctx);
+ return status;
+}
+
+int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
+ struct qedr_dev *dev = get_qedr_dev(context->device);
+ unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
+ u64 unmapped_db = dev->db_phys_addr;
+ unsigned long len = (vma->vm_end - vma->vm_start);
+ int rc = 0;
+ bool found;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
+ vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
+ if (vma->vm_start & (PAGE_SIZE - 1)) {
+ DP_ERR(dev, "Vma_start not page aligned = %ld\n",
+ vma->vm_start);
+ return -EINVAL;
+ }
+
+ found = qedr_search_mmap(ucontext, vm_page, len);
+ if (!found) {
+ DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
+ vma->vm_pgoff);
+ return -EINVAL;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
+
+ if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
+ dev->db_size))) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
+ if (vma->vm_flags & VM_READ) {
+ DP_ERR(dev, "Trying to map doorbell bar for read\n");
+ return -EPERM;
+ }
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ PAGE_SIZE, vma->vm_page_prot);
+ } else {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
+ rc = remap_pfn_range(vma, vma->vm_start,
+ vma->vm_pgoff, len, vma->vm_page_prot);
+ }
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
+ return rc;
+}
+
+struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qedr_ucontext *uctx = NULL;
+ struct qedr_alloc_pd_uresp uresp;
+ struct qedr_pd *pd;
+ u16 pd_id;
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
+ (udata && context) ? "User Lib" : "Kernel");
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev, "invlaid RDMA context\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+
+ uresp.pd_id = pd_id;
+ pd->pd_id = pd_id;
+
+ if (udata && context) {
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
+ uctx = get_qedr_ucontext(context);
+ uctx->pd = pd;
+ pd->uctx = uctx;
+ }
+
+ return &pd->ibpd;
+}
+
+int qedr_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+
+ if (!pd)
+ pr_err("Invalid PD received in dealloc_pd\n");
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
+ dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
+
+ kfree(pd);
+
+ return 0;
+}
+
+static void qedr_free_pbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int i;
+
+ for (i = 0; i < pbl_info->num_pbls; i++) {
+ if (!pbl[i].va)
+ continue;
+ dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+ pbl[i].va, pbl[i].pa);
+ }
+
+ kfree(pbl);
+}
+
+#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
+#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
+
+#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
+#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
+#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
+
+static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info,
+ gfp_t flags)
+{
+ struct pci_dev *pdev = dev->pdev;
+ struct qedr_pbl *pbl_table;
+ dma_addr_t *pbl_main_tbl;
+ dma_addr_t pa;
+ void *va;
+ int i;
+
+ pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
+ if (!pbl_table)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < pbl_info->num_pbls; i++) {
+ va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
+ &pa, flags);
+ if (!va)
+ goto err;
+
+ memset(va, 0, pbl_info->pbl_size);
+ pbl_table[i].va = va;
+ pbl_table[i].pa = pa;
+ }
+
+ /* Two-layer PBLs: when there is more than one PBL page, the first
+ * page must be initialized with the physical addresses of all the
+ * remaining pages.
+ */
+ pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
+ for (i = 0; i < pbl_info->num_pbls - 1; i++)
+ pbl_main_tbl[i] = pbl_table[i + 1].pa;
+
+ return pbl_table;
+
+err:
+ for (i--; i >= 0; i--)
+ dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+ pbl_table[i].va, pbl_table[i].pa);
+
+ qedr_free_pbl(dev, pbl_info, pbl_table);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info,
+ u32 num_pbes, int two_layer_capable)
+{
+ u32 pbl_capacity;
+ u32 pbl_size;
+ u32 num_pbls;
+
+ if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
+ if (num_pbes > MAX_PBES_TWO_LAYER) {
+ DP_ERR(dev, "prepare pbl table: too many pages %d\n",
+ num_pbes);
+ return -EINVAL;
+ }
+
+ /* calculate required pbl page size */
+ pbl_size = MIN_FW_PBL_PAGE_SIZE;
+ pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
+ NUM_PBES_ON_PAGE(pbl_size);
+
+ while (pbl_capacity < num_pbes) {
+ pbl_size *= 2;
+ pbl_capacity = pbl_size / sizeof(u64);
+ pbl_capacity = pbl_capacity * pbl_capacity;
+ }
+
+ num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
+ num_pbls++; /* One more for layer 0 (it points to the other PBLs) */
+ pbl_info->two_layered = true;
+ } else {
+ /* One layered PBL */
+ num_pbls = 1;
+ pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
+ roundup_pow_of_two((num_pbes * sizeof(u64))));
+ pbl_info->two_layered = false;
+ }
+
+ pbl_info->num_pbls = num_pbls;
+ pbl_info->pbl_size = pbl_size;
+ pbl_info->num_pbes = num_pbes;
+
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
+ pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
+
+ return 0;
+}
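
For orientation on the two-layer sizing in qedr_prepare_pbl_tbl() above: a 4 KiB PBL page holds NUM_PBES_ON_PAGE(4K) = 4096 / 8 = 512 entries, so the first two-layer capacity the loop checks is 512 * 512 = 262,144 PBEs; doubling pbl_size to 8 KiB raises that to 1024 * 1024 = 1,048,576, and so on. The earlier MAX_PBES_TWO_LAYER bound (8192 * 8192 = 67,108,864, i.e. two layers of 64 KiB pages) guarantees the loop stops before pbl_size exceeds MAX_FW_PBL_PAGE_SIZE.
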
+
+static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
+ struct qedr_pbl *pbl,
+ struct qedr_pbl_info *pbl_info)
+{
+ int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+ struct qedr_pbl *pbl_tbl;
+ struct scatterlist *sg;
+ struct regpair *pbe;
+ int entry;
+ u32 addr;
+
+ if (!pbl_info->num_pbes)
+ return;
+
+ /* If the PBL is two-layered, the first PBL page points to the rest of
+ * the PBL pages, so the first data entry lives in the second PBL in
+ * the table.
+ */
+ if (pbl_info->two_layered)
+ pbl_tbl = &pbl[1];
+ else
+ pbl_tbl = pbl;
+
+ pbe = (struct regpair *)pbl_tbl->va;
+ if (!pbe) {
+ DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
+ return;
+ }
+
+ pbe_cnt = 0;
+
+ shift = ilog2(umem->page_size);
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ pages = sg_dma_len(sg) >> shift;
+ for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
+ /* store the page address in pbe */
+ pbe->lo = cpu_to_le32(sg_dma_address(sg) +
+ umem->page_size * pg_cnt);
+ addr = upper_32_bits(sg_dma_address(sg) +
+ umem->page_size * pg_cnt);
+ pbe->hi = cpu_to_le32(addr);
+ pbe_cnt++;
+ total_num_pbes++;
+ pbe++;
+
+ if (total_num_pbes == pbl_info->num_pbes)
+ return;
+
+ /* If the current PBL page is full of PBEs, move on
+ * to the next PBL page.
+ */
+ if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct regpair *)pbl_tbl->va;
+ pbe_cnt = 0;
+ }
+ }
+ }
+}
+
+static int qedr_copy_cq_uresp(struct qedr_dev *dev,
+ struct qedr_cq *cq, struct ib_udata *udata)
+{
+ struct qedr_create_cq_uresp uresp;
+ int rc;
+
+ memset(&uresp, 0, sizeof(uresp));
+
+ uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+ uresp.icid = cq->icid;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
+
+ return rc;
+}
+
+static void consume_cqe(struct qedr_cq *cq)
+{
+ if (cq->latest_cqe == cq->toggle_cqe)
+ cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+
+ cq->latest_cqe = qed_chain_consume(&cq->pbl);
+}
+
+static inline int qedr_align_cq_entries(int entries)
+{
+ u64 size, aligned_size;
+
+ /* We allocate an extra entry that we don't report to the FW. */
+ size = (entries + 1) * QEDR_CQE_SIZE;
+ aligned_size = ALIGN(size, PAGE_SIZE);
+
+ return aligned_size / QEDR_CQE_SIZE;
+}
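
As a worked example for qedr_align_cq_entries(): assuming 4 KiB pages and that QEDR_CQE_SIZE equals the 32-byte union rdma_cqe defined in qedr_hsi_rdma.h (an assumption, since the define lives in qedr.h and is not shown here), a request for 100 entries becomes (100 + 1) * 32 = 3,232 bytes, ALIGN() rounds that up to 4,096, and the function returns 4096 / 32 = 128 entries, one of which is the hidden extra mentioned in the comment.
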
+
+static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
+ struct qedr_dev *dev,
+ struct qedr_userq *q,
+ u64 buf_addr, size_t buf_len,
+ int access, int dmasync)
+{
+ int page_cnt;
+ int rc;
+
+ q->buf_addr = buf_addr;
+ q->buf_len = buf_len;
+ q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
+ if (IS_ERR(q->umem)) {
+ DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
+ PTR_ERR(q->umem));
+ return PTR_ERR(q->umem);
+ }
+
+ page_cnt = ib_umem_page_count(q->umem);
+ rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+ if (rc)
+ goto err0;
+
+ q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
+ if (IS_ERR(q->pbl_tbl)) {
+ rc = PTR_ERR(q->pbl_tbl);
+ goto err0;
+ }
+
+ qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+
+ return 0;
+
+err0:
+ ib_umem_release(q->umem);
+
+ return rc;
+}
+
+static inline void qedr_init_cq_params(struct qedr_cq *cq,
+ struct qedr_ucontext *ctx,
+ struct qedr_dev *dev, int vector,
+ int chain_entries, int page_cnt,
+ u64 pbl_ptr,
+ struct qed_rdma_create_cq_in_params
+ *params)
+{
+ memset(params, 0, sizeof(*params));
+ params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
+ params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
+ params->cnq_id = vector;
+ params->cq_size = chain_entries - 1;
+ params->dpi = (ctx) ? ctx->dpi : dev->dpi;
+ params->pbl_num_pages = page_cnt;
+ params->pbl_ptr = pbl_ptr;
+ params->pbl_two_level = 0;
+}
+
+static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
+{
+ /* Flush data before signalling doorbell */
+ wmb();
+ cq->db.data.agg_flags = flags;
+ cq->db.data.value = cpu_to_le32(cons);
+ writeq(cq->db.raw, cq->db_addr);
+
+ /* Make sure write would stick */
+ mmiowb();
+}
+
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ unsigned long sflags;
+
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ return 0;
+
+ spin_lock_irqsave(&cq->cq_lock, sflags);
+
+ cq->arm_flags = 0;
+
+ if (flags & IB_CQ_SOLICITED)
+ cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
+
+ if (flags & IB_CQ_NEXT_COMP)
+ cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
+
+ doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+ spin_unlock_irqrestore(&cq->cq_lock, sflags);
+
+ return 0;
+}
+
+struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *ib_ctx, struct ib_udata *udata)
+{
+ struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
+ struct qed_rdma_destroy_cq_out_params destroy_oparams;
+ struct qed_rdma_destroy_cq_in_params destroy_iparams;
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qed_rdma_create_cq_in_params params;
+ struct qedr_create_cq_ureq ureq;
+ int vector = attr->comp_vector;
+ int entries = attr->cqe;
+ struct qedr_cq *cq;
+ int chain_entries;
+ int page_cnt;
+ u64 pbl_ptr;
+ u16 icid;
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "create_cq: called from %s. entries=%d, vector=%d\n",
+ udata ? "User Lib" : "Kernel", entries, vector);
+
+ if (entries > QEDR_MAX_CQES) {
+ DP_ERR(dev,
+ "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
+ entries, QEDR_MAX_CQES);
+ return ERR_PTR(-EINVAL);
+ }
+
+ chain_entries = qedr_align_cq_entries(entries);
+ chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ return ERR_PTR(-ENOMEM);
+
+ if (udata) {
+ memset(&ureq, 0, sizeof(ureq));
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ DP_ERR(dev,
+ "create cq: problem copying data from user space\n");
+ goto err0;
+ }
+
+ if (!ureq.len) {
+ DP_ERR(dev,
+ "create cq: cannot create a cq with 0 entries\n");
+ goto err0;
+ }
+
+ cq->cq_type = QEDR_CQ_TYPE_USER;
+
+ rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
+ ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
+ if (rc)
+ goto err0;
+
+ pbl_ptr = cq->q.pbl_tbl->pa;
+ page_cnt = cq->q.pbl_info.num_pbes;
+ } else {
+ cq->cq_type = QEDR_CQ_TYPE_KERNEL;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ chain_entries,
+ sizeof(union rdma_cqe),
+ &cq->pbl);
+ if (rc)
+ goto err1;
+
+ page_cnt = qed_chain_get_page_cnt(&cq->pbl);
+ pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
+ }
+
+ qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
+ pbl_ptr, &params);
+
+ rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
+ if (rc)
+ goto err2;
+
+ cq->icid = icid;
+ cq->sig = QEDR_CQ_MAGIC_NUMBER;
+ spin_lock_init(&cq->cq_lock);
+
+ if (ib_ctx) {
+ rc = qedr_copy_cq_uresp(dev, cq, udata);
+ if (rc)
+ goto err3;
+ } else {
+ /* Generate doorbell address. */
+ cq->db_addr = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+ cq->db.data.icid = cq->icid;
+ cq->db.data.params = DB_AGG_CMD_SET <<
+ RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
+
+ /* point to the very last element; once we pass it, the toggle bit flips */
+ cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
+ cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+ cq->latest_cqe = NULL;
+ consume_cqe(cq);
+ cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
+ cq->icid, cq, params.cq_size);
+
+ return &cq->ibcq;
+
+err3:
+ destroy_iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
+ &destroy_oparams);
+err2:
+ if (udata)
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+ else
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+err1:
+ if (udata)
+ ib_umem_release(cq->q.umem);
+err0:
+ kfree(cq);
+ return ERR_PTR(-EINVAL);
+}
+
+int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+ DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
+
+ return 0;
+}
+
+int qedr_destroy_cq(struct ib_cq *ibcq)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qed_rdma_destroy_cq_out_params oparams;
+ struct qed_rdma_destroy_cq_in_params iparams;
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d\n", cq->icid);
+
+	/* GSI CQs are handled by the driver, so they don't exist in the FW */
+ if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
+
+ if (ibcq->uobject && ibcq->uobject->context) {
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+ ib_umem_release(cq->q.umem);
+ }
+
+ kfree(cq);
+
+ return 0;
+}
+
+static inline int get_gid_info_from_table(struct ib_qp *ibqp,
+ struct ib_qp_attr *attr,
+ int attr_mask,
+ struct qed_rdma_modify_qp_in_params
+ *qp_params)
+{
+ enum rdma_network_type nw_type;
+ struct ib_gid_attr gid_attr;
+ union ib_gid gid;
+ u32 ipv4_addr;
+ int rc = 0;
+ int i;
+
+ rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
+ attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
+ if (rc)
+ return rc;
+
+ if (!memcmp(&gid, &zgid, sizeof(gid)))
+ return -ENOENT;
+
+ if (gid_attr.ndev) {
+ qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
+
+ dev_put(gid_attr.ndev);
+ nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
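+		/* Fill SGID/DGID and the RoCE mode according to the GID's
+		 * network type (RoCE v1 carries raw GIDs, RoCE v2 carries
+		 * IPv4/IPv6 addresses).
+		 */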
+ switch (nw_type) {
+ case RDMA_NETWORK_IPV6:
+ memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+ sizeof(qp_params->sgid));
+ memcpy(&qp_params->dgid.bytes[0],
+ &attr->ah_attr.grh.dgid,
+ sizeof(qp_params->dgid));
+ qp_params->roce_mode = ROCE_V2_IPV6;
+ SET_FIELD(qp_params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+ break;
+ case RDMA_NETWORK_IB:
+ memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+ sizeof(qp_params->sgid));
+ memcpy(&qp_params->dgid.bytes[0],
+ &attr->ah_attr.grh.dgid,
+ sizeof(qp_params->dgid));
+ qp_params->roce_mode = ROCE_V1;
+ break;
+ case RDMA_NETWORK_IPV4:
+ memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
+ memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
+ ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
+ qp_params->sgid.ipv4_addr = ipv4_addr;
+ ipv4_addr =
+ qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
+ qp_params->dgid.ipv4_addr = ipv4_addr;
+ SET_FIELD(qp_params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+ qp_params->roce_mode = ROCE_V2_IPV4;
+ break;
+ }
+ }
+
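+	/* Convert the GID dwords from network to host byte order */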
+ for (i = 0; i < 4; i++) {
+ qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
+ qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
+ }
+
+ if (qp_params->vlan_id >= VLAN_CFI_MASK)
+ qp_params->vlan_id = 0;
+
+ return 0;
+}
+
+static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
+ ib_umem_release(qp->usq.umem);
+}
+
+static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
+ ib_umem_release(qp->urq.umem);
+}
+
+static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
+ kfree(qp->wqe_wr_id);
+}
+
+static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
+ kfree(qp->rqe_wr_id);
+}
+
+static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ struct qedr_device_attr *qattr = &dev->attr;
+
+ /* QP0... attrs->qp_type == IB_QPT_GSI */
+ if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: unsupported qp type=0x%x requested\n",
+ attrs->qp_type);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_wr > qattr->max_sqe) {
+ DP_ERR(dev,
+ "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
+ attrs->cap.max_send_wr, qattr->max_sqe);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_inline_data > qattr->max_inline) {
+ DP_ERR(dev,
+ "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
+ attrs->cap.max_inline_data, qattr->max_inline);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_sge > qattr->max_sge) {
+ DP_ERR(dev,
+ "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
+ attrs->cap.max_send_sge, qattr->max_sge);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_recv_sge > qattr->max_sge) {
+ DP_ERR(dev,
+ "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
+ attrs->cap.max_recv_sge, qattr->max_sge);
+ return -EINVAL;
+ }
+
+ /* Unprivileged user space cannot create special QP */
+ if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+ DP_ERR(dev,
+ "create qp: userspace can't create special QPs of type=0x%x\n",
+ attrs->qp_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
+{
+ uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ uresp->rq_icid = qp->icid;
+}
+
+static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
+{
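+	/* The SQ is addressed by the CID following the QP's base icid;
+	 * the RQ (see qedr_copy_rq_uresp()) uses the base icid itself.
+	 */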
+ uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ uresp->sq_icid = qp->icid + 1;
+}
+
+static int qedr_copy_qp_uresp(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_udata *udata)
+{
+ struct qedr_create_qp_uresp uresp;
+ int rc;
+
+ memset(&uresp, 0, sizeof(uresp));
+ qedr_copy_sq_uresp(&uresp, qp);
+ qedr_copy_rq_uresp(&uresp, qp);
+
+ uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
+ uresp.qp_id = qp->qp_id;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev,
+ "create qp: failed a copy to user space with qp icid=0x%x.\n",
+ qp->icid);
+
+ return rc;
+}
+
+static void qedr_set_qp_init_params(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qedr_pd *pd,
+ struct ib_qp_init_attr *attrs)
+{
+ qp->pd = pd;
+
+ spin_lock_init(&qp->q_lock);
+
+ qp->qp_type = attrs->qp_type;
+ qp->max_inline_data = attrs->cap.max_inline_data;
+ qp->sq.max_sges = attrs->cap.max_send_sge;
+ qp->state = QED_ROCE_QP_STATE_RESET;
+ qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+ qp->sq_cq = get_qedr_cq(attrs->send_cq);
+ qp->rq_cq = get_qedr_cq(attrs->recv_cq);
+ qp->dev = dev;
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
+ pd->pd_id, qp->qp_type, qp->max_inline_data,
+ qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
+ qp->sq.max_sges, qp->sq_cq->icid);
+ qp->rq.max_sges = attrs->cap.max_recv_sge;
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
+ qp->rq.max_sges, qp->rq_cq->icid);
+}
+
+static inline void
+qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
+ struct qedr_create_qp_ureq *ureq)
+{
+ /* QP handle to be written in CQE */
+ params->qp_handle_lo = ureq->qp_handle_lo;
+ params->qp_handle_hi = ureq->qp_handle_hi;
+}
+
+static inline void
+qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qp->sq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ qp->sq.db_data.data.icid = qp->icid + 1;
+}
+
+static inline void
+qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qp->rq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ qp->rq.db_data.data.icid = qp->icid;
+}
+
+static inline int
+qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
+{
+ /* Allocate driver internal RQ array */
+ qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->rqe_wr_id)
+ return -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);
+
+ return 0;
+}
+
+static inline int
+qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ u32 temp_max_wr;
+
+ /* Allocate driver internal SQ array */
+ temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
+ temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);
+
+ /* temp_max_wr < attr->max_sqe < u16 so the casting is safe */
+ qp->sq.max_wr = (u16)temp_max_wr;
+ qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->wqe_wr_id)
+ return -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);
+
+ /* QP handle to be written in CQE */
+ params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
+ params->qp_handle_hi = upper_32_bits((uintptr_t)qp);
+
+ return 0;
+}
+
+static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ u32 n_sq_elems, n_sq_entries;
+ int rc;
+
+ /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
+ * the ring. The ring should allow at least a single WR, even if the
+ * user requested none, due to allocation issues.
+ */
+ n_sq_entries = attrs->cap.max_send_wr;
+ n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
+ n_sq_entries = max_t(u32, n_sq_entries, 1);
+ n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ n_sq_elems,
+ QEDR_SQE_ELEMENT_SIZE,
+ &qp->sq.pbl);
+ if (rc) {
+ DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
+ return rc;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_SQ,
+ "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
+ qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
+ n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
+ return 0;
+}
+
+static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ u32 n_rq_elems, n_rq_entries;
+ int rc;
+
+ /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
+	 * the ring. The ring should allow at least a single WR, even if the
+ * user requested none, due to allocation issues.
+ */
+ n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
+ n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ n_rq_elems,
+ QEDR_RQE_ELEMENT_SIZE,
+ &qp->rq.pbl);
+
+ if (rc) {
+ DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
+ return -ENOMEM;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_RQ,
+ "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
+ qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
+ n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
+
+ /* n_rq_entries < u16 so the casting is safe */
+ qp->rq.max_wr = (u16)n_rq_entries;
+
+ return 0;
+}
+
+static inline void
+qedr_init_qp_in_params_sq(struct qedr_dev *dev,
+ struct qedr_pd *pd,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ /* QP handle to be written in an async event */
+ params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
+ params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
+
+ params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
+ params->fmr_and_reserved_lkey = !udata;
+ params->pd = pd->pd_id;
+ params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
+ params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
+ params->max_sq_sges = 0;
+ params->stats_queue = 0;
+
+ if (udata) {
+ params->sq_num_pages = qp->usq.pbl_info.num_pbes;
+ params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
+ } else {
+ params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
+ params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
+ }
+}
+
+static inline void
+qedr_init_qp_in_params_rq(struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+ params->srq_id = 0;
+ params->use_srq = false;
+
+ if (udata) {
+ params->rq_num_pages = qp->urq.pbl_info.num_pbes;
+ params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+ } else {
+ params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
+ params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
+ }
+}
+
+static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
+ qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
+ qp->urq.buf_len);
+}
+
+static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
+ struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qedr_create_qp_ureq *ureq)
+{
+ int rc;
+
+ /* SQ - read access only (0), dma sync not required (0) */
+ rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
+ ureq->sq_len, 0, 0);
+ if (rc)
+ return rc;
+
+ /* RQ - read access only (0), dma sync not required (0) */
+ rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
+ ureq->rq_len, 0, 0);
+
+ if (rc)
+ qedr_cleanup_user_sq(dev, qp);
+ return rc;
+}
+
+static inline int
+qedr_init_kernel_qp(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ int rc;
+
+ rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
+ if (rc) {
+ DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
+ return rc;
+ }
+
+ rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
+ if (rc) {
+ dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
+ DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
+ return rc;
+ }
+
+ rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
+ if (rc) {
+ qedr_cleanup_kernel_sq(dev, qp);
+ DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
+ return rc;
+ }
+
+ rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
+ if (rc) {
+ DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
+ qedr_cleanup_kernel_sq(dev, qp);
+ dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
+ return rc;
+ }
+
+ return rc;
+}
+
+struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qed_rdma_create_qp_out_params out_params;
+ struct qed_rdma_create_qp_in_params in_params;
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct ib_ucontext *ib_ctx = NULL;
+ struct qedr_ucontext *ctx = NULL;
+ struct qedr_create_qp_ureq ureq;
+ struct qedr_qp *qp;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
+ udata ? "user library" : "kernel", pd);
+
+ rc = qedr_check_qp_attrs(ibpd, dev, attrs);
+ if (rc)
+ return ERR_PTR(rc);
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ if (attrs->srq)
+ return ERR_PTR(-EINVAL);
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
+ get_qedr_cq(attrs->send_cq),
+ get_qedr_cq(attrs->send_cq)->icid,
+ get_qedr_cq(attrs->recv_cq),
+ get_qedr_cq(attrs->recv_cq)->icid);
+
+ qedr_set_qp_init_params(dev, qp, pd, attrs);
+
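+	/* GSI QPs are not created in the FW; they take a dedicated driver
+	 * path and must not carry user data.
+	 */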
+ if (attrs->qp_type == IB_QPT_GSI) {
+ if (udata) {
+ DP_ERR(dev,
+ "create qp: unexpected udata when creating GSI QP\n");
+ goto err0;
+ }
+ return qedr_create_gsi_qp(dev, attrs, qp);
+ }
+
+ memset(&in_params, 0, sizeof(in_params));
+
+ if (udata) {
+ if (!(udata && ibpd->uobject && ibpd->uobject->context))
+ goto err0;
+
+ ib_ctx = ibpd->uobject->context;
+ ctx = get_qedr_ucontext(ib_ctx);
+
+ memset(&ureq, 0, sizeof(ureq));
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ DP_ERR(dev,
+ "create qp: problem copying data from user space\n");
+ goto err0;
+ }
+
+ rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
+ if (rc)
+ goto err0;
+
+ qedr_init_qp_user_params(&in_params, &ureq);
+ } else {
+ rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
+ if (rc)
+ goto err0;
+ }
+
+ qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
+ qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);
+
+ qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+ &in_params, &out_params);
+
+ if (!qp->qed_qp)
+ goto err1;
+
+ qp->qp_id = out_params.qp_id;
+ qp->icid = out_params.icid;
+ qp->ibqp.qp_num = qp->qp_id;
+
+ if (udata) {
+ rc = qedr_copy_qp_uresp(dev, qp, udata);
+ if (rc)
+ goto err2;
+
+ qedr_qp_user_print(dev, qp);
+ } else {
+ qedr_init_qp_kernel_doorbell_sq(dev, qp);
+ qedr_init_qp_kernel_doorbell_rq(dev, qp);
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
+ udata ? "user" : "kernel", qp);
+
+ return &qp->ibqp;
+
+err2:
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+ if (rc)
+ DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
+		DP_ERR(dev, "create qp: fatal fault. rc=%d\n", rc);
+ if (udata) {
+ qedr_cleanup_user_sq(dev, qp);
+ qedr_cleanup_user_rq(dev, qp);
+ } else {
+ qedr_cleanup_kernel_sq(dev, qp);
+ qedr_cleanup_kernel_rq(dev, qp);
+ }
+
+err0:
+ kfree(qp);
+
+ return ERR_PTR(-EFAULT);
+}
+
+enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+{
+ switch (qp_state) {
+ case QED_ROCE_QP_STATE_RESET:
+ return IB_QPS_RESET;
+ case QED_ROCE_QP_STATE_INIT:
+ return IB_QPS_INIT;
+ case QED_ROCE_QP_STATE_RTR:
+ return IB_QPS_RTR;
+ case QED_ROCE_QP_STATE_RTS:
+ return IB_QPS_RTS;
+ case QED_ROCE_QP_STATE_SQD:
+ return IB_QPS_SQD;
+ case QED_ROCE_QP_STATE_ERR:
+ return IB_QPS_ERR;
+ case QED_ROCE_QP_STATE_SQE:
+ return IB_QPS_SQE;
+ }
+ return IB_QPS_ERR;
+}
+
+enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+{
+ switch (qp_state) {
+ case IB_QPS_RESET:
+ return QED_ROCE_QP_STATE_RESET;
+ case IB_QPS_INIT:
+ return QED_ROCE_QP_STATE_INIT;
+ case IB_QPS_RTR:
+ return QED_ROCE_QP_STATE_RTR;
+ case IB_QPS_RTS:
+ return QED_ROCE_QP_STATE_RTS;
+ case IB_QPS_SQD:
+ return QED_ROCE_QP_STATE_SQD;
+ case IB_QPS_ERR:
+ return QED_ROCE_QP_STATE_ERR;
+ default:
+ return QED_ROCE_QP_STATE_ERR;
+ }
+}
+
+static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
+{
+ qed_chain_reset(&qph->pbl);
+ qph->prod = 0;
+ qph->cons = 0;
+ qph->wqe_cons = 0;
+ qph->db_data.data.value = cpu_to_le16(0);
+}
+
+static int qedr_update_qp_state(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ enum qed_roce_qp_state new_state)
+{
+ int status = 0;
+
+ if (new_state == qp->state)
+ return 1;
+
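+	/* Validate the requested transition against the RoCE QP state
+	 * machine (RESET -> INIT -> RTR -> RTS, with SQD and ERR as side
+	 * states).
+	 */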
+ switch (qp->state) {
+ case QED_ROCE_QP_STATE_RESET:
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_INIT:
+ qp->prev_wqe_size = 0;
+ qedr_reset_qp_hwq_info(&qp->sq);
+ qedr_reset_qp_hwq_info(&qp->rq);
+ break;
+ default:
+ status = -EINVAL;
+ break;
+		}
+ break;
+ case QED_ROCE_QP_STATE_INIT:
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTR:
+ /* Update doorbell (in case post_recv was
+ * done before move to RTR)
+ */
+ wmb();
+ writel(qp->rq.db_data.raw, qp->rq.db);
+ /* Make sure write takes effect */
+ mmiowb();
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+		}
+ break;
+ case QED_ROCE_QP_STATE_RTR:
+ /* RTR->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTS:
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+		}
+ break;
+ case QED_ROCE_QP_STATE_RTS:
+ /* RTS->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_SQD:
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+		}
+ break;
+ case QED_ROCE_QP_STATE_SQD:
+ /* SQD->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTS:
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+		}
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ /* ERR->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RESET:
+ break;
+ default:
+ status = -EINVAL;
+ break;
+		}
+ break;
+ default:
+ status = -EINVAL;
+ break;
+	}
+
+ return status;
+}
+
+int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qed_rdma_modify_qp_in_params qp_params = { 0 };
+ struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
+ enum ib_qp_state old_qp_state, new_qp_state;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+		 "modify qp: qp %p attr_mask=0x%x, state=%d\n", qp, attr_mask,
+ attr->qp_state);
+
+ old_qp_state = qedr_get_ibqp_state(qp->state);
+ if (attr_mask & IB_QP_STATE)
+ new_qp_state = attr->qp_state;
+ else
+ new_qp_state = old_qp_state;
+
+ if (!ib_modify_qp_is_ok
+ (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_ETHERNET)) {
+ DP_ERR(dev,
+ "modify qp: invalid attribute mask=0x%x specified for\n"
+ "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
+ attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
+ new_qp_state);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ /* Translate the masks... */
+ if (attr_mask & IB_QP_STATE) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
+ qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
+ }
+
+ if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+ qp_params.sqd_async = true;
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
+ if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
+ qp_params.incoming_rdma_read_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_READ;
+ qp_params.incoming_rdma_write_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE;
+ qp_params.incoming_atomic_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_ATOMIC;
+ }
+
+ if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+ if (attr_mask & IB_QP_PATH_MTU) {
+ if (attr->path_mtu < IB_MTU_256 ||
+ attr->path_mtu > IB_MTU_4096) {
+ pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
+ rc = -EINVAL;
+ goto err;
+ }
+ qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
+ ib_mtu_enum_to_int(iboe_get_mtu
+ (dev->ndev->mtu)));
+ }
+
+ if (!qp->mtu) {
+ qp->mtu =
+ ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+ pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
+
+ qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
+ qp_params.flow_label = attr->ah_attr.grh.flow_label;
+ qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
+
+ qp->sgid_idx = attr->ah_attr.grh.sgid_index;
+
+ rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
+ if (rc) {
+ DP_ERR(dev,
+ "modify qp: problems with GID index %d (rc=%d)\n",
+ attr->ah_attr.grh.sgid_index, rc);
+ return rc;
+ }
+
+ rc = qedr_get_dmac(dev, &attr->ah_attr,
+ qp_params.remote_mac_addr);
+ if (rc)
+ return rc;
+
+ qp_params.use_local_mac = true;
+ ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
+ qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
+ qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
+ DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
+ qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
+ qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
+ DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
+ qp_params.remote_mac_addr);
+
+ qp_params.mtu = qp->mtu;
+ qp_params.lb_indication = false;
+ }
+
+ if (!qp_params.mtu) {
+ /* Stay with current MTU */
+ if (qp->mtu)
+ qp_params.mtu = qp->mtu;
+ else
+ qp_params.mtu =
+ ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+ }
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
+
+ qp_params.ack_timeout = attr->timeout;
+ if (attr->timeout) {
+ u32 temp;
+
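+			/* The IB ack timeout is 4.096us * 2^timeout;
+			 * convert it to milliseconds.
+			 */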
+ temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
+ /* FW requires [msec] */
+ qp_params.ack_timeout = temp;
+ } else {
+ /* Infinite */
+ qp_params.ack_timeout = 0;
+ }
+ }
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
+ qp_params.retry_cnt = attr->retry_cnt;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
+ qp_params.rnr_retry_cnt = attr->rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
+ qp_params.rq_psn = attr->rq_psn;
+ qp->rq_psn = attr->rq_psn;
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
+ rc = -EINVAL;
+ DP_ERR(dev,
+ "unsupported max_rd_atomic=%d, supported=%d\n",
+ attr->max_rd_atomic,
+ dev->attr.max_qp_req_rd_atomic_resc);
+ goto err;
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
+ qp_params.max_rd_atomic_req = attr->max_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
+ qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
+ }
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
+ qp_params.sq_psn = attr->sq_psn;
+ qp->sq_psn = attr->sq_psn;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (attr->max_dest_rd_atomic >
+ dev->attr.max_qp_resp_rd_atomic_resc) {
+ DP_ERR(dev,
+ "unsupported max_dest_rd_atomic=%d, supported=%d\n",
+ attr->max_dest_rd_atomic,
+ dev->attr.max_qp_resp_rd_atomic_resc);
+
+ rc = -EINVAL;
+ goto err;
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
+ qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_DEST_QPN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
+
+ qp_params.dest_qp = attr->dest_qp_num;
+ qp->dest_qp_num = attr->dest_qp_num;
+ }
+
+ if (qp->qp_type != IB_QPT_GSI)
+ rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
+ qp->qed_qp, &qp_params);
+
+ if (attr_mask & IB_QP_STATE) {
+ if ((qp->qp_type != IB_QPT_GSI) && (!udata))
+ qedr_update_qp_state(dev, qp, qp_params.new_state);
+ qp->state = qp_params.new_state;
+ }
+
+err:
+ return rc;
+}
+
+static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
+{
+ int ib_qp_acc_flags = 0;
+
+ if (params->incoming_rdma_write_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (params->incoming_rdma_read_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
+ if (params->incoming_atomic_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
+ return ib_qp_acc_flags;
+}
+
+int qedr_query_qp(struct ib_qp *ibqp,
+ struct ib_qp_attr *qp_attr,
+ int attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct qed_rdma_query_qp_out_params params;
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ int rc = 0;
+
+ memset(&params, 0, sizeof(params));
+
+ rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
+ if (rc)
+ goto err;
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+ qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
+ qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+ qp_attr->path_mig_state = IB_MIG_MIGRATED;
+ qp_attr->rq_psn = params.rq_psn;
+ qp_attr->sq_psn = params.sq_psn;
+ qp_attr->dest_qp_num = params.dest_qp;
+
+ qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
+
+ qp_attr->cap.max_send_wr = qp->sq.max_wr;
+ qp_attr->cap.max_recv_wr = qp->rq.max_wr;
+ qp_attr->cap.max_send_sge = qp->sq.max_sges;
+ qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+ qp_init_attr->cap = qp_attr->cap;
+
+ memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
+ sizeof(qp_attr->ah_attr.grh.dgid.raw));
+
+ qp_attr->ah_attr.grh.flow_label = params.flow_label;
+ qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
+ qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
+ qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
+
+ qp_attr->ah_attr.ah_flags = IB_AH_GRH;
+ qp_attr->ah_attr.port_num = 1;
+ qp_attr->ah_attr.sl = 0;
+ qp_attr->timeout = params.timeout;
+ qp_attr->rnr_retry = params.rnr_retry;
+ qp_attr->retry_cnt = params.retry_cnt;
+ qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
+ qp_attr->pkey_index = params.pkey_index;
+ qp_attr->port_num = 1;
+ qp_attr->ah_attr.src_path_bits = 0;
+ qp_attr->ah_attr.static_rate = 0;
+ qp_attr->alt_pkey_index = 0;
+ qp_attr->alt_port_num = 0;
+ qp_attr->alt_timeout = 0;
+ memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
+
+ qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
+ qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
+ qp_attr->max_rd_atomic = params.max_rd_atomic;
+ qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
+ qp_attr->cap.max_inline_data);
+
+err:
+ return rc;
+}
+
+int qedr_destroy_qp(struct ib_qp *ibqp)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ struct ib_qp_attr attr;
+ int attr_mask = 0;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
+ qp, qp->qp_type);
+
+	if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+	    (qp->state != QED_ROCE_QP_STATE_ERR) &&
+	    (qp->state != QED_ROCE_QP_STATE_INIT)) {
+ attr.qp_state = IB_QPS_ERR;
+ attr_mask |= IB_QP_STATE;
+
+ /* Change the QP state to ERROR */
+ qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+ }
+
+ if (qp->qp_type != IB_QPT_GSI) {
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+ if (rc)
+ return rc;
+ } else {
+ qedr_destroy_gsi_qp(dev);
+ }
+
+ if (ibqp->uobject && ibqp->uobject->context) {
+ qedr_cleanup_user_sq(dev, qp);
+ qedr_cleanup_user_rq(dev, qp);
+ } else {
+ qedr_cleanup_kernel_sq(dev, qp);
+ qedr_cleanup_kernel_rq(dev, qp);
+ }
+
+ kfree(qp);
+
+ return rc;
+}
+
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+{
+ struct qedr_ah *ah;
+
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ ah->attr = *attr;
+
+ return &ah->ibah;
+}
+
+int qedr_destroy_ah(struct ib_ah *ibah)
+{
+ struct qedr_ah *ah = get_qedr_ah(ibah);
+
+ kfree(ah);
+ return 0;
+}
+
+static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
+{
+ struct qedr_pbl *pbl, *tmp;
+
+ if (info->pbl_table)
+ list_add_tail(&info->pbl_table->list_entry,
+ &info->free_pbl_list);
+
+ if (!list_empty(&info->inuse_pbl_list))
+ list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
+
+ list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
+ list_del(&pbl->list_entry);
+ qedr_free_pbl(dev, &info->pbl_info, pbl);
+ }
+}
+
+static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
+ size_t page_list_len, bool two_layered)
+{
+ struct qedr_pbl *tmp;
+ int rc;
+
+ INIT_LIST_HEAD(&info->free_pbl_list);
+ INIT_LIST_HEAD(&info->inuse_pbl_list);
+
+ rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
+ page_list_len, two_layered);
+ if (rc)
+ goto done;
+
+ info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+ if (!info->pbl_table) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
+ &info->pbl_table->pa);
+
+	/* In the usual case we use 2 PBLs, so we add one to the free
+	 * list and allocate another one
+ */
+ tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+ if (!tmp) {
+ DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
+ goto done;
+ }
+
+ list_add_tail(&tmp->list_entry, &info->free_pbl_list);
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
+
+done:
+ if (rc)
+ free_mr_info(dev, info);
+
+ return rc;
+}
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 usr_addr, int acc, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_mr *mr;
+ struct qedr_pd *pd;
+ int rc = -ENOMEM;
+
+ pd = get_qedr_pd(ibpd);
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
+ pd->pd_id, start, len, usr_addr, acc);
+
+ if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(rc);
+
+ mr->type = QEDR_MR_USER;
+
+ mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+ if (IS_ERR(mr->umem)) {
+ rc = -EFAULT;
+ goto err0;
+ }
+
+ rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
+ if (rc)
+ goto err1;
+
+ qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
+ &mr->info.pbl_info);
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ goto err1;
+ }
+
+ /* Index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+ mr->hw_mr.key = 0;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hw_mr.mw_bind = false;
+ mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
+ mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+ mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+ mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
+ mr->hw_mr.fbo = ib_umem_offset(mr->umem);
+ mr->hw_mr.length = len;
+ mr->hw_mr.vaddr = usr_addr;
+ mr->hw_mr.zbva = false;
+ mr->hw_mr.phy_mr = false;
+ mr->hw_mr.dma_mr = false;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err2;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+ mr->hw_mr.remote_atomic)
+ mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
+ mr->ibmr.lkey);
+ return &mr->ibmr;
+
+err2:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+err0:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+int qedr_dereg_mr(struct ib_mr *ib_mr)
+{
+ struct qedr_mr *mr = get_qedr_mr(ib_mr);
+ struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
+ int rc = 0;
+
+ rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
+ if (rc)
+ return rc;
+
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+
+ if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+
+ /* it could be user registered memory. */
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ kfree(mr);
+
+ return rc;
+}
+
+struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+{
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_mr *mr;
+ int rc = -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
+ max_page_list_len);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(rc);
+
+ mr->dev = dev;
+ mr->type = QEDR_MR_FRMR;
+
+ rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
+ if (rc)
+ goto err0;
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ goto err0;
+ }
+
+ /* Index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
+ mr->hw_mr.key = 0;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = 0;
+ mr->hw_mr.remote_read = 0;
+ mr->hw_mr.remote_write = 0;
+ mr->hw_mr.remote_atomic = 0;
+ mr->hw_mr.mw_bind = false;
+ mr->hw_mr.pbl_ptr = 0;
+ mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+ mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+ mr->hw_mr.fbo = 0;
+ mr->hw_mr.length = 0;
+ mr->hw_mr.vaddr = 0;
+ mr->hw_mr.zbva = false;
+ mr->hw_mr.phy_mr = true;
+ mr->hw_mr.dma_mr = false;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err1;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ mr->ibmr.rkey = mr->ibmr.lkey;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
+ return mr;
+
+err1:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err0:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
+ enum ib_mr_type mr_type, u32 max_num_sg)
+{
+ struct qedr_dev *dev;
+ struct qedr_mr *mr;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = __qedr_alloc_mr(ibpd, max_num_sg);
+
+ if (IS_ERR(mr))
+ return ERR_PTR(-EINVAL);
+
+ dev = mr->dev;
+
+ return &mr->ibmr;
+}
+
+static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct qedr_mr *mr = get_qedr_mr(ibmr);
+ struct qedr_pbl *pbl_table;
+ struct regpair *pbe;
+ u32 pbes_in_page;
+
+ if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
+		DP_ERR(mr->dev, "qedr_set_page failed, PBL is full (%d pages)\n", mr->npages);
+ return -ENOMEM;
+ }
+
+ DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
+ mr->npages, addr);
+
+ pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
+ pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
+ pbe = (struct regpair *)pbl_table->va;
+ pbe += mr->npages % pbes_in_page;
+ pbe->lo = cpu_to_le32((u32)addr);
+ pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
+
+ mr->npages++;
+
+ return 0;
+}
+
+static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
+{
+ int work = info->completed - info->completed_handled - 1;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
+ while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
+ struct qedr_pbl *pbl;
+
+		/* Free all the page lists that can be freed (all the ones
+		 * that were invalidated), under the assumption that if an
+		 * FMR completed successfully then any invalidate operation
+		 * posted before it has completed as well.
+		 */
+ pbl = list_first_entry(&info->inuse_pbl_list,
+ struct qedr_pbl, list_entry);
+ list_del(&pbl->list_entry);
+ list_add_tail(&pbl->list_entry, &info->free_pbl_list);
+ info->completed_handled++;
+ }
+}
+
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct qedr_mr *mr = get_qedr_mr(ibmr);
+
+ mr->npages = 0;
+
+ handle_completed_mrs(mr->dev, &mr->info);
+ return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
+}
+
+struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_mr *mr;
+ int rc;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->type = QEDR_MR_DMA;
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ goto err1;
+ }
+
+ /* index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hw_mr.dma_mr = true;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err2;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+ mr->hw_mr.remote_atomic)
+ mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
+ return &mr->ibmr;
+
+err2:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
+{
+ return (((wq->prod + 1) % wq->max_wr) == wq->cons);
+}
+
+static int sge_data_len(struct ib_sge *sg_list, int num_sge)
+{
+ int i, len = 0;
+
+ for (i = 0; i < num_sge; i++)
+ len += sg_list[i].length;
+
+ return len;
+}
+
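+/* Byte-swap each 64-bit word of an inline WQE segment into the layout the
+ * device expects.
+ */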
+static void swap_wqe_data64(u64 *p)
+{
+ int i;
+
+ for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
+ *p = cpu_to_be64(cpu_to_le64(*p));
+}
+
+static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
+ struct qedr_qp *qp, u8 *wqe_size,
+ struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr, u8 *bits,
+ u8 bit)
+{
+ u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
+ char *seg_prt, *wqe;
+ int i, seg_siz;
+
+ if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
+ DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
+ *bad_wr = wr;
+ return 0;
+ }
+
+ if (!data_size)
+ return data_size;
+
+ *bits |= bit;
+
+ seg_prt = NULL;
+ wqe = NULL;
+ seg_siz = 0;
+
+ /* Copy data inline */
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 len = wr->sg_list[i].length;
+ void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
+
+ while (len > 0) {
+ u32 cur;
+
+ /* New segment required */
+ if (!seg_siz) {
+ wqe = (char *)qed_chain_produce(&qp->sq.pbl);
+ seg_prt = wqe;
+ seg_siz = sizeof(struct rdma_sq_common_wqe);
+ (*wqe_size)++;
+ }
+
+ /* Calculate currently allowed length */
+ cur = min_t(u32, len, seg_siz);
+ memcpy(seg_prt, src, cur);
+
+ /* Update segment variables */
+ seg_prt += cur;
+ seg_siz -= cur;
+
+ /* Update sge variables */
+ src += cur;
+ len -= cur;
+
+ /* Swap fully-completed segments */
+ if (!seg_siz)
+ swap_wqe_data64((u64 *)wqe);
+ }
+ }
+
+ /* swap last not completed segment */
+ if (seg_siz)
+ swap_wqe_data64((u64 *)wqe);
+
+ return data_size;
+}
+
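+/* Helpers for writing RQ/SRQ SGEs and SRQ headers in the little-endian
+ * format used by the HW.
+ */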
+#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
+ do { \
+ DMA_REGPAIR_LE(sge->addr, vaddr); \
+ (sge)->length = cpu_to_le32(vlength); \
+ (sge)->flags = cpu_to_le32(vflags); \
+ } while (0)
+
+#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
+ do { \
+ DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
+ (hdr)->num_sges = num_sge; \
+ } while (0)
+
+#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
+ do { \
+ DMA_REGPAIR_LE(sge->addr, vaddr); \
+ (sge)->length = cpu_to_le32(vlength); \
+ (sge)->l_key = cpu_to_le32(vlkey); \
+ } while (0)
+
+static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
+ struct ib_send_wr *wr)
+{
+ u32 data_size = 0;
+ int i;
+
+ for (i = 0; i < wr->num_sge; i++) {
+ struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
+
+ DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
+ sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
+ sge->length = cpu_to_le32(wr->sg_list[i].length);
+ data_size += wr->sg_list[i].length;
+ }
+
+ if (wqe_size)
+ *wqe_size += wr->num_sge;
+
+ return data_size;
+}
+
+static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct rdma_sq_rdma_wqe_1st *rwqe,
+ struct rdma_sq_rdma_wqe_2nd *rwqe2,
+ struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
+ DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
+
+ if (wr->send_flags & IB_SEND_INLINE) {
+ u8 flags = 0;
+
+ SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
+ return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
+ bad_wr, &rwqe->flags, flags);
+ }
+
+ return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
+}
+
+static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct rdma_sq_send_wqe_1st *swqe,
+ struct rdma_sq_send_wqe_2st *swqe2,
+ struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ memset(swqe2, 0, sizeof(*swqe2));
+ if (wr->send_flags & IB_SEND_INLINE) {
+ u8 flags = 0;
+
+ SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
+ return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
+ bad_wr, &swqe->flags, flags);
+ }
+
+ return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
+}
+
+static int qedr_prepare_reg(struct qedr_qp *qp,
+ struct rdma_sq_fmr_wqe_1st *fwqe1,
+ struct ib_reg_wr *wr)
+{
+ struct qedr_mr *mr = get_qedr_mr(wr->mr);
+ struct rdma_sq_fmr_wqe_2nd *fwqe2;
+
+ fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
+ fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
+ fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
+ fwqe1->l_key = wr->key;
+
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
+ !!(wr->access & IB_ACCESS_REMOTE_READ));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
+ !!(wr->access & IB_ACCESS_REMOTE_WRITE));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
+ !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
+ !!(wr->access & IB_ACCESS_LOCAL_WRITE));
+ fwqe2->fmr_ctrl = 0;
+
+ SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
+ ilog2(mr->ibmr.page_size) - 12);
+
+ fwqe2->length_hi = 0;
+ fwqe2->length_lo = mr->ibmr.length;
+ fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
+ fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
+
+ qp->wqe_wr_id[qp->sq.prod].mr = mr;
+
+ return 0;
+}
+
+enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return IB_WC_RDMA_WRITE;
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_INV:
+ return IB_WC_SEND;
+ case IB_WR_RDMA_READ:
+ return IB_WC_RDMA_READ;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ return IB_WC_COMP_SWAP;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ return IB_WC_FETCH_ADD;
+ case IB_WR_REG_MR:
+ return IB_WC_REG_MR;
+ case IB_WR_LOCAL_INV:
+ return IB_WC_LOCAL_INV;
+ default:
+ return IB_WC_SEND;
+ }
+}
+
+inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+{
+ int wq_is_full, err_wr, pbl_is_full;
+ struct qedr_dev *dev = qp->dev;
+
+ /* prevent SQ overflow and/or processing of a bad WR */
+ err_wr = wr->num_sge > qp->sq.max_sges;
+ wq_is_full = qedr_wq_is_full(&qp->sq);
+ pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
+ QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ if (wq_is_full || err_wr || pbl_is_full) {
+ if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
+ DP_ERR(dev,
+ "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
+ }
+
+ if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
+ DP_ERR(dev,
+ "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
+ }
+
+ if (pbl_is_full &&
+ !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
+ DP_ERR(dev,
+ "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
+ }
+ return false;
+ }
+ return true;
+}
+
+int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct rdma_sq_atomic_wqe_1st *awqe1;
+ struct rdma_sq_atomic_wqe_2nd *awqe2;
+ struct rdma_sq_atomic_wqe_3rd *awqe3;
+ struct rdma_sq_send_wqe_2st *swqe2;
+ struct rdma_sq_local_inv_wqe *iwqe;
+ struct rdma_sq_rdma_wqe_2nd *rwqe2;
+ struct rdma_sq_send_wqe_1st *swqe;
+ struct rdma_sq_rdma_wqe_1st *rwqe;
+ struct rdma_sq_fmr_wqe_1st *fwqe1;
+ struct rdma_sq_common_wqe *wqe;
+ u32 length;
+ int rc = 0;
+ bool comp;
+
+ if (!qedr_can_post_send(qp, wr)) {
+ *bad_wr = wr;
+ return -ENOMEM;
+ }
+
+ wqe = qed_chain_produce(&qp->sq.pbl);
+ qp->wqe_wr_id[qp->sq.prod].signaled =
+ !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
+
+ wqe->flags = 0;
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
+ !!(wr->send_flags & IB_SEND_SOLICITED));
+ comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
+ !!(wr->send_flags & IB_SEND_FENCE));
+ wqe->prev_wqe_size = qp->prev_wqe_size;
+
+ qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
+
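+	/* Build the opcode-specific part of the WQE; additional segments
+	 * (second/third headers, SGEs or inline data) are produced from the
+	 * SQ chain as needed.
+	 */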
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+ swqe->wqe_size = 2;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+
+ swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+ case IB_WR_SEND:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+
+ swqe->wqe_size = 2;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+ swqe->wqe_size = 2;
+ swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+ case IB_WR_RDMA_WRITE:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ DP_ERR(dev,
+ "RDMA READ WITH INVALIDATE not supported\n");
+ *bad_wr = wr;
+ rc = -EINVAL;
+ break;
+
+ case IB_WR_RDMA_READ:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
+ awqe1->wqe_size = 4;
+
+ awqe2 = qed_chain_produce(&qp->sq.pbl);
+ DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
+ awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
+
+ awqe3 = qed_chain_produce(&qp->sq.pbl);
+
+ if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
+ DMA_REGPAIR_LE(awqe3->swap_data,
+ atomic_wr(wr)->compare_add);
+ } else {
+ wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
+ DMA_REGPAIR_LE(awqe3->swap_data,
+ atomic_wr(wr)->swap);
+ DMA_REGPAIR_LE(awqe3->cmp_data,
+ atomic_wr(wr)->compare_add);
+ }
+
+ qedr_prepare_sq_sges(qp, NULL, wr);
+
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
+ qp->prev_wqe_size = awqe1->wqe_size;
+ break;
+
+ case IB_WR_LOCAL_INV:
+ iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
+ iwqe->wqe_size = 1;
+
+ iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
+ iwqe->inv_l_key = wr->ex.invalidate_rkey;
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
+ qp->prev_wqe_size = iwqe->wqe_size;
+ break;
+ case IB_WR_REG_MR:
+ DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
+ wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
+ fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
+ fwqe1->wqe_size = 2;
+
+ rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
+ if (rc) {
+ DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
+ *bad_wr = wr;
+ break;
+ }
+
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
+ qp->prev_wqe_size = fwqe1->wqe_size;
+ break;
+ default:
+ DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
+ rc = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ if (*bad_wr) {
+ u16 value;
+
+ /* Restore prod to its position before
+ * this WR was processed
+ */
+ value = le16_to_cpu(qp->sq.db_data.data.value);
+ qed_chain_set_prod(&qp->sq.pbl, value, wqe);
+
+ /* Restore prev_wqe_size */
+ qp->prev_wqe_size = wqe->prev_wqe_size;
+ rc = -EINVAL;
+ DP_ERR(dev, "POST SEND FAILED\n");
+ }
+
+ return rc;
+}
+
+int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ unsigned long flags;
+ int rc = 0;
+
+ *bad_wr = NULL;
+
+ if (qp->qp_type == IB_QPT_GSI)
+ return qedr_gsi_post_send(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
+ (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "QP in wrong state! QP icid=0x%x state %d\n",
+ qp->icid, qp->state);
+ return -EINVAL;
+ }
+
+	if (!wr) {
+		DP_ERR(dev, "Got an empty post send.\n");
+		spin_unlock_irqrestore(&qp->q_lock, flags);
+		return -EINVAL;
+	}
+
+ while (wr) {
+ rc = __qedr_post_send(ibqp, wr, bad_wr);
+ if (rc)
+ break;
+
+ qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+
+ qedr_inc_sw_prod(&qp->sq);
+
+ qp->sq.db_data.data.value++;
+
+ wr = wr->next;
+ }
+
+ /* Trigger doorbell
+ * If there was a failure in the first WR then it will be triggered in
+	 * vain. However, this is not harmful (as long as the producer value is
+ * unchanged). For performance reasons we avoid checking for this
+ * redundant doorbell.
+ */
+ wmb();
+ writel(qp->sq.db_data.raw, qp->sq.db);
+
+ /* Make sure write sticks */
+ mmiowb();
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return rc;
+}
+
+int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ unsigned long flags;
+ int status = 0;
+
+ if (qp->qp_type == IB_QPT_GSI)
+ return qedr_gsi_post_recv(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
+ (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ while (wr) {
+ int i;
+
+ if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
+ QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
+ wr->num_sge > qp->rq.max_sges) {
+ DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
+ qed_chain_get_elem_left_u32(&qp->rq.pbl),
+ QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
+ qp->rq.max_sges);
+ status = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 flags = 0;
+ struct rdma_rq_sge *rqe =
+ qed_chain_produce(&qp->rq.pbl);
+
+ /* First one must include the number
+ * of SGE in the list
+ */
+ if (!i)
+ SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
+ wr->num_sge);
+
+ SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
+ wr->sg_list[i].lkey);
+
+ RQ_SGE_SET(rqe, wr->sg_list[i].addr,
+ wr->sg_list[i].length, flags);
+ }
+
+		/* Special case of no SGEs. The FW requires between 1 and 4
+		 * SGEs, so in this case we post a single SGE with length
+		 * zero. This is needed because an RDMA write with immediate
+		 * consumes an RQ entry.
+		 */
+ if (!wr->num_sge) {
+ u32 flags = 0;
+ struct rdma_rq_sge *rqe =
+ qed_chain_produce(&qp->rq.pbl);
+
+ /* First one must include the number
+ * of SGE in the list
+ */
+ SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
+ SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
+
+ RQ_SGE_SET(rqe, 0, 0, flags);
+ i = 1;
+ }
+
+ qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+ qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
+
+ qedr_inc_sw_prod(&qp->rq);
+
+ /* Flush all the writes before signalling doorbell */
+ wmb();
+
+ qp->rq.db_data.data.value++;
+
+ writel(qp->rq.db_data.raw, qp->rq.db);
+
+ /* Make sure write sticks */
+ mmiowb();
+
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return status;
+}
+
+static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
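+	/* A CQE holds new work when its toggle bit matches the CQ's current
+	 * toggle phase.
+	 */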
+ return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
+ cq->pbl_toggle;
+}
+
+static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+ struct qedr_qp *qp;
+
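+	/* The QP pointer was stored in the CQE qp_handle at creation time;
+	 * reassemble it from the hi/lo words.
+	 */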
+ qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
+ resp_cqe->qp_handle.lo,
+ u64);
+ return qp;
+}
+
+static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
+ return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
+}
+
+/* Return latest CQE (needs processing) */
+static union rdma_cqe *get_cqe(struct qedr_cq *cq)
+{
+ return cq->latest_cqe;
+}
+
+/* For FMR work requests we need to increment the completed counter used by
+ * the FMR algorithm to decide whether a PBL can be freed. This must be done
+ * whether or not the work request was signaled, so this function is called
+ * from the condition that checks if a WC should be skipped, to make sure we
+ * don't miss it (this FMR operation may not have been signaled).
+ */
+static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
+{
+ if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
+ qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
+}
+
+static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, int num_entries,
+ struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
+ int force)
+{
+ u16 cnt = 0;
+
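+	/* Walk the SQ up to the HW consumer index, generating a WC only for
+	 * signaled (or forced) work requests.
+	 */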
+ while (num_entries && qp->sq.wqe_cons != hw_cons) {
+ if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
+ qedr_chk_if_fmr(qp);
+ /* skip WC */
+ goto next_cqe;
+ }
+
+ /* fill WC */
+ wc->status = status;
+ wc->wc_flags = 0;
+ wc->src_qp = qp->id;
+ wc->qp = &