Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Makefile | 2
-rw-r--r-- drivers/acpi/acpi_pad.c | 5
-rw-r--r-- drivers/acpi/acpica/dsinit.c | 11
-rw-r--r-- drivers/acpi/acpica/dsmethod.c | 50
-rw-r--r-- drivers/acpi/acpica/dswload2.c | 2
-rw-r--r-- drivers/acpi/acpica/evrgnini.c | 3
-rw-r--r-- drivers/acpi/acpica/nsload.c | 2
-rw-r--r-- drivers/acpi/apei/ghes.c | 2
-rw-r--r-- drivers/acpi/ec.c | 2
-rw-r--r-- drivers/acpi/fan.c | 12
-rw-r--r-- drivers/acpi/osl.c | 13
-rw-r--r-- drivers/acpi/pci_link.c | 38
-rw-r--r-- drivers/acpi/property.c | 117
-rw-r--r-- drivers/android/binder.c | 35
-rw-r--r-- drivers/ata/ahci.c | 156
-rw-r--r-- drivers/ata/ahci.h | 24
-rw-r--r-- drivers/ata/ahci_qoriq.c | 20
-rw-r--r-- drivers/ata/ahci_st.c | 4
-rw-r--r-- drivers/ata/libahci.c | 9
-rw-r--r-- drivers/ata/libata-scsi.c | 288
-rw-r--r-- drivers/ata/pata_at91.c | 4
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 3
-rw-r--r-- drivers/ata/sata_mv.c | 6
-rw-r--r-- drivers/auxdisplay/Kconfig | 9
-rw-r--r-- drivers/auxdisplay/Makefile | 1
-rw-r--r-- drivers/auxdisplay/img-ascii-lcd.c | 443
-rw-r--r-- drivers/base/Kconfig | 6
-rw-r--r-- drivers/block/DAC960.c | 4
-rw-r--r-- drivers/block/nbd.c | 2
-rw-r--r-- drivers/block/rbd.c | 50
-rw-r--r-- drivers/bluetooth/btwilink.c | 2
-rw-r--r-- drivers/bluetooth/hci_bcm.c | 8
-rw-r--r-- drivers/bus/Kconfig | 1
-rw-r--r-- drivers/char/hw_random/core.c | 6
-rw-r--r-- drivers/char/ipmi/Kconfig | 8
-rw-r--r-- drivers/char/ipmi/Makefile | 1
-rw-r--r-- drivers/char/ipmi/bt-bmc.c | 505
-rw-r--r-- drivers/char/ipmi/ipmi_msghandler.c | 7
-rw-r--r-- drivers/char/random.c | 4
-rw-r--r-- drivers/clk/at91/clk-programmable.c | 2
-rw-r--r-- drivers/clk/bcm/clk-bcm2835.c | 11
-rw-r--r-- drivers/clk/clk-max77686.c | 1
-rw-r--r-- drivers/clk/hisilicon/clk-hi6220.c | 4
-rw-r--r-- drivers/clk/mediatek/Kconfig | 2
-rw-r--r-- drivers/clk/mvebu/armada-37xx-periph.c | 11
-rw-r--r-- drivers/clk/samsung/clk-exynos-audss.c | 1
-rw-r--r-- drivers/clk/uniphier/clk-uniphier-core.c | 20
-rw-r--r-- drivers/clk/uniphier/clk-uniphier-mio.c | 2
-rw-r--r-- drivers/clk/uniphier/clk-uniphier-mux.c | 2
-rw-r--r-- drivers/clk/uniphier/clk-uniphier.h | 2
-rw-r--r-- drivers/clocksource/Kconfig | 10
-rw-r--r-- drivers/clocksource/Makefile | 1
-rw-r--r-- drivers/clocksource/jcore-pit.c | 249
-rw-r--r-- drivers/clocksource/timer-sun5i.c | 16
-rw-r--r-- drivers/cpufreq/cppc_cpufreq.c | 8
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 19
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 81
-rw-r--r-- drivers/cpuidle/Kconfig.mips | 2
-rw-r--r-- drivers/cpuidle/cpuidle-cps.c | 2
-rw-r--r-- drivers/dax/Kconfig | 2
-rw-r--r-- drivers/dax/pmem.c | 2
-rw-r--r-- drivers/devfreq/devfreq.c | 8
-rw-r--r-- drivers/devfreq/event/Kconfig | 1
-rw-r--r-- drivers/devfreq/event/exynos-nocp.c | 3
-rw-r--r-- drivers/extcon/extcon-qcom-spmi-misc.c | 2
-rw-r--r-- drivers/firewire/nosy.c | 13
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 5
-rw-r--r-- drivers/gpio/Kconfig | 2
-rw-r--r-- drivers/gpio/gpio-ath79.c | 1
-rw-r--r-- drivers/gpio/gpio-mpc8xxx.c | 2
-rw-r--r-- drivers/gpio/gpio-mxs.c | 8
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 16
-rw-r--r-- drivers/gpio/gpio-stmpe.c | 2
-rw-r--r-- drivers/gpio/gpio-ts4800.c | 1
-rw-r--r-- drivers/gpio/gpiolib-acpi.c | 7
-rw-r--r-- drivers/gpio/gpiolib.c | 42
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 69
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 15
-rw-r--r-- drivers/gpu/drm/amd/include/amd_shared.h | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 18
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 53
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c | 2
-rw-r--r-- drivers/gpu/drm/armada/armada_crtc.c | 18
-rw-r--r-- drivers/gpu/drm/ast/ast_ttm.c | 6
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_ttm.c | 7
-rw-r--r-- drivers/gpu/drm/drm_info.c | 4
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 24
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 7
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 3
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 3
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 23
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 5
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 39
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_userptr.c | 6
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_ttm.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/r600_dpm.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 17
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_i2c.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/sid.h | 1
-rw-r--r-- drivers/gpu/drm/via/via_dmablit.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 145
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 56
-rw-r--r-- drivers/hid/hid-dr.c | 83
-rw-r--r-- drivers/hid/hid-ids.h | 3
-rw-r--r-- drivers/hid/hid-led.c | 23
-rw-r--r-- drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r-- drivers/hv/hv_util.c | 10
-rw-r--r-- drivers/hwmon/adm9240.c | 6
-rw-r--r-- drivers/hwmon/max31790.c | 4
-rw-r--r-- drivers/i2c/busses/Kconfig | 12
-rw-r--r-- drivers/i2c/busses/i2c-designware-core.c | 17
-rw-r--r-- drivers/i2c/busses/i2c-digicolor.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 16
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 11
-rw-r--r-- drivers/i2c/busses/i2c-jz4780.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-rk3x.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-xgene-slimpro.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-xlp9xx.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-xlr.c | 1
-rw-r--r-- drivers/i2c/i2c-core.c | 21
-rw-r--r-- drivers/iio/adc/Kconfig | 2
-rw-r--r-- drivers/iio/chemical/atlas-ph-sensor.c | 7
-rw-r--r-- drivers/iio/temperature/maxim_thermocouple.c | 16
-rw-r--r-- drivers/infiniband/Kconfig | 2
-rw-r--r-- drivers/infiniband/core/umem.c | 6
-rw-r--r-- drivers/infiniband/core/umem_odp.c | 7
-rw-r--r-- drivers/infiniband/hw/Makefile | 1
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_cq.c | 23
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_device.h | 18
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_eq.c | 146
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_eq.h | 4
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hem.c | 76
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hem.h | 4
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 293
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v1.h | 9
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_main.c | 36
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_mr.c | 3
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_pd.c | 22
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_qp.c | 67
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 1
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_memfree.c | 2
-rw-r--r-- drivers/infiniband/hw/qedr/Kconfig | 8
-rw-r--r-- drivers/infiniband/hw/qedr/Makefile | 3
-rw-r--r-- drivers/infiniband/hw/qedr/main.c | 914
-rw-r--r-- drivers/infiniband/hw/qedr/qedr.h | 495
-rw-r--r-- drivers/infiniband/hw/qedr/qedr_cm.c | 622
-rw-r--r-- drivers/infiniband/hw/qedr/qedr_cm.h | 61
-rw-r--r-- drivers/infiniband/hw/qedr/qedr_hsi.h | 56
-rw-r--r-- drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 748
-rw-r--r-- drivers/infiniband/hw/qedr/verbs.c | 3547
-rw-r--r-- drivers/infiniband/hw/qedr/verbs.h | 101
-rw-r--r-- drivers/infiniband/hw/qib/qib_user_pages.c | 3
-rw-r--r-- drivers/infiniband/hw/usnic/usnic_uiom.c | 5
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib.h | 20
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 15
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 12
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 54
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 6
-rw-r--r-- drivers/input/mouse/alps.c | 87
-rw-r--r-- drivers/input/mouse/alps.h | 2
-rw-r--r-- drivers/input/mouse/elantech.c | 27
-rw-r--r-- drivers/input/rmi4/rmi_i2c.c | 38
-rw-r--r-- drivers/input/rmi4/rmi_spi.c | 22
-rw-r--r-- drivers/input/serio/i8042-io.h | 2
-rw-r--r-- drivers/input/serio/i8042-ip22io.h | 2
-rw-r--r-- drivers/input/serio/i8042-ppcio.h | 2
-rw-r--r-- drivers/input/serio/i8042-sparcio.h | 2
-rw-r--r-- drivers/input/serio/i8042-unicore32io.h | 2
-rw-r--r-- drivers/input/serio/i8042-x86ia64io.h | 96
-rw-r--r-- drivers/input/serio/i8042.c | 55
-rw-r--r-- drivers/input/touchscreen/melfas_mip4.c | 38
-rw-r--r-- drivers/ipack/ipack.c | 2
-rw-r--r-- drivers/irqchip/Kconfig | 4
-rw-r--r-- drivers/irqchip/irq-eznps.c | 6
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 10
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r-- drivers/irqchip/irq-gic.c | 2
-rw-r--r-- drivers/irqchip/irq-i8259.c | 30
-rw-r--r-- drivers/irqchip/irq-jcore-aic.c | 20
-rw-r--r-- drivers/md/dm-raid.c | 15
-rw-r--r-- drivers/md/dm-raid1.c | 22
-rw-r--r-- drivers/md/dm-rq.c | 7
-rw-r--r-- drivers/md/dm-table.c | 24
-rw-r--r-- drivers/md/dm.c | 4
-rw-r--r-- drivers/media/pci/ivtv/ivtv-udma.c | 4
-rw-r--r-- drivers/media/pci/ivtv/ivtv-yuv.c | 5
-rw-r--r-- drivers/media/platform/omap/omap_vout.c | 2
-rw-r--r-- drivers/media/v4l2-core/Kconfig | 2
-rw-r--r-- drivers/media/v4l2-core/videobuf-dma-sg.c | 7
-rw-r--r-- drivers/media/v4l2-core/videobuf2-memops.c | 6
-rw-r--r-- drivers/memstick/host/rtsx_usb_ms.c | 6
-rw-r--r-- drivers/misc/cxl/api.c | 11
-rw-r--r-- drivers/misc/cxl/context.c | 3
-rw-r--r-- drivers/misc/cxl/cxl.h | 24
-rw-r--r-- drivers/misc/cxl/file.c | 15
-rw-r--r-- drivers/misc/cxl/guest.c | 3
-rw-r--r-- drivers/misc/cxl/main.c | 42
-rw-r--r-- drivers/misc/cxl/pci.c | 2
-rw-r--r-- drivers/misc/cxl/sysfs.c | 27
-rw-r--r-- drivers/misc/genwqe/card_utils.c | 12
-rw-r--r-- drivers/misc/mei/hw-txe.c | 6
-rw-r--r-- drivers/misc/mic/scif/scif_rma.c | 3
-rw-r--r-- drivers/misc/sgi-gru/grufault.c | 2
-rw-r--r-- drivers/misc/sgi-gru/grumain.c | 2
-rw-r--r-- drivers/misc/vmw_vmci/vmci_doorbell.c | 8
-rw-r--r-- drivers/misc/vmw_vmci/vmci_driver.c | 2
-rw-r--r-- drivers/mmc/card/block.c | 3
-rw-r--r-- drivers/mmc/card/queue.h | 2
-rw-r--r-- drivers/mmc/core/mmc.c | 12
-rw-r--r-- drivers/mmc/host/rtsx_usb_sdmmc.c | 7
-rw-r--r-- drivers/mmc/host/sdhci-esdhc-imx.c | 23
-rw-r--r-- drivers/mmc/host/sdhci-of-arasan.c | 26
-rw-r--r-- drivers/mmc/host/sdhci-pci-core.c | 54
-rw-r--r-- drivers/mmc/host/sdhci-pci.h | 2
-rw-r--r-- drivers/mmc/host/sdhci-pxav3.c | 2
-rw-r--r-- drivers/mmc/host/sdhci.c | 42
-rw-r--r-- drivers/mmc/host/sdhci.h | 2
-rw-r--r-- drivers/mtd/ubi/eba.c | 1
-rw-r--r-- drivers/mtd/ubi/fastmap.c | 12
-rw-r--r-- drivers/net/dsa/b53/b53_mmap.c | 1
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 16
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 17
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 40
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sched.c | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_rq.c | 32
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 11
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 13
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 11
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 16
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 107
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns_mdio.c | 1
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 45
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 34
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 12
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_clock.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_port.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 62
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/port.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 50
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 76
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig | 12
-rw-r--r-- drivers/net/ethernet/qlogic/qed/Makefile | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 15
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.c | 53
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 19
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 27
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 20
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 31
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.c | 216
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.h | 95
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp.h | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_spq.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qede/Makefile | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 99
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 15
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 8
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.c | 1
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 3
-rw-r--r-- drivers/net/ethernet/rocker/rocker_main.c | 2
-rw-r--r-- drivers/net/ethernet/rocker/rocker_ofdpa.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 13
-rw-r--r-- drivers/net/ethernet/synopsys/dwc_eth_qos.c | 4
-rw-r--r-- drivers/net/geneve.c | 47
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 25
-rw-r--r-- drivers/net/macsec.c | 26
-rw-r--r-- drivers/net/phy/at803x.c | 65
-rw-r--r-- drivers/net/phy/dp83848.c | 3
-rw-r--r-- drivers/net/usb/asix_common.c | 8
-rw-r--r-- drivers/net/usb/kalmia.c | 2
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 17
-rw-r--r-- drivers/net/vrf.c | 2
-rw-r--r-- drivers/net/vxlan.c | 82
-rw-r--r-- drivers/net/wan/Kconfig | 2
-rw-r--r-- drivers/net/wan/slic_ds26522.c | 8
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath10k/debug.c | 75
-rw-r--r-- drivers/net/wireless/ath/ath6kl/sdio.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_calib.c | 25
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.h | 1
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/core.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | 13
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c | 9
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 18
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/wifi.h | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/sdio.c | 1
-rw-r--r-- drivers/nvdimm/Kconfig | 2
-rw-r--r-- drivers/nvdimm/namespace_devs.c | 14
-rw-r--r-- drivers/nvdimm/pmem.c | 8
-rw-r--r-- drivers/nvme/host/core.c | 14
-rw-r--r-- drivers/nvme/host/pci.c | 77
-rw-r--r-- drivers/nvme/host/scsi.c | 4
-rw-r--r-- drivers/nvme/target/admin-cmd.c | 8
-rw-r--r-- drivers/nvme/target/core.c | 2
-rw-r--r-- drivers/nvme/target/discovery.c | 4
-rw-r--r-- drivers/of/platform.c | 1
-rw-r--r-- drivers/pci/host/pci-layerscape.c | 2
-rw-r--r-- drivers/pci/host/pcie-designware-plat.c | 2
-rw-r--r-- drivers/pci/msi.c | 2
-rw-r--r-- drivers/perf/xgene_pmu.c | 2
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | 100
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed.c | 12
-rw-r--r-- drivers/pinctrl/intel/pinctrl-baytrail.c | 3
-rw-r--r-- drivers/pinctrl/intel/pinctrl-intel.c | 25
-rw-r--r-- drivers/platform/goldfish/goldfish_pipe.c | 3
-rw-r--r-- drivers/platform/x86/Kconfig | 1
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c | 7
-rw-r--r-- drivers/rapidio/devices/rio_mport_cdev.c | 3
-rw-r--r-- drivers/reset/reset-uniphier.c | 16
-rw-r--r-- drivers/rtc/Kconfig | 38
-rw-r--r-- drivers/rtc/Makefile | 1
-rw-r--r-- drivers/rtc/rtc-ac100.c | 5
-rw-r--r-- drivers/rtc/rtc-asm9260.c | 20
-rw-r--r-- drivers/rtc/rtc-at32ap700x.c | 2
-rw-r--r-- drivers/rtc/rtc-bq32k.c | 16
-rw-r--r-- drivers/rtc/rtc-cmos.c | 93
-rw-r--r-- drivers/rtc/rtc-coh901331.c | 2
-rw-r--r-- drivers/rtc/rtc-davinci.c | 2
-rw-r--r-- drivers/rtc/rtc-digicolor.c | 2
-rw-r--r-- drivers/rtc/rtc-ds1302.c | 2
-rw-r--r-- drivers/rtc/rtc-ds1307.c | 54
-rw-r--r-- drivers/rtc/rtc-ds1347.c | 96
-rw-r--r-- drivers/rtc/rtc-gemini.c | 2
-rw-r--r-- drivers/rtc/rtc-isl12057.c | 643
-rw-r--r-- drivers/rtc/rtc-jz4740.c | 2
-rw-r--r-- drivers/rtc/rtc-mcp795.c | 2
-rw-r--r-- drivers/rtc/rtc-mt6397.c | 2
-rw-r--r-- drivers/rtc/rtc-nuc900.c | 2
-rw-r--r-- drivers/rtc/rtc-omap.c | 170
-rw-r--r-- drivers/rtc/rtc-palmas.c | 2
-rw-r--r-- drivers/rtc/rtc-pcf2123.c | 5
-rw-r--r-- drivers/rtc/rtc-pcf50633.c | 2
-rw-r--r-- drivers/rtc/rtc-pic32.c | 1
-rw-r--r-- drivers/rtc/rtc-rv8803.c | 50
-rw-r--r-- drivers/rtc/rtc-rx6110.c | 3
-rw-r--r-- drivers/rtc/rtc-rx8025.c | 2
-rw-r--r-- drivers/rtc/rtc-spear.c | 2
-rw-r--r-- drivers/rtc/rtc-stmp3xxx.c | 2
-rw-r--r-- drivers/rtc/rtc-sysfs.c | 4
-rw-r--r-- drivers/rtc/rtc-tegra.c | 2
-rw-r--r-- drivers/rtc/rtc-twl.c | 2
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 4
-rw-r--r-- drivers/s390/cio/chp.c | 6
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 2
-rw-r--r-- drivers/scsi/NCR5380.c | 6
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 49
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 5
-rw-r--r-- drivers/scsi/g_NCR5380.c | 699
-rw-r--r-- drivers/scsi/g_NCR5380.h | 8
-rw-r--r-- drivers/scsi/ipr.c | 3
-rw-r--r-- drivers/scsi/libiscsi.c | 4
-rw-r--r-- drivers/scsi/scsi_dh.c | 6
-rw-r--r-- drivers/scsi/scsi_scan.c | 6
-rw-r--r-- drivers/scsi/st.c | 5
-rw-r--r-- drivers/scsi/ufs/Kconfig | 2
-rw-r--r-- drivers/scsi/ufs/ufs_quirks.h | 2
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 10
-rw-r--r-- drivers/soc/Kconfig | 1
-rw-r--r-- drivers/soc/fsl/Makefile | 1
-rw-r--r-- drivers/soc/fsl/qbman/Kconfig | 67
-rw-r--r-- drivers/soc/fsl/qbman/Makefile | 12
-rw-r--r-- drivers/soc/fsl/qbman/bman.c | 797
-rw-r--r-- drivers/soc/fsl/qbman/bman_ccsr.c | 263
-rw-r--r-- drivers/soc/fsl/qbman/bman_portal.c | 219
-rw-r--r-- drivers/soc/fsl/qbman/bman_priv.h | 80
-rw-r--r-- drivers/soc/fsl/qbman/bman_test.c | 53
-rw-r--r-- drivers/soc/fsl/qbman/bman_test.h | 35
-rw-r--r-- drivers/soc/fsl/qbman/bman_test_api.c | 151
-rw-r--r-- drivers/soc/fsl/qbman/dpaa_sys.h | 103
-rw-r--r-- drivers/soc/fsl/qbman/qman.c | 2881
-rw-r--r-- drivers/soc/fsl/qbman/qman_ccsr.c | 808
-rw-r--r-- drivers/soc/fsl/qbman/qman_portal.c | 355
-rw-r--r-- drivers/soc/fsl/qbman/qman_priv.h | 371
-rw-r--r-- drivers/soc/fsl/qbman/qman_test.c | 62
-rw-r--r-- drivers/soc/fsl/qbman/qman_test.h | 36
-rw-r--r-- drivers/soc/fsl/qbman/qman_test_api.c | 252
-rw-r--r-- drivers/soc/fsl/qbman/qman_test_stash.c | 617
-rw-r--r-- drivers/soc/fsl/qe/gpio.c | 3
-rw-r--r-- drivers/soc/fsl/qe/qe.c | 10
-rw-r--r-- drivers/soc/fsl/qe/qe_common.c | 8
-rw-r--r-- drivers/soc/fsl/qe/qe_tdm.c | 4
-rw-r--r-- drivers/staging/android/ion/ion.c | 6
-rw-r--r-- drivers/staging/android/ion/ion_of.c | 2
-rw-r--r-- drivers/staging/greybus/arche-platform.c | 1
-rw-r--r-- drivers/staging/greybus/es2.c | 3
-rw-r--r-- drivers/staging/greybus/gpio.c | 6
-rw-r--r-- drivers/staging/greybus/module.c | 2
-rw-r--r-- drivers/staging/greybus/uart.c | 2
-rw-r--r-- drivers/staging/iio/accel/sca3000_core.c | 2
-rw-r--r-- drivers/staging/lustre/lustre/llite/lproc_llite.c | 34
-rw-r--r-- drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c | 3
-rw-r--r-- drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 3
-rw-r--r-- drivers/staging/wilc1000/host_interface.c | 1
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 6
-rw-r--r-- drivers/target/iscsi/iscsi_target_login.c | 4
-rw-r--r-- drivers/target/target_core_transport.c | 39
-rw-r--r-- drivers/target/target_core_user.c | 50
-rw-r--r-- drivers/target/target_core_xcopy.c | 34
-rw-r--r-- drivers/target/tcm_fc/tfc_cmd.c | 4
-rw-r--r-- drivers/target/tcm_fc/tfc_sess.c | 42
-rw-r--r-- drivers/thermal/intel_pch_thermal.c | 60
-rw-r--r-- drivers/thermal/intel_powerclamp.c | 14
-rw-r--r-- drivers/tty/serial/8250/8250_lpss.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_port.c | 3
-rw-r--r-- drivers/tty/serial/8250/8250_uniphier.c | 4
-rw-r--r-- drivers/tty/serial/Kconfig | 1
-rw-r--r-- drivers/tty/serial/atmel_serial.c | 26
-rw-r--r-- drivers/tty/serial/fsl_lpuart.c | 3
-rw-r--r-- drivers/tty/serial/pch_uart.c | 1
-rw-r--r-- drivers/tty/serial/sc16is7xx.c | 8
-rw-r--r-- drivers/tty/serial/serial_core.c | 8
-rw-r--r-- drivers/tty/serial/stm32-usart.h | 2
-rw-r--r-- drivers/tty/serial/xilinx_uartps.c | 2
-rw-r--r-- drivers/tty/vt/vt.c | 7
-rw-r--r-- drivers/usb/chipidea/host.c | 2
-rw-r--r-- drivers/usb/dwc2/core.c | 11
-rw-r--r-- drivers/usb/dwc2/core.h | 7
-rw-r--r-- drivers/usb/dwc2/gadget.c | 53
-rw-r--r-- drivers/usb/dwc3/gadget.c | 26
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 107
-rw-r--r-- drivers/usb/gadget/function/u_ether.c | 5
-rw-r--r-- drivers/usb/gadget/udc/atmel_usba_udc.c | 2
-rw-r--r-- drivers/usb/host/ehci-hcd.c | 5
-rw-r--r-- drivers/usb/host/ehci-platform.c | 2
-rw-r--r-- drivers/usb/host/ehci-sead3.c | 185
-rw-r--r-- drivers/usb/host/ohci-at91.c | 9
-rw-r--r-- drivers/usb/host/ohci-hcd.c | 2
-rw-r--r-- drivers/usb/host/xhci-hub.c | 41
-rw-r--r-- drivers/usb/host/xhci-pci.c | 10
-rw-r--r-- drivers/usb/host/xhci.h | 3
-rw-r--r-- drivers/usb/musb/musb_gadget.c | 4
-rw-r--r-- drivers/usb/musb/omap2430.c | 7
-rw-r--r-- drivers/usb/renesas_usbhs/rcar3.c | 8
-rw-r--r-- drivers/usb/serial/cp210x.c | 4
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 3
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 5
-rw-r--r-- drivers/usb/serial/usb-serial.c | 3
-rw-r--r-- drivers/usb/wusbcore/crypto.c | 61
-rw-r--r-- drivers/video/fbdev/Kconfig | 2
-rw-r--r-- drivers/video/fbdev/cobalt_lcdfb.c | 42
-rw-r--r-- drivers/video/fbdev/pvr2fb.c | 4
-rw-r--r-- drivers/virt/fsl_hypervisor.c | 4
-rw-r--r-- drivers/vme/vme.c | 4
-rw-r--r-- drivers/watchdog/wdat_wdt.c | 4
-rw-r--r-- drivers/xen/manage.c | 45
-rw-r--r-- drivers/xen/xenbus/xenbus_dev_frontend.c | 4
-rw-r--r-- drivers/xen/xenbus/xenbus_probe_frontend.c | 4
522 files changed, 20355 insertions, 4049 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index f0afdfb3c7df..194d20bee7dc 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -21,7 +21,7 @@ obj-y += video/
obj-y += idle/
# IPMI must come before ACPI in order to provide IPMI opregion support
-obj-$(CONFIG_IPMI_HANDLER) += char/ipmi/
+obj-y += char/ipmi/
obj-$(CONFIG_ACPI) += acpi/
obj-$(CONFIG_SFI) += sfi/
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 8ea8211b2d58..eb76a4c10dbf 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <asm/mwait.h>
+#include <xen/xen.h>
#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
@@ -477,6 +478,10 @@ static struct acpi_driver acpi_pad_driver = {
static int __init acpi_pad_init(void)
{
+ /* Xen ACPI PAD is used when running as Xen Dom0. */
+ if (xen_initial_domain())
+ return -ENODEV;
+
power_saving_mwait_init();
if (power_saving_mwait_eax == 0)
return -EINVAL;
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index f1e6dcc7a827..54d48b90de2c 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -46,6 +46,7 @@
#include "acdispat.h"
#include "acnamesp.h"
#include "actables.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsinit")
@@ -214,23 +215,17 @@ acpi_ds_initialize_objects(u32 table_index,
/* Walk entire namespace from the supplied root */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
/*
* We don't use acpi_walk_namespace since we do not want to acquire
* the namespace reader lock.
*/
status =
acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
- ACPI_NS_WALK_UNLOCK, acpi_ds_init_one_object,
- NULL, &info, NULL);
+ 0, acpi_ds_init_one_object, NULL, &info,
+ NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 32e9ddc0cf2b..2b3210f42a46 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -99,14 +99,11 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
"Method auto-serialization parse [%4.4s] %p\n",
acpi_ut_get_node_name(node), node));
- acpi_ex_enter_interpreter();
-
/* Create/Init a root op for the method parse tree */
op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
if (!op) {
- status = AE_NO_MEMORY;
- goto unlock;
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
acpi_ps_set_name(op, node->name.integer);
@@ -118,8 +115,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
if (!walk_state) {
acpi_ps_free_op(op);
- status = AE_NO_MEMORY;
- goto unlock;
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
status = acpi_ds_init_aml_walk(walk_state, op, node,
@@ -138,8 +134,6 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
status = acpi_ps_parse_aml(walk_state);
acpi_ps_delete_parse_tree(op);
-unlock:
- acpi_ex_exit_interpreter();
return_ACPI_STATUS(status);
}
@@ -731,26 +725,6 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
acpi_ds_method_data_delete_all(walk_state);
/*
- * If method is serialized, release the mutex and restore the
- * current sync level for this thread
- */
- if (method_desc->method.mutex) {
-
- /* Acquisition Depth handles recursive calls */
-
- method_desc->method.mutex->mutex.acquisition_depth--;
- if (!method_desc->method.mutex->mutex.acquisition_depth) {
- walk_state->thread->current_sync_level =
- method_desc->method.mutex->mutex.
- original_sync_level;
-
- acpi_os_release_mutex(method_desc->method.
- mutex->mutex.os_mutex);
- method_desc->method.mutex->mutex.thread_id = 0;
- }
- }
-
- /*
* Delete any namespace objects created anywhere within the
* namespace by the execution of this method. Unless:
* 1) This method is a module-level executable code method, in which
@@ -786,6 +760,26 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
~ACPI_METHOD_MODIFIED_NAMESPACE;
}
}
+
+ /*
+ * If method is serialized, release the mutex and restore the
+ * current sync level for this thread
+ */
+ if (method_desc->method.mutex) {
+
+ /* Acquisition Depth handles recursive calls */
+
+ method_desc->method.mutex->mutex.acquisition_depth--;
+ if (!method_desc->method.mutex->mutex.acquisition_depth) {
+ walk_state->thread->current_sync_level =
+ method_desc->method.mutex->mutex.
+ original_sync_level;
+
+ acpi_os_release_mutex(method_desc->method.
+ mutex->mutex.os_mutex);
+ method_desc->method.mutex->mutex.thread_id = 0;
+ }
+ }
}
/* Decrement the thread count on the method */
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 028b22a3154e..e36218206bb0 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -607,11 +607,9 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
}
}
- acpi_ex_exit_interpreter();
status =
acpi_ev_initialize_region
(acpi_ns_get_attached_object(node), FALSE);
- acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
/*
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 3843f1fc5dbb..75ddd160a716 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -45,6 +45,7 @@
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evrgnini")
@@ -597,9 +598,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
}
}
+ acpi_ex_exit_interpreter();
status =
acpi_ev_execute_reg_method(region_obj,
ACPI_REG_CONNECT);
+ acpi_ex_enter_interpreter();
if (acpi_ns_locked) {
status =
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 334d3c5ba617..d1f20143bb11 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -137,7 +137,9 @@ unlock:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"**** Begin Table Object Initialization\n"));
+ acpi_ex_enter_interpreter();
status = acpi_ds_initialize_objects(table_index, node);
+ acpi_ex_exit_interpreter();
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"**** Completed Table Object Initialization\n"));
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f0a029e68d3e..0d099a24f776 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -662,7 +662,7 @@ static int ghes_proc(struct ghes *ghes)
ghes_do_proc(ghes, ghes->estatus);
out:
ghes_clear_estatus(ghes);
- return 0;
+ return rc;
}
static void ghes_add_timer(struct ghes *ghes)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 680531062160..48e19d013170 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -526,6 +526,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
acpi_ec_clear(ec);
}
+#ifdef CONFIG_PM_SLEEP
static bool acpi_ec_query_flushed(struct acpi_ec *ec)
{
bool flushed;
@@ -557,6 +558,7 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
spin_unlock_irqrestore(&ec->lock, flags);
__acpi_ec_flush_event(ec);
}
+#endif /* CONFIG_PM_SLEEP */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
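The new #ifdef avoids "defined but not used" warnings for helpers that are reachable only from the suspend/resume path when CONFIG_PM_SLEEP is off. A generic sketch of the pattern (the example_* names are hypothetical, not kernel code); SIMPLE_DEV_PM_OPS() only references the callback when CONFIG_PM_SLEEP is set, so the guarded definitions compile away cleanly:

    #include <linux/device.h>
    #include <linux/pm.h>

    struct example_dev;

    #ifdef CONFIG_PM_SLEEP
    static void example_flush_events(struct example_dev *edev)
    {
            /* drain pending events before suspend */
    }

    static int example_suspend(struct device *dev)
    {
            example_flush_events(dev_get_drvdata(dev));
            return 0;
    }
    #endif /* CONFIG_PM_SLEEP */

    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, NULL);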
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 384cfc3083e1..6cf4988206f2 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -129,8 +129,18 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
control = obj->package.elements[1].integer.value;
for (i = 0; i < fan->fps_count; i++) {
- if (control == fan->fps[i].control)
+ /*
+ * When Fine Grain Control is set, return the state
+ * corresponding to the largest fan->fps[i].control
+ * value that does not exceed the current speed;
+ * fan->fps[] is a sorted array with increasing speed.
+ */
+ if (fan->fif.fine_grain_ctrl && control < fan->fps[i].control) {
+ i = (i > 0) ? i - 1 : 0;
break;
+ } else if (control == fan->fps[i].control) {
+ break;
+ }
}
if (i == fan->fps_count) {
dev_dbg(&device->dev, "Invalid control value returned\n");
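With fine-grain control, the platform can report a control value that falls between table entries, and the new loop selects the closest entry at or below it. A standalone sketch of the selection logic (illustrative values, not from real firmware):

    /* Pick the fan state for a fine-grain-capable fan, given a table
     * sorted by increasing control value. Mirrors the loop above. */
    static int pick_fps_state(const unsigned int *fps_control, int count,
                              unsigned int control)
    {
            int i;

            for (i = 0; i < count; i++) {
                    if (control < fps_control[i])
                            return (i > 0) ? i - 1 : 0; /* step back one */
                    if (control == fps_control[i])
                            return i;                   /* exact match */
            }
            return -1; /* past the last entry: invalid control value */
    }

    /* pick_fps_state((unsigned int[]){0, 25, 50, 75, 100}, 5, 60) == 2,
     * i.e. the 50% state, the largest entry not above the current speed */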
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 4305ee9db4b2..416953a42510 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -162,11 +162,18 @@ void acpi_os_vprintf(const char *fmt, va_list args)
if (acpi_in_debugger) {
kdb_printf("%s", buffer);
} else {
- printk(KERN_CONT "%s", buffer);
+ if (printk_get_level(buffer))
+ printk("%s", buffer);
+ else
+ printk(KERN_CONT "%s", buffer);
}
#else
- if (acpi_debugger_write_log(buffer) < 0)
- printk(KERN_CONT "%s", buffer);
+ if (acpi_debugger_write_log(buffer) < 0) {
+ if (printk_get_level(buffer))
+ printk("%s", buffer);
+ else
+ printk(KERN_CONT "%s", buffer);
+ }
#endif
}
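The branch hinges on printk_get_level(), which returns the level character when the buffer starts with a KERN_SOH prefix and 0 otherwise, so explicitly leveled ACPI messages start a fresh log line instead of being glued to the previous one with KERN_CONT. In sketch form:

    /* printk_get_level("\0014warning") == '4'; printk_get_level("plain") == 0 */
    if (printk_get_level(buffer))
            printk("%s", buffer);           /* carries its own KERN_<n> prefix */
    else
            printk(KERN_CONT "%s", buffer); /* continuation of the previous line */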
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index c983bf733ad3..bc3d914dfc3e 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -87,6 +87,7 @@ struct acpi_pci_link {
static LIST_HEAD(acpi_link_list);
static DEFINE_MUTEX(acpi_link_lock);
+static int sci_irq = -1, sci_penalty;
/* --------------------------------------------------------------------------
PCI Link Device Management
@@ -496,25 +497,13 @@ static int acpi_irq_get_penalty(int irq)
{
int penalty = 0;
- /*
- * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
- * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
- * use for PCI IRQs.
- */
- if (irq == acpi_gbl_FADT.sci_interrupt) {
- u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
-
- if (type != IRQ_TYPE_LEVEL_LOW)
- penalty += PIRQ_PENALTY_ISA_ALWAYS;
- else
- penalty += PIRQ_PENALTY_PCI_USING;
- }
+ if (irq == sci_irq)
+ penalty += sci_penalty;
if (irq < ACPI_MAX_ISA_IRQS)
return penalty + acpi_isa_irq_penalty[irq];
- penalty += acpi_irq_pci_sharing_penalty(irq);
- return penalty;
+ return penalty + acpi_irq_pci_sharing_penalty(irq);
}
int __init acpi_irq_penalty_init(void)
@@ -619,6 +608,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_device_bid(link->device));
return -ENODEV;
} else {
+ if (link->irq.active < ACPI_MAX_ISA_IRQS)
+ acpi_isa_irq_penalty[link->irq.active] +=
+ PIRQ_PENALTY_PCI_USING;
+
printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
@@ -849,7 +842,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
continue;
if (used)
- new_penalty = acpi_irq_get_penalty(irq) +
+ new_penalty = acpi_isa_irq_penalty[irq] +
PIRQ_PENALTY_ISA_USED;
else
new_penalty = 0;
@@ -871,7 +864,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
void acpi_penalize_isa_irq(int irq, int active)
{
if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
- acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
+ acpi_isa_irq_penalty[irq] +=
(active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
}
@@ -881,6 +874,17 @@ bool acpi_isa_irq_available(int irq)
acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
}
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
+{
+ sci_irq = irq;
+
+ if (trigger == ACPI_MADT_TRIGGER_LEVEL &&
+ polarity == ACPI_MADT_POLARITY_ACTIVE_LOW)
+ sci_penalty = PIRQ_PENALTY_PCI_USING;
+ else
+ sci_penalty = PIRQ_PENALTY_ISA_ALWAYS;
+}
+
/*
* Over-ride default table to reserve additional IRQs for use by ISA
* e.g. acpi_irq_isa=5
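Instead of querying the IRQ trigger type at penalty-calculation time, the SCI attributes are now cached once. Assuming the MADT reports the SCI on IRQ 9 as level-triggered/active-low, a worked example using only symbols from the hunks above:

    acpi_penalize_sci_irq(9, ACPI_MADT_TRIGGER_LEVEL,
                          ACPI_MADT_POLARITY_ACTIVE_LOW);
    /* sci_irq == 9 and sci_penalty == PIRQ_PENALTY_PCI_USING, so
     * acpi_irq_get_penalty(9) evaluates to
     *     PIRQ_PENALTY_PCI_USING + acpi_isa_irq_penalty[9];
     * any other trigger/polarity combination would instead contribute
     * PIRQ_PENALTY_ISA_ALWAYS, fencing the SCI off from PCI IRQ use. */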
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index f2fd3fee588a..03f5ec11ab31 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -468,10 +468,11 @@ static int acpi_data_get_property_array(struct acpi_device_data *data,
}
/**
- * acpi_data_get_property_reference - returns handle to the referenced object
- * @data: ACPI device data object containing the property
+ * __acpi_node_get_property_reference - returns handle to the referenced object
+ * @fwnode: Firmware node to get the property from
* @propname: Name of the property
* @index: Index of the reference to return
+ * @num_args: Maximum number of arguments after each reference
* @args: Location to store the returned reference with optional arguments
*
 * Find property with @propname, verify that it is a package containing at least
@@ -482,17 +483,40 @@ static int acpi_data_get_property_array(struct acpi_device_data *data,
* If there's more than one reference in the property value package, @index is
* used to select the one to return.
*
+ * It is possible to leave holes in the property value set like in the
+ * example below:
+ *
+ * Package () {
+ * "cs-gpios",
+ * Package () {
+ * ^GPIO, 19, 0, 0,
+ * ^GPIO, 20, 0, 0,
+ * 0,
+ * ^GPIO, 21, 0, 0,
+ * }
+ * }
+ *
+ * Calling this function with index %2 returns %-ENOENT and with index %3
+ * returns the last entry. If the property does not contain any more values,
+ * %-ENODATA is returned. The NULL entry must be a single integer and
+ * preferably contain value %0.
+ *
* Return: %0 on success, negative error code on failure.
*/
-static int acpi_data_get_property_reference(struct acpi_device_data *data,
- const char *propname, size_t index,
- struct acpi_reference_args *args)
+int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+ const char *propname, size_t index, size_t num_args,
+ struct acpi_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
+ struct acpi_device_data *data;
struct acpi_device *device;
int ret, idx = 0;
+ data = acpi_device_data_of_node(fwnode);
+ if (!data)
+ return -EINVAL;
+
ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
if (ret)
return ret;
@@ -532,59 +556,54 @@ static int acpi_data_get_property_reference(struct acpi_device_data *data,
while (element < end) {
u32 nargs, i;
- if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
- return -EPROTO;
-
- ret = acpi_bus_get_device(element->reference.handle, &device);
- if (ret)
- return -ENODEV;
-
- element++;
- nargs = 0;
-
- /* assume following integer elements are all args */
- for (i = 0; element + i < end; i++) {
- int type = element[i].type;
+ if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
+ ret = acpi_bus_get_device(element->reference.handle,
+ &device);
+ if (ret)
+ return -ENODEV;
+
+ nargs = 0;
+ element++;
+
+ /* assume following integer elements are all args */
+ for (i = 0; element + i < end && i < num_args; i++) {
+ int type = element[i].type;
+
+ if (type == ACPI_TYPE_INTEGER)
+ nargs++;
+ else if (type == ACPI_TYPE_LOCAL_REFERENCE)
+ break;
+ else
+ return -EPROTO;
+ }
- if (type == ACPI_TYPE_INTEGER)
- nargs++;
- else if (type == ACPI_TYPE_LOCAL_REFERENCE)
- break;
- else
+ if (nargs > MAX_ACPI_REFERENCE_ARGS)
return -EPROTO;
- }
- if (idx++ == index) {
- args->adev = device;
- args->nargs = nargs;
- for (i = 0; i < nargs; i++)
- args->args[i] = element[i].integer.value;
+ if (idx == index) {
+ args->adev = device;
+ args->nargs = nargs;
+ for (i = 0; i < nargs; i++)
+ args->args[i] = element[i].integer.value;
- return 0;
+ return 0;
+ }
+
+ element += nargs;
+ } else if (element->type == ACPI_TYPE_INTEGER) {
+ if (idx == index)
+ return -ENOENT;
+ element++;
+ } else {
+ return -EPROTO;
}
- element += nargs;
+ idx++;
}
- return -EPROTO;
-}
-
-/**
- * acpi_node_get_property_reference - get a handle to the referenced object.
- * @fwnode: Firmware node to get the property from.
- * @propname: Name of the property.
- * @index: Index of the reference to return.
- * @args: Location to store the returned reference with optional arguments.
- */
-int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
- const char *name, size_t index,
- struct acpi_reference_args *args)
-{
- struct acpi_device_data *data = acpi_device_data_of_node(fwnode);
-
- return data ? acpi_data_get_property_reference(data, name, index, args) : -EINVAL;
+ return -ENODATA;
}
-EXPORT_SYMBOL_GPL(acpi_node_get_property_reference);
+EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
static int acpi_data_prop_read_single(struct acpi_device_data *data,
const char *propname,
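Against the "cs-gpios" package shown in the kernel-doc above, where each reference is followed by three integer arguments (so num_args is 3), a hedged caller sketch:

    struct acpi_reference_args args;
    int ret;

    /* index 3 resolves to "^GPIO, 21, 0, 0" despite the hole at index 2;
     * on success args.adev is the GPIO device, args.nargs == 3 and
     * args.args[] == { 21, 0, 0 } */
    ret = __acpi_node_get_property_reference(fwnode, "cs-gpios", 3, 3, &args);

    /* index 2 is the NULL (integer) entry and yields -ENOENT;
     * an index past the last entry yields -ENODATA */
    ret = __acpi_node_get_property_reference(fwnode, "cs-gpios", 2, 3, &args);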
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 562af94bec35..3c71b982bf2a 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1002,7 +1002,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- uint32_t desc)
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1010,12 +1010,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc)
+ if (desc < ref->desc) {
n = n->rb_left;
- else if (desc > ref->desc)
+ } else if (desc > ref->desc) {
n = n->rb_right;
- else
+ } else if (need_strong_ref && !ref->strong) {
+ binder_user_error("tried to use weak ref as strong ref\n");
+ return NULL;
+ } else {
return ref;
+ }
}
return NULL;
}
@@ -1285,7 +1289,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n",
@@ -1380,7 +1387,7 @@ static void binder_transaction(struct binder_proc *proc,
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle);
+ ref = binder_get_ref(proc, tr->target.handle, true);
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
@@ -1577,7 +1584,9 @@ static void binder_transaction(struct binder_proc *proc,
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->binder = 0;
fp->handle = ref->desc;
+ fp->cookie = 0;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
@@ -1589,7 +1598,10 @@ static void binder_transaction(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
@@ -1624,7 +1636,9 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
+ fp->binder = 0;
fp->handle = new_ref->desc;
+ fp->cookie = 0;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref,
new_ref);
@@ -1678,6 +1692,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
+ fp->binder = 0;
fp->handle = target_fd;
} break;
@@ -1800,7 +1815,9 @@ static int binder_thread_write(struct binder_proc *proc,
ref->desc);
}
} else
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target,
+ cmd == BC_ACQUIRE ||
+ cmd == BC_RELEASE);
if (ref == NULL) {
binder_user_error("%d:%d refcount change on invalid ref %d\n",
proc->pid, thread->pid, target);
@@ -1996,7 +2013,7 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
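The new need_strong_ref argument makes binder_get_ref() reject a weak-only reference wherever the caller is about to treat it as strong; the required strength is derived from the object type or command at each call site, as collected from the hunks above:

    /* transaction targets always need a strong ref */
    ref = binder_get_ref(proc, tr->target.handle, true);

    /* refcount commands need one only when acquiring/releasing strong */
    ref = binder_get_ref(proc, target,
                         cmd == BC_ACQUIRE || cmd == BC_RELEASE);

    /* death-notification lookups are fine with a weak ref */
    ref = binder_get_ref(proc, target, false);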
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 90eabaf81215..9669fc7c19df 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1400,142 +1400,59 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
}
#endif
-/*
- * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
- * to single msi.
- */
-static int ahci_init_msix(struct pci_dev *pdev, unsigned int n_ports,
- struct ahci_host_priv *hpriv, unsigned long flags)
+static int ahci_get_irq_vector(struct ata_host *host, int port)
{
- int nvec, i, rc;
-
- /* Do not init MSI-X if MSI is disabled for the device */
- if (hpriv->flags & AHCI_HFLAG_NO_MSI)
- return -ENODEV;
-
- nvec = pci_msix_vec_count(pdev);
- if (nvec < 0)
- return nvec;
-
- /*
- * Proper MSI-X implementations will have a vector per-port.
- * Barring that, we prefer single-MSI over single-MSIX. If this
- * check fails (not enough MSI-X vectors for all ports) we will
- * be called again with the flag clear iff ahci_init_msi()
- * fails.
- */
- if (flags & AHCI_HFLAG_MULTI_MSIX) {
- if (nvec < n_ports)
- return -ENODEV;
- nvec = n_ports;
- } else if (nvec) {
- nvec = 1;
- } else {
- /*
- * Emit dev_err() since this was the non-legacy irq
- * method of last resort.
- */
- rc = -ENODEV;
- goto fail;
- }
-
- for (i = 0; i < nvec; i++)
- hpriv->msix[i].entry = i;
- rc = pci_enable_msix_exact(pdev, hpriv->msix, nvec);
- if (rc < 0)
- goto fail;
-
- if (nvec > 1)
- hpriv->flags |= AHCI_HFLAG_MULTI_MSIX;
- hpriv->irq = hpriv->msix[0].vector; /* for single msi-x */
-
- return nvec;
-fail:
- dev_err(&pdev->dev,
- "failed to enable MSI-X with error %d, # of vectors: %d\n",
- rc, nvec);
-
- return rc;
+ return pci_irq_vector(to_pci_dev(host->dev), port);
}
static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv)
{
- int rc, nvec;
+ int nvec;
if (hpriv->flags & AHCI_HFLAG_NO_MSI)
return -ENODEV;
- nvec = pci_msi_vec_count(pdev);
- if (nvec < 0)
- return nvec;
-
/*
* If number of MSIs is less than number of ports then Sharing Last
* Message mode could be enforced. In this case assume that advantage
 * of multiple MSIs is negated and use single MSI mode instead.
*/
- if (nvec < n_ports)
- goto single_msi;
+ if (n_ports > 1) {
+ nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
+ if (nvec > 0) {
+ if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
+ hpriv->get_irq_vector = ahci_get_irq_vector;
+ hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+ return nvec;
+ }
- rc = pci_enable_msi_exact(pdev, nvec);
- if (rc == -ENOSPC)
- goto single_msi;
- if (rc < 0)
- return rc;
+ /*
+ * Fallback to single MSI mode if the controller
+ * enforced MRSM mode.
+ */
+ printk(KERN_INFO
+ "ahci: MRSM is on, fallback to single MSI\n");
+ pci_free_irq_vectors(pdev);
+ }
- /* fallback to single MSI mode if the controller enforced MRSM mode */
- if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) {
- pci_disable_msi(pdev);
- printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
- goto single_msi;
+ /*
+ * -ENOSPC indicates we don't have enough vectors. Don't bother
+ * trying a single vector for any other error:
+ */
+ if (nvec < 0 && nvec != -ENOSPC)
+ return nvec;
}
- if (nvec > 1)
- hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
-
- goto out;
-
-single_msi:
- nvec = 1;
-
- rc = pci_enable_msi(pdev);
- if (rc < 0)
- return rc;
-out:
- hpriv->irq = pdev->irq;
-
- return nvec;
-}
-
-static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
- struct ahci_host_priv *hpriv)
-{
- int nvec;
-
/*
- * Try to enable per-port MSI-X. If the host is not capable
- * fall back to single MSI before finally attempting single
- * MSI-X.
+ * If the host is not capable of supporting per-port vectors, fall
+ * back to single MSI before finally attempting single MSI-X.
*/
- nvec = ahci_init_msix(pdev, n_ports, hpriv, AHCI_HFLAG_MULTI_MSIX);
- if (nvec >= 0)
- return nvec;
-
- nvec = ahci_init_msi(pdev, n_ports, hpriv);
- if (nvec >= 0)
- return nvec;
-
- /* try single-msix */
- nvec = ahci_init_msix(pdev, n_ports, hpriv, 0);
- if (nvec >= 0)
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (nvec == 1)
return nvec;
-
- /* legacy intx interrupts */
- pci_intx(pdev, 1);
- hpriv->irq = pdev->irq;
-
- return 0;
+ return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
}
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1698,11 +1615,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!host)
return -ENOMEM;
host->private_data = hpriv;
- hpriv->msix = devm_kzalloc(&pdev->dev,
- sizeof(struct msix_entry) * n_ports, GFP_KERNEL);
- if (!hpriv->msix)
- return -ENOMEM;
- ahci_init_interrupts(pdev, n_ports, hpriv);
+
+ if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
+ /* legacy intx interrupts */
+ pci_intx(pdev, 1);
+ }
+ hpriv->irq = pci_irq_vector(pdev, 0);
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
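The rewritten ahci_init_msi() above replaces the hand-rolled MSI-X/MSI ladder with the pci_alloc_irq_vectors() API, which tries the flagged interrupt types in order and whose vectors are later looked up with pci_irq_vector(). The generic consumer pattern, as a sketch with error handling abbreviated:

    /* ask for one vector per port, accepting either MSI-X or MSI */
    nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
                                 PCI_IRQ_MSIX | PCI_IRQ_MSI);
    if (nvec < 0) {
            /* fall back to a single vector, then to legacy INTx */
    }
    hpriv->irq = pci_irq_vector(pdev, 0); /* Linux IRQ number for vector 0 */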
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 70b06bcfb7e3..0cc08f892fea 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -242,12 +242,10 @@ enum {
AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
#ifdef CONFIG_PCI_MSI
- AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */
- AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */
+ AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */
#else
/* compile out MSI infrastructure */
AHCI_HFLAG_MULTI_MSI = 0,
- AHCI_HFLAG_MULTI_MSIX = 0,
#endif
AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */
@@ -351,7 +349,6 @@ struct ahci_host_priv {
* the PHY position in this array.
*/
struct phy **phys;
- struct msix_entry *msix; /* Optional MSI-X support */
unsigned nports; /* Number of ports */
void *plat_data; /* Other platform data */
unsigned int irq; /* interrupt line */
@@ -362,22 +359,11 @@ struct ahci_host_priv {
*/
void (*start_engine)(struct ata_port *ap);
irqreturn_t (*irq_handler)(int irq, void *dev_instance);
-};
-#ifdef CONFIG_PCI_MSI
-static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port)
-{
- if (hpriv->flags & AHCI_HFLAG_MULTI_MSIX)
- return hpriv->msix[port].vector;
- else
- return hpriv->irq + port;
-}
-#else
-static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port)
-{
- return hpriv->irq;
-}
-#endif
+ /* only required for per-port MSI(-X) support */
+ int (*get_irq_vector)(struct ata_host *host,
+ int port);
+};
extern int ahci_ignore_sss;
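With the msix_entry array and the ahci_irq_vector() helper gone, a host that wants per-port interrupts now supplies the mapping through the new ->get_irq_vector() hook; ahci.c wires in ahci_get_irq_vector() for PCI. A hypothetical non-PCI platform driver could do the same along these lines (my_ahci_get_irq_vector is illustrative, not an existing kernel function):

    static int my_ahci_get_irq_vector(struct ata_host *host, int port)
    {
            /* e.g. one platform IRQ resource per port */
            return platform_get_irq(to_platform_device(host->dev), port);
    }

    /* at probe time: */
    hpriv->get_irq_vector = my_ahci_get_irq_vector;
    hpriv->flags |= AHCI_HFLAG_MULTI_MSI;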
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 7bdee9bd8786..1eba8dff875e 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -30,24 +30,23 @@
#define PORT_PHY3 0xB0
#define PORT_PHY4 0xB4
#define PORT_PHY5 0xB8
+#define PORT_AXICC 0xBC
#define PORT_TRANS 0xC8
/* port register default value */
#define AHCI_PORT_PHY_1_CFG 0xa003fffe
#define AHCI_PORT_TRANS_CFG 0x08000029
+#define AHCI_PORT_AXICC_CFG 0x3fffffff
/* for ls1021a */
#define LS1021A_PORT_PHY2 0x28183414
#define LS1021A_PORT_PHY3 0x0e080e06
#define LS1021A_PORT_PHY4 0x064a080b
#define LS1021A_PORT_PHY5 0x2aa86470
+#define LS1021A_AXICC_ADDR 0xC0
#define SATA_ECC_DISABLE 0x00020000
-/* for ls1043a */
-#define LS1043A_PORT_PHY2 0x28184d1f
-#define LS1043A_PORT_PHY3 0x0e081509
-
enum ahci_qoriq_type {
AHCI_LS1021A,
AHCI_LS1043A,
@@ -137,7 +136,7 @@ static struct ata_port_operations ahci_qoriq_ops = {
.hardreset = ahci_qoriq_hardreset,
};
-static struct ata_port_info ahci_qoriq_port_info = {
+static const struct ata_port_info ahci_qoriq_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -162,18 +161,19 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(LS1021A_PORT_PHY4, reg_base + PORT_PHY4);
writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + LS1021A_AXICC_ADDR);
break;
case AHCI_LS1043A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
- writel(LS1043A_PORT_PHY2, reg_base + PORT_PHY2);
- writel(LS1043A_PORT_PHY3, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
case AHCI_LS2080A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
}
@@ -221,12 +221,6 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
if (rc)
goto disable_resources;
- /* Workaround for ls2080a */
- if (qoriq_priv->type == AHCI_LS2080A) {
- hpriv->flags |= AHCI_HFLAG_NO_NCQ;
- ahci_qoriq_port_info.flags &= ~ATA_FLAG_NCQ;
- }
-
rc = ahci_platform_init_host(pdev, hpriv, &ahci_qoriq_port_info,
&ahci_qoriq_sht);
if (rc)
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 8ff428fe8e0f..bc345f249555 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -147,6 +147,7 @@ static struct scsi_host_template ahci_platform_sht = {
static int st_ahci_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct st_ahci_drv_data *drv_data;
struct ahci_host_priv *hpriv;
int err;
@@ -170,6 +171,9 @@ static int st_ahci_probe(struct platform_device *pdev)
st_ahci_configure_oob(hpriv->mmio);
+ of_property_read_u32(dev->of_node,
+ "ports-implemented", &hpriv->force_port_map);
+
err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
&ahci_platform_sht);
if (err) {
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index dcf2c724fd06..0d028ead99e8 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2520,7 +2520,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host,
*/
for (i = 0; i < host->n_ports; i++) {
struct ahci_port_priv *pp = host->ports[i]->private_data;
- int irq = ahci_irq_vector(hpriv, i);
+ int irq = hpriv->get_irq_vector(host, i);
/* Do not receive interrupts sent by dummy ports */
if (!pp) {
@@ -2556,10 +2556,15 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
int irq = hpriv->irq;
int rc;
- if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
+ if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
if (hpriv->irq_handler)
dev_warn(host->dev,
"both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
+ if (!hpriv->get_irq_vector) {
+ dev_err(host->dev,
+ "AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n");
+ return -EIO;
+ }
rc = ahci_host_activate_multi_irqs(host, sht);
} else {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e207b33e4ce9..9cceb4a875a5 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1159,8 +1159,6 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
- sdev->no_report_opcodes = 1;
- sdev->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer() callback and
* it needs to see every deferred qc. Set dev_blocked to 1 to
@@ -3282,18 +3280,125 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
return 1;
}
+/**
+ * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
+ * @cmd: SCSI command being translated
+ * @trmax: Maximum number of entries that will fit in sector_size bytes.
+ * @sector: Starting sector
+ * @count: Total Range of request in logical sectors
+ *
+ * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
+ * descriptor.
+ *
+ * Up to 64 entries of the format:
+ * 63:48 Range Length
+ * 47:0 LBA
+ *
+ * Range Length of 0 is ignored.
+ * LBAs should be in sorted order and not overlap.
+ *
+ * NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET
+ *
+ * Return: Number of bytes copied into sglist.
+ */
+static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax,
+ u64 sector, u32 count)
+{
+ struct scsi_device *sdp = cmd->device;
+ size_t len = sdp->sector_size;
+ size_t r;
+ __le64 *buf;
+ u32 i = 0;
+ unsigned long flags;
+
+ WARN_ON(len > ATA_SCSI_RBUF_SIZE);
+
+ if (len > ATA_SCSI_RBUF_SIZE)
+ len = ATA_SCSI_RBUF_SIZE;
+
+ spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
+ buf = ((void *)ata_scsi_rbuf);
+ memset(buf, 0, len);
+ while (i < trmax) {
+ u64 entry = sector |
+ ((u64)(count > 0xffff ? 0xffff : count) << 48);
+ buf[i++] = __cpu_to_le64(entry);
+ if (count <= 0xffff)
+ break;
+ count -= 0xffff;
+ sector += 0xffff;
+ }
+ r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
+ spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
+
+ return r;
+}
+
+/**
+ * ata_format_sct_write_same() - SATL Write Same to ATA SCT Write Same
+ * @cmd: SCSI command being translated
+ * @lba: Starting sector
+ * @num: Number of sectors to be zero'd.
+ *
+ * Rewrite the WRITE SAME payload to be an SCT Write Same formatted
+ * descriptor.
+ * NOTE: Writes a pattern (0's) in the foreground.
+ *
+ * Return: Number of bytes copied into sglist.
+ */
+static size_t ata_format_sct_write_same(struct scsi_cmnd *cmd, u64 lba, u64 num)
+{
+ struct scsi_device *sdp = cmd->device;
+ size_t len = sdp->sector_size;
+ size_t r;
+ u16 *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
+ buf = ((void *)ata_scsi_rbuf);
+
+ put_unaligned_le16(0x0002, &buf[0]); /* SCT_ACT_WRITE_SAME */
+ put_unaligned_le16(0x0101, &buf[1]); /* WRITE PTRN FG */
+ put_unaligned_le64(lba, &buf[2]);
+ put_unaligned_le64(num, &buf[6]);
+ put_unaligned_le32(0u, &buf[10]); /* pattern */
+
+ WARN_ON(len > ATA_SCSI_RBUF_SIZE);
+
+ if (len > ATA_SCSI_RBUF_SIZE)
+ len = ATA_SCSI_RBUF_SIZE;
+
+ r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
+ spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
+
+ return r;
+}
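For reference, the sector-sized SCT command page built above is laid out in
16-bit words: word 0 selects the Write Same action, word 1 the foreground
write-pattern function, then the 64-bit start LBA, 64-bit count and 32-bit
fill pattern. A hypothetical struct view of the same bytes (offsets match
the put_unaligned_*() calls above; the driver does not define this type):

    struct sct_write_same_page {
    	__le16 action;		/* word 0: 0x0002, SCT Write Same */
    	__le16 function;	/* word 1: 0x0101, write pattern, foreground */
    	__le64 lba;		/* words 2-5: starting LBA */
    	__le64 count;		/* words 6-9: number of sectors */
    	__le32 pattern;		/* words 10-11: fill pattern, zero here */
    } __packed;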
+
+/**
+ * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
+ * @qc: Command to be translated
+ *
+ * Translate a SCSI WRITE SAME command to be either a DSM TRIM command or
+ * an SCT Write Same command.
+ * Based on whether the WRITE SAME command has the UNMAP flag set:
+ *   When set, translate to DSM TRIM
+ *   When clear, translate to SCT Write Same
+ */
static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
struct scsi_cmnd *scmd = qc->scsicmd;
+ struct scsi_device *sdp = scmd->device;
+ size_t len = sdp->sector_size;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
u64 block;
u32 n_block;
+ const u32 trmax = len >> 3;
u32 size;
- void *buf;
u16 fp;
u8 bp = 0xff;
+ u8 unmap = cdb[1] & 0x8;
/* we may not issue DMA commands if no DMA mode is set */
if (unlikely(!dev->dma_mode))
@@ -3305,11 +3410,26 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
}
scsi_16_lba_len(cdb, &block, &n_block);
- /* for now we only support WRITE SAME with the unmap bit set */
- if (unlikely(!(cdb[1] & 0x8))) {
- fp = 1;
- bp = 3;
- goto invalid_fld;
+ if (unmap) {
+ /* If trim is not enabled the cmd is invalid. */
+ if ((dev->horkage & ATA_HORKAGE_NOTRIM) ||
+ !ata_id_has_trim(dev->id)) {
+ fp = 1;
+ bp = 3;
+ goto invalid_fld;
+ }
+ /* If the request is too large the cmd is invalid */
+ if (n_block > 0xffff * trmax) {
+ fp = 2;
+ goto invalid_fld;
+ }
+ } else {
+ /* If write same is not available the cmd is invalid */
+ if (!ata_id_sct_write_same(dev->id)) {
+ fp = 1;
+ bp = 3;
+ goto invalid_fld;
+ }
}
/*
@@ -3319,32 +3439,54 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
if (!scsi_sg_count(scmd))
goto invalid_param_len;
- buf = page_address(sg_page(scsi_sglist(scmd)));
-
- if (n_block <= 65535 * ATA_MAX_TRIM_RNUM) {
- size = ata_set_lba_range_entries(buf, ATA_MAX_TRIM_RNUM, block, n_block);
- } else {
- fp = 2;
- goto invalid_fld;
- }
+ /*
+	 * size must match the sector size in bytes.
+	 * For DATA SET MANAGEMENT TRIM in ACS-2, nsect (aka count)
+	 * is defined as the number of 512-byte blocks to be transferred.
+ */
+ if (unmap) {
+ size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block);
+ if (size != len)
+ goto invalid_param_len;
- if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
- /* Newer devices support queued TRIM commands */
- tf->protocol = ATA_PROT_NCQ;
- tf->command = ATA_CMD_FPDMA_SEND;
- tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
- tf->nsect = qc->tag << 3;
- tf->hob_feature = (size / 512) >> 8;
- tf->feature = size / 512;
+ if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
+ /* Newer devices support queued TRIM commands */
+ tf->protocol = ATA_PROT_NCQ;
+ tf->command = ATA_CMD_FPDMA_SEND;
+ tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
+ tf->nsect = qc->tag << 3;
+ tf->hob_feature = (size / 512) >> 8;
+ tf->feature = size / 512;
- tf->auxiliary = 1;
+ tf->auxiliary = 1;
+ } else {
+ tf->protocol = ATA_PROT_DMA;
+ tf->hob_feature = 0;
+ tf->feature = ATA_DSM_TRIM;
+ tf->hob_nsect = (size / 512) >> 8;
+ tf->nsect = size / 512;
+ tf->command = ATA_CMD_DSM;
+ }
} else {
- tf->protocol = ATA_PROT_DMA;
+ size = ata_format_sct_write_same(scmd, block, n_block);
+ if (size != len)
+ goto invalid_param_len;
+
tf->hob_feature = 0;
- tf->feature = ATA_DSM_TRIM;
- tf->hob_nsect = (size / 512) >> 8;
- tf->nsect = size / 512;
- tf->command = ATA_CMD_DSM;
+ tf->feature = 0;
+ tf->hob_nsect = 0;
+ tf->nsect = 1;
+ tf->lbah = 0;
+ tf->lbam = 0;
+ tf->lbal = ATA_CMD_STANDBYNOW1;
+ tf->hob_lbah = 0;
+ tf->hob_lbam = 0;
+ tf->hob_lbal = 0;
+ tf->device = ATA_CMD_STANDBYNOW1;
+ tf->protocol = ATA_PROT_DMA;
+ tf->command = ATA_CMD_WRITE_LOG_DMA_EXT;
+ if (unlikely(dev->flags & ATA_DFLAG_PIO))
+ tf->command = ATA_CMD_WRITE_LOG_EXT;
}
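Note that the non-UNMAP branch ships the SCT payload via WRITE LOG (DMA)
EXT. ATA_CMD_STANDBYNOW1 is reused in the LBA low and device fields only
because its value, 0xE0, happens to equal the SCT Command/Status log
address; a clearer spelling under that assumption would use a dedicated
constant (hypothetical name, same value):

    #define SCT_STATUS_LOG_ADDR	0xe0	/* SCT Command/Status log */

    	tf->lbal   = SCT_STATUS_LOG_ADDR;
    	tf->device = SCT_STATUS_LOG_ADDR;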
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |
@@ -3368,6 +3510,76 @@ invalid_opcode:
}
/**
+ * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
+ * @args: device MAINTENANCE_IN data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Yields a subset to satisfy scsi_report_opcode()
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
+{
+ struct ata_device *dev = args->dev;
+ u8 *cdb = args->cmd->cmnd;
+ u8 supported = 0;
+ unsigned int err = 0;
+
+ if (cdb[2] != 1) {
+ ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
+ err = 2;
+ goto out;
+ }
+ switch (cdb[3]) {
+ case INQUIRY:
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ case READ_CAPACITY:
+ case SERVICE_ACTION_IN_16:
+ case REPORT_LUNS:
+ case REQUEST_SENSE:
+ case SYNCHRONIZE_CACHE:
+ case REZERO_UNIT:
+ case SEEK_6:
+ case SEEK_10:
+ case TEST_UNIT_READY:
+ case SEND_DIAGNOSTIC:
+ case MAINTENANCE_IN:
+ case READ_6:
+ case READ_10:
+ case READ_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_16:
+ case ATA_12:
+ case ATA_16:
+ case VERIFY:
+ case VERIFY_16:
+ case MODE_SELECT:
+ case MODE_SELECT_10:
+ case START_STOP:
+ supported = 3;
+ break;
+ case WRITE_SAME_16:
+ if (!ata_id_sct_write_same(dev->id))
+ break;
+ /* fallthrough: if SCT ... only enable for ZBC */
+ case ZBC_IN:
+ case ZBC_OUT:
+ if (ata_id_zoned_cap(dev->id) ||
+ dev->class == ATA_DEV_ZAC)
+ supported = 3;
+ break;
+ default:
+ break;
+ }
+out:
+ rbuf[1] = supported; /* supported */
+ return err;
+}
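The simulation only honours reporting option 1 (report for the single
opcode in cdb[3]), which is what scsi_report_opcode() issues. A sketch of
the 12-byte MAINTENANCE IN CDB a caller would build, assuming the standard
SPC-4 field positions:

    u8 cdb[12] = {
    	MAINTENANCE_IN,				/* 0xa3 */
    	MI_REPORT_SUPPORTED_OPERATION_CODES,	/* 0x0c, service action */
    	0x01,					/* reporting options: one opcode */
    	WRITE_SAME_16,				/* opcode being queried */
    	/* bytes 4-5: requested service action; 6-9: allocation length */
    };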
+
+/**
* ata_scsi_report_zones_complete - convert ATA output
* @qc: command structure returning the data
*
@@ -3610,7 +3822,7 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc,
{
struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev;
- char mpage[CACHE_MPAGE_LEN];
+ u8 mpage[CACHE_MPAGE_LEN];
u8 wce;
int i;
@@ -3666,7 +3878,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
const u8 *buf, int len, u16 *fp)
{
struct ata_device *dev = qc->dev;
- char mpage[CONTROL_MPAGE_LEN];
+ u8 mpage[CONTROL_MPAGE_LEN];
u8 d_sense;
int i;
@@ -3701,8 +3913,6 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
dev->flags |= ATA_DFLAG_D_SENSE;
else
dev->flags &= ~ATA_DFLAG_D_SENSE;
- qc->scsicmd->result = SAM_STAT_GOOD;
- qc->scsicmd->scsi_done(qc->scsicmd);
return 0;
}
@@ -3829,6 +4039,8 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
if (ata_mselect_control(qc, p, pg_len, &fp) < 0) {
fp += hdr_len + bd_len;
goto invalid_param;
+ } else {
+ goto skip; /* No ATA command to send */
}
break;
default: /* invalid page code */
@@ -4147,6 +4359,13 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
ata_scsi_invalid_field(dev, cmd, 1);
break;
+ case MAINTENANCE_IN:
+ if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
+ ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
+ else
+ ata_scsi_invalid_field(dev, cmd, 1);
+ break;
+
/* all other commands */
default:
ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);
@@ -4179,7 +4398,6 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
shost->max_lun = 1;
shost->max_channel = 1;
shost->max_cmd_len = 16;
- shost->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer()
* callback and it needs to see every deferred qc.
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 9f27b14009f9..1611e0e8d767 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -347,10 +347,8 @@ static int at91sam9_smc_fields_init(struct device *dev)
field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC);
fields.mode = devm_regmap_field_alloc(dev, smc, field);
- if (IS_ERR(fields.mode))
- return PTR_ERR(fields.mode);
- return 0;
+ return PTR_ERR_OR_ZERO(fields.mode);
}
static int pata_at91_probe(struct platform_device *pdev)
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 27245957eee3..475a00669427 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -152,8 +152,7 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
div = 8;
T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());
- if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
- BUG();
+ BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T));
t1 = timing.setup;
if (t1)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 745489a1c86a..efc48bf89d51 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1727,15 +1727,13 @@ static int mv_port_start(struct ata_port *ap)
return -ENOMEM;
ap->private_data = pp;
- pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
+ pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
if (!pp->crqb)
return -ENOMEM;
- memset(pp->crqb, 0, MV_CRQB_Q_SZ);
- pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
+ pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
if (!pp->crpb)
goto out_port_free_dma_mem;
- memset(pp->crpb, 0, MV_CRPB_Q_SZ);
/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index c07e725ea93d..10e1b9eee10e 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -119,4 +119,13 @@ config CFAG12864B_RATE
If you compile this as a module, you can still override this
value using the module parameters.
+config IMG_ASCII_LCD
+ tristate "Imagination Technologies ASCII LCD Display"
+ default y if MIPS_MALTA || MIPS_SEAD3
+ select SYSCON
+ help
+ Enable this to support the simple ASCII LCD displays found on
+ development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
+ from Imagination Technologies.
+
endif # AUXDISPLAY
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 8a8936a468b9..3127175c89df 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_KS0108) += ks0108.o
obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
+obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
new file mode 100644
index 000000000000..bf43b5d2aafc
--- /dev/null
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <generated/utsrelease.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+struct img_ascii_lcd_ctx;
+
+/**
+ * struct img_ascii_lcd_config - Configuration information about an LCD model
+ * @num_chars: the number of characters the LCD can display
+ * @external_regmap: true if registers are in a system controller, else false
+ * @update: function called to update the LCD
+ */
+struct img_ascii_lcd_config {
+ unsigned int num_chars;
+ bool external_regmap;
+ void (*update)(struct img_ascii_lcd_ctx *ctx);
+};
+
+/**
+ * struct img_ascii_lcd_ctx - Private data structure
+ * @pdev: the ASCII LCD platform device
+ * @base: the base address of the LCD registers
+ * @regmap: the regmap through which LCD registers are accessed
+ * @offset: the offset within regmap to the start of the LCD registers
+ * @cfg: pointer to the LCD model configuration
+ * @message: the full message to display or scroll on the LCD
+ * @message_len: the length of the @message string
+ * @scroll_pos: index of the first character of @message currently displayed
+ * @scroll_rate: scroll interval in jiffies
+ * @timer: timer used to implement scrolling
+ * @curr: the string currently displayed on the LCD
+ */
+struct img_ascii_lcd_ctx {
+ struct platform_device *pdev;
+ union {
+ void __iomem *base;
+ struct regmap *regmap;
+ };
+ u32 offset;
+ const struct img_ascii_lcd_config *cfg;
+ char *message;
+ unsigned int message_len;
+ unsigned int scroll_pos;
+ unsigned int scroll_rate;
+ struct timer_list timer;
+ char curr[] __aligned(8);
+};
+
+/*
+ * MIPS Boston development board
+ */
+
+static void boston_update(struct img_ascii_lcd_ctx *ctx)
+{
+ ulong val;
+
+#if BITS_PER_LONG == 64
+ val = *((u64 *)&ctx->curr[0]);
+ __raw_writeq(val, ctx->base);
+#elif BITS_PER_LONG == 32
+ val = *((u32 *)&ctx->curr[0]);
+ __raw_writel(val, ctx->base);
+ val = *((u32 *)&ctx->curr[4]);
+ __raw_writel(val, ctx->base + 4);
+#else
+# error Not 32 or 64 bit
+#endif
+}
+
+static struct img_ascii_lcd_config boston_config = {
+ .num_chars = 8,
+ .update = boston_update,
+};
+
+/*
+ * MIPS Malta development board
+ */
+
+static void malta_update(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ctx->cfg->num_chars; i++) {
+ err = regmap_write(ctx->regmap,
+ ctx->offset + (i * 8), ctx->curr[i]);
+ if (err)
+ break;
+ }
+
+ if (unlikely(err))
+ pr_err_ratelimited("Failed to update LCD display: %d\n", err);
+}
+
+static struct img_ascii_lcd_config malta_config = {
+ .num_chars = 8,
+ .external_regmap = true,
+ .update = malta_update,
+};
+
+/*
+ * MIPS SEAD3 development board
+ */
+
+enum {
+ SEAD3_REG_LCD_CTRL = 0x00,
+#define SEAD3_REG_LCD_CTRL_SETDRAM BIT(7)
+ SEAD3_REG_LCD_DATA = 0x08,
+ SEAD3_REG_CPLD_STATUS = 0x10,
+#define SEAD3_REG_CPLD_STATUS_BUSY BIT(0)
+ SEAD3_REG_CPLD_DATA = 0x18,
+#define SEAD3_REG_CPLD_DATA_BUSY BIT(7)
+};
+
+static int sead3_wait_sm_idle(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int status;
+ int err;
+
+ do {
+ err = regmap_read(ctx->regmap,
+ ctx->offset + SEAD3_REG_CPLD_STATUS,
+ &status);
+ if (err)
+ return err;
+ } while (status & SEAD3_REG_CPLD_STATUS_BUSY);
+
+	return 0;
+}
+
+static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int cpld_data;
+ int err;
+
+ err = sead3_wait_sm_idle(ctx);
+ if (err)
+ return err;
+
+ do {
+ err = regmap_read(ctx->regmap,
+ ctx->offset + SEAD3_REG_LCD_CTRL,
+ &cpld_data);
+ if (err)
+ return err;
+
+ err = sead3_wait_sm_idle(ctx);
+ if (err)
+ return err;
+
+ err = regmap_read(ctx->regmap,
+ ctx->offset + SEAD3_REG_CPLD_DATA,
+ &cpld_data);
+ if (err)
+ return err;
+ } while (cpld_data & SEAD3_REG_CPLD_DATA_BUSY);
+
+ return 0;
+}
+
+static void sead3_update(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ctx->cfg->num_chars; i++) {
+ err = sead3_wait_lcd_idle(ctx);
+ if (err)
+ break;
+
+ err = regmap_write(ctx->regmap,
+ ctx->offset + SEAD3_REG_LCD_CTRL,
+ SEAD3_REG_LCD_CTRL_SETDRAM | i);
+ if (err)
+ break;
+
+ err = sead3_wait_lcd_idle(ctx);
+ if (err)
+ break;
+
+ err = regmap_write(ctx->regmap,
+ ctx->offset + SEAD3_REG_LCD_DATA,
+ ctx->curr[i]);
+ if (err)
+ break;
+ }
+
+ if (unlikely(err))
+ pr_err_ratelimited("Failed to update LCD display: %d\n", err);
+}
+
+static struct img_ascii_lcd_config sead3_config = {
+ .num_chars = 16,
+ .external_regmap = true,
+ .update = sead3_update,
+};
+
+static const struct of_device_id img_ascii_lcd_matches[] = {
+ { .compatible = "img,boston-lcd", .data = &boston_config },
+ { .compatible = "mti,malta-lcd", .data = &malta_config },
+ { .compatible = "mti,sead3-lcd", .data = &sead3_config },
+	{ },
+};
+
+/**
+ * img_ascii_lcd_scroll() - scroll the display by a character
+ * @arg: really a pointer to the private data structure
+ *
+ * Scroll the current message along the LCD by one character, rearming the
+ * timer if required.
+ */
+static void img_ascii_lcd_scroll(unsigned long arg)
+{
+ struct img_ascii_lcd_ctx *ctx = (struct img_ascii_lcd_ctx *)arg;
+ unsigned int i, ch = ctx->scroll_pos;
+ unsigned int num_chars = ctx->cfg->num_chars;
+
+ /* update the current message string */
+ for (i = 0; i < num_chars;) {
+ /* copy as many characters from the string as possible */
+ for (; i < num_chars && ch < ctx->message_len; i++, ch++)
+ ctx->curr[i] = ctx->message[ch];
+
+ /* wrap around to the start of the string */
+ ch = 0;
+ }
+
+ /* update the LCD */
+ ctx->cfg->update(ctx);
+
+ /* move on to the next character */
+ ctx->scroll_pos++;
+ ctx->scroll_pos %= ctx->message_len;
+
+ /* rearm the timer */
+ if (ctx->message_len > ctx->cfg->num_chars)
+ mod_timer(&ctx->timer, jiffies + ctx->scroll_rate);
+}
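The refill above is equivalent to modular indexing into the message. For
example, with an 8-character panel and the 11-character message
"hello world", scroll_pos 4 displays "o worldh": the tail of the string
followed by its start. A standalone model of the loop, illustration only:

    for (i = 0; i < num_chars; i++)
    	curr[i] = message[(scroll_pos + i) % message_len];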
+
+/**
+ * img_ascii_lcd_display() - set the message to be displayed
+ * @ctx: pointer to the private data structure
+ * @msg: the message to display
+ * @count: length of msg, or -1
+ *
+ * Display a new message @msg on the LCD. @msg can be longer than the number of
+ * characters the LCD can display, in which case it will begin scrolling across
+ * the LCD display.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation failure
+ */
+static int img_ascii_lcd_display(struct img_ascii_lcd_ctx *ctx,
+ const char *msg, ssize_t count)
+{
+ char *new_msg;
+
+ /* stop the scroll timer */
+ del_timer_sync(&ctx->timer);
+
+ if (count == -1)
+ count = strlen(msg);
+
+ /* if the string ends with a newline, trim it */
+	if (count > 0 && msg[count - 1] == '\n')
+ count--;
+
+ new_msg = devm_kmalloc(&ctx->pdev->dev, count + 1, GFP_KERNEL);
+ if (!new_msg)
+ return -ENOMEM;
+
+ memcpy(new_msg, msg, count);
+ new_msg[count] = 0;
+
+ if (ctx->message)
+ devm_kfree(&ctx->pdev->dev, ctx->message);
+
+ ctx->message = new_msg;
+ ctx->message_len = count;
+ ctx->scroll_pos = 0;
+
+ /* update the LCD */
+ img_ascii_lcd_scroll((unsigned long)ctx);
+
+ return 0;
+}
+
+/**
+ * message_show() - read message via sysfs
+ * @dev: the LCD device
+ * @attr: the LCD message attribute
+ * @buf: the buffer to read the message into
+ *
+ * Read the current message being displayed or scrolled across the LCD display
+ * into @buf, for reads from sysfs.
+ *
+ * Return: the number of characters written to @buf
+ */
+static ssize_t message_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", ctx->message);
+}
+
+/**
+ * message_store() - write a new message via sysfs
+ * @dev: the LCD device
+ * @attr: the LCD message attribute
+ * @buf: the buffer containing the new message
+ * @count: the size of the message in @buf
+ *
+ * Write a new message to display or scroll across the LCD display from sysfs.
+ *
+ * Return: the size of the message on success, else -ERRNO
+ */
+static ssize_t message_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev);
+ int err;
+
+ err = img_ascii_lcd_display(ctx, buf, count);
+ return err ?: count;
+}
+
+static DEVICE_ATTR_RW(message);
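Together these give a plain sysfs interface: reading the attribute returns
the current message, writing replaces it and restarts scrolling. A hedged
userspace example in C; the exact sysfs path depends on how the device is
named in the device tree, so the one below is only an assumption:

    #include <fcntl.h>
    #include <unistd.h>

    int fd = open("/sys/devices/platform/lcd/message", O_WRONLY);
    if (fd >= 0) {
    	write(fd, "Hello", 5);	/* a trailing newline would be trimmed */
    	close(fd);
    }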
+
+/**
+ * img_ascii_lcd_probe() - probe an LCD display device
+ * @pdev: the LCD platform device
+ *
+ * Probe an LCD display device, ensuring that we have the required resources in
+ * order to access the LCD & setting up private data as well as sysfs files.
+ *
+ * Return: 0 on success, else -ERRNO
+ */
+static int img_ascii_lcd_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct img_ascii_lcd_config *cfg;
+ struct img_ascii_lcd_ctx *ctx;
+ struct resource *res;
+ int err;
+
+ match = of_match_device(img_ascii_lcd_matches, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ cfg = match->data;
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx) + cfg->num_chars,
+ GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (cfg->external_regmap) {
+ ctx->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(ctx->regmap))
+ return PTR_ERR(ctx->regmap);
+
+ if (of_property_read_u32(pdev->dev.of_node, "offset",
+ &ctx->offset))
+ return -EINVAL;
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->base))
+ return PTR_ERR(ctx->base);
+ }
+
+ ctx->pdev = pdev;
+ ctx->cfg = cfg;
+ ctx->message = NULL;
+ ctx->scroll_pos = 0;
+ ctx->scroll_rate = HZ / 2;
+
+ /* initialise a timer for scrolling the message */
+ init_timer(&ctx->timer);
+ ctx->timer.function = img_ascii_lcd_scroll;
+ ctx->timer.data = (unsigned long)ctx;
+
+ platform_set_drvdata(pdev, ctx);
+
+ /* display a default message */
+ err = img_ascii_lcd_display(ctx, "Linux " UTS_RELEASE " ", -1);
+ if (err)
+ goto out_del_timer;
+
+ err = device_create_file(&pdev->dev, &dev_attr_message);
+ if (err)
+ goto out_del_timer;
+
+ return 0;
+out_del_timer:
+ del_timer_sync(&ctx->timer);
+ return err;
+}
+
+/**
+ * img_ascii_lcd_remove() - remove an LCD display device
+ * @pdev: the LCD platform device
+ *
+ * Remove an LCD display device, freeing private resources & ensuring that the
+ * driver stops using the LCD display registers.
+ *
+ * Return: 0
+ */
+static int img_ascii_lcd_remove(struct platform_device *pdev)
+{
+ struct img_ascii_lcd_ctx *ctx = platform_get_drvdata(pdev);
+
+ device_remove_file(&pdev->dev, &dev_attr_message);
+ del_timer_sync(&ctx->timer);
+ return 0;
+}
+
+static struct platform_driver img_ascii_lcd_driver = {
+ .driver = {
+ .name = "img-ascii-lcd",
+ .of_match_table = img_ascii_lcd_matches,
+ },
+ .probe = img_ascii_lcd_probe,
+ .remove = img_ascii_lcd_remove,
+};
+module_platform_driver(img_ascii_lcd_driver);
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fdf44cac08e6..d02e7c0f5bfd 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -213,14 +213,16 @@ config DEBUG_DEVRES
If you are unsure about this, Say N here.
config DEBUG_TEST_DRIVER_REMOVE
- bool "Test driver remove calls during probe"
+ bool "Test driver remove calls during probe (UNSTABLE)"
depends on DEBUG_KERNEL
help
Say Y here if you want the Driver core to test driver remove functions
by calling probe, remove, probe. This tests the remove path without
having to unbind the driver or unload the driver module.
- If you are unsure about this, say N here.
+ This option is expected to find errors and may render your system
+ unusable. You should say N here unless you are explicitly looking to
+ test this functionality.
config SYS_HYPERVISOR
bool
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 811e11c82f32..0809cda93cc0 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2954,7 +2954,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_PD_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)) {
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
@@ -2990,7 +2990,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_P_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)){
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ba405b55329f..19a16b2dbb91 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -164,7 +164,7 @@ static void sock_shutdown(struct nbd_device *nbd)
spin_lock(&nbd->sock_lock);
if (!nbd->sock) {
- spin_unlock_irq(&nbd->sock_lock);
+ spin_unlock(&nbd->sock_lock);
return;
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index abb71628ab61..7b274ff4632c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -415,15 +415,15 @@ struct rbd_device {
};
/*
- * Flag bits for rbd_dev->flags. If atomicity is required,
- * rbd_dev->lock is used to protect access.
- *
- * Currently, only the "removing" flag (which is coupled with the
- * "open_count" field) requires atomic access.
+ * Flag bits for rbd_dev->flags:
+ * - REMOVING (which is coupled with rbd_dev->open_count) is protected
+ * by rbd_dev->lock
+ * - BLACKLISTED is protected by rbd_dev->lock_rwsem
*/
enum rbd_dev_flags {
RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
+ RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};
static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
@@ -3926,6 +3926,7 @@ static void rbd_reregister_watch(struct work_struct *work)
struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
struct rbd_device, watch_dwork);
bool was_lock_owner = false;
+ bool need_to_wake = false;
int ret;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
@@ -3935,19 +3936,27 @@ static void rbd_reregister_watch(struct work_struct *work)
was_lock_owner = rbd_release_lock(rbd_dev);
mutex_lock(&rbd_dev->watch_mutex);
- if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR)
- goto fail_unlock;
+ if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
+ mutex_unlock(&rbd_dev->watch_mutex);
+ goto out;
+ }
ret = __rbd_register_watch(rbd_dev);
if (ret) {
rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
- if (ret != -EBLACKLISTED)
+ if (ret == -EBLACKLISTED || ret == -ENOENT) {
+ set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
+ need_to_wake = true;
+ } else {
queue_delayed_work(rbd_dev->task_wq,
&rbd_dev->watch_dwork,
RBD_RETRY_DELAY);
- goto fail_unlock;
+ }
+ mutex_unlock(&rbd_dev->watch_mutex);
+ goto out;
}
+ need_to_wake = true;
rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
mutex_unlock(&rbd_dev->watch_mutex);
@@ -3963,13 +3972,10 @@ static void rbd_reregister_watch(struct work_struct *work)
ret);
}
+out:
up_write(&rbd_dev->lock_rwsem);
- wake_requests(rbd_dev, true);
- return;
-
-fail_unlock:
- mutex_unlock(&rbd_dev->watch_mutex);
- up_write(&rbd_dev->lock_rwsem);
+ if (need_to_wake)
+ wake_requests(rbd_dev, true);
}
/*
@@ -4074,7 +4080,9 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
up_read(&rbd_dev->lock_rwsem);
schedule();
down_read(&rbd_dev->lock_rwsem);
- } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
+ } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+
finish_wait(&rbd_dev->lock_waitq, &wait);
}
@@ -4166,8 +4174,16 @@ static void rbd_queue_workfn(struct work_struct *work)
if (must_be_locked) {
down_read(&rbd_dev->lock_rwsem);
- if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
+ if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
rbd_wait_state_locked(rbd_dev);
+
+ WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+ if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+ result = -EBLACKLISTED;
+ goto err_unlock;
+ }
}
img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index ef51c9c864c5..b6bb58c41df5 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -310,7 +310,7 @@ static int bt_ti_probe(struct platform_device *pdev)
BT_DBG("HCI device registered (hdev %p)", hdev);
dev_set_drvdata(&pdev->dev, hst);
- return err;
+ return 0;
}
static int bt_ti_remove(struct platform_device *pdev)
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 5ccb90ef0146..8f6c23c20c52 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -643,6 +643,14 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
},
.driver_data = &acpi_active_low,
},
+ { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */
+ .ident = "Lenovo ThinkPad 8",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
+ },
+ .driver_data = &acpi_active_low,
+ },
{ }
};
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 7010dcac9328..78751057164a 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -111,6 +111,7 @@ config OMAP_OCP2SCP
config QCOM_EBI2
bool "Qualcomm External Bus Interface 2 (EBI2)"
depends on HAS_IOMEM
+ depends on ARCH_QCOM || COMPILE_TEST
help
Say y here to enable support for the Qualcomm External Bus
Interface 2, which can be used to connect things like NAND Flash,
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 482794526e8c..d2d2c89de5b4 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -84,14 +84,14 @@ static size_t rng_buffer_size(void)
static void add_early_randomness(struct hwrng *rng)
{
- unsigned char bytes[16];
int bytes_read;
+ size_t size = min_t(size_t, 16, rng_buffer_size());
mutex_lock(&reading_mutex);
- bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+ bytes_read = rng_get_data(rng, rng_buffer, size, 1);
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
- add_device_randomness(bytes, bytes_read);
+ add_device_randomness(rng_buffer, bytes_read);
}
static inline void cleanup_rng(struct kref *kref)
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 5a9350b1069a..7f816655cbbf 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -76,3 +76,11 @@ config IPMI_POWEROFF
the IPMI management controller is capable of this.
endif # IPMI_HANDLER
+
+config ASPEED_BT_IPMI_BMC
+ depends on ARCH_ASPEED
+	tristate "BT IPMI BMC driver"
+ help
+ Provides a driver for the BT (Block Transfer) IPMI interface
+	  found on ASPEED SoCs (AST2400 and AST2500). The driver
+ implements the BMC side of the BT interface.
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index f3ffde1f5f1f..0d98cd91def1 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
+obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
new file mode 100644
index 000000000000..b49e61320952
--- /dev/null
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bt-bmc.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+/*
+ * This is a BMC device used to communicate with the host
+ */
+#define DEVICE_NAME "ipmi-bt-host"
+
+#define BT_IO_BASE 0xe4
+#define BT_IRQ 10
+
+#define BT_CR0 0x0
+#define BT_CR0_IO_BASE 16
+#define BT_CR0_IRQ 12
+#define BT_CR0_EN_CLR_SLV_RDP 0x8
+#define BT_CR0_EN_CLR_SLV_WRP 0x4
+#define BT_CR0_ENABLE_IBT 0x1
+#define BT_CR1 0x4
+#define BT_CR1_IRQ_H2B 0x01
+#define BT_CR1_IRQ_HBUSY 0x40
+#define BT_CR2 0x8
+#define BT_CR2_IRQ_H2B 0x01
+#define BT_CR2_IRQ_HBUSY 0x40
+#define BT_CR3 0xc
+#define BT_CTRL 0x10
+#define BT_CTRL_B_BUSY 0x80
+#define BT_CTRL_H_BUSY 0x40
+#define BT_CTRL_OEM0 0x20
+#define BT_CTRL_SMS_ATN 0x10
+#define BT_CTRL_B2H_ATN 0x08
+#define BT_CTRL_H2B_ATN 0x04
+#define BT_CTRL_CLR_RD_PTR 0x02
+#define BT_CTRL_CLR_WR_PTR 0x01
+#define BT_BMC2HOST 0x14
+#define BT_INTMASK 0x18
+#define BT_INTMASK_B2H_IRQEN 0x01
+#define BT_INTMASK_B2H_IRQ 0x02
+#define BT_INTMASK_BMC_HWRST 0x80
+
+#define BT_BMC_BUFFER_SIZE 256
+
+struct bt_bmc {
+ struct device dev;
+ struct miscdevice miscdev;
+ void __iomem *base;
+ int irq;
+ wait_queue_head_t queue;
+ struct timer_list poll_timer;
+ struct mutex mutex;
+};
+
+static atomic_t open_count = ATOMIC_INIT(0);
+
+static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
+{
+ return ioread8(bt_bmc->base + reg);
+}
+
+static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
+{
+ iowrite8(data, bt_bmc->base + reg);
+}
+
+static void clr_rd_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL);
+}
+
+static void clr_wr_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL);
+}
+
+static void clr_h2b_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_H2B_ATN, BT_CTRL);
+}
+
+static void set_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY))
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void clr_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void set_b2h_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL);
+}
+
+static u8 bt_read(struct bt_bmc *bt_bmc)
+{
+ return bt_inb(bt_bmc, BT_BMC2HOST);
+}
+
+static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ buf[i] = bt_read(bt_bmc);
+ return n;
+}
+
+static void bt_write(struct bt_bmc *bt_bmc, u8 c)
+{
+ bt_outb(bt_bmc, c, BT_BMC2HOST);
+}
+
+static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ bt_write(bt_bmc, buf[i]);
+ return n;
+}
+
+static void set_sms_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL);
+}
+
+static struct bt_bmc *file_bt_bmc(struct file *file)
+{
+ return container_of(file->private_data, struct bt_bmc, miscdev);
+}
+
+static int bt_bmc_open(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ if (atomic_inc_return(&open_count) == 1) {
+ clr_b_busy(bt_bmc);
+ return 0;
+ }
+
+ atomic_dec(&open_count);
+ return -EBUSY;
+}
+
+/*
+ * The BT (Block Transfer) interface means that entire messages are
+ * buffered by the host before a notification is sent to the BMC that
+ * there is data to be read. The first byte is the length and the
+ * message data follows. The read operation just tries to capture the
+ * whole message before returning it to userspace.
+ *
+ * BT Message format :
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5:N
+ * Length NetFn/LUN Seq Cmd Data
+ *
+ */
+static ssize_t bt_bmc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 len;
+ int len_byte = 1;
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nread;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ WARN_ON(*ppos);
+
+ if (wait_event_interruptible(bt_bmc->queue,
+ bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ set_b_busy(bt_bmc);
+ clr_h2b_atn(bt_bmc);
+ clr_rd_ptr(bt_bmc);
+
+ /*
+ * The BT frames start with the message length, which does not
+ * include the length byte.
+ */
+ kbuffer[0] = bt_read(bt_bmc);
+ len = kbuffer[0];
+
+ /* We pass the length back to userspace as well */
+ if (len + 1 > count)
+ len = count - 1;
+
+ while (len) {
+ nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte);
+
+ bt_readn(bt_bmc, kbuffer + len_byte, nread);
+
+ if (copy_to_user(buf, kbuffer, nread + len_byte)) {
+ ret = -EFAULT;
+ break;
+ }
+ len -= nread;
+ buf += nread + len_byte;
+ ret += nread + len_byte;
+ len_byte = 0;
+ }
+
+ clr_b_busy(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
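Concretely, under the format above a host-to-BMC Get Device ID request
arrives as four bytes: length 3, NetFn 0x06 shifted into the NetFn/LUN
byte, a sequence number, then command 0x01. read() hands the whole frame,
length byte included, to userspace. Illustration only:

    u8 frame[] = { 0x03, 0x18, 0x01, 0x01 };	/* len, netfn/lun, seq, cmd */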
+
+/*
+ * BT Message response format :
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5 Byte 6:N
+ * Length NetFn/LUN Seq Cmd Code Data
+ */
+static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nwritten;
+
+	/*
+	 * Enforce the minimum BT response size: length, NetFn/LUN,
+	 * seq, cmd and completion code.
+	 */
+ if (count < 5)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ WARN_ON(*ppos);
+
+ /*
+	 * There's no interrupt for clearing BMC busy, so we have to
+	 * poll.
+ */
+ if (wait_event_interruptible(bt_bmc->queue,
+ !(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ clr_wr_ptr(bt_bmc);
+
+ while (count) {
+ nwritten = min_t(ssize_t, count, sizeof(kbuffer));
+ if (copy_from_user(&kbuffer, buf, nwritten)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ bt_writen(bt_bmc, kbuffer, nwritten);
+
+ count -= nwritten;
+ buf += nwritten;
+ ret += nwritten;
+ }
+
+ set_b2h_atn(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
+
+static long bt_bmc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long param)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ switch (cmd) {
+ case BT_BMC_IOCTL_SMS_ATN:
+ set_sms_atn(bt_bmc);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int bt_bmc_release(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ atomic_dec(&open_count);
+ set_b_busy(bt_bmc);
+ return 0;
+}
+
+static unsigned int bt_bmc_poll(struct file *file, poll_table *wait)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ unsigned int mask = 0;
+ u8 ctrl;
+
+ poll_wait(file, &bt_bmc->queue, wait);
+
+ ctrl = bt_inb(bt_bmc, BT_CTRL);
+
+ if (ctrl & BT_CTRL_H2B_ATN)
+ mask |= POLLIN;
+
+ if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))
+ mask |= POLLOUT;
+
+ return mask;
+}
+
+static const struct file_operations bt_bmc_fops = {
+ .owner = THIS_MODULE,
+ .open = bt_bmc_open,
+ .read = bt_bmc_read,
+ .write = bt_bmc_write,
+ .release = bt_bmc_release,
+ .poll = bt_bmc_poll,
+ .unlocked_ioctl = bt_bmc_ioctl,
+};
+
+static void poll_timer(unsigned long data)
+{
+ struct bt_bmc *bt_bmc = (void *)data;
+
+ bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
+ wake_up(&bt_bmc->queue);
+ add_timer(&bt_bmc->poll_timer);
+}
+
+static irqreturn_t bt_bmc_irq(int irq, void *arg)
+{
+ struct bt_bmc *bt_bmc = arg;
+ u32 reg;
+
+ reg = ioread32(bt_bmc->base + BT_CR2);
+ reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
+ if (!reg)
+ return IRQ_NONE;
+
+ /* ack pending IRQs */
+ iowrite32(reg, bt_bmc->base + BT_CR2);
+
+ wake_up(&bt_bmc->queue);
+ return IRQ_HANDLED;
+}
+
+static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ u32 reg;
+ int rc;
+
+	bt_bmc->irq = platform_get_irq(pdev, 0);
+	if (bt_bmc->irq <= 0) {
+		/* platform_get_irq() returns a negative errno on failure */
+		bt_bmc->irq = 0;
+		return -ENODEV;
+	}
+
+ rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
+ DEVICE_NAME, bt_bmc);
+ if (rc < 0) {
+ dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
+ bt_bmc->irq = 0;
+ return rc;
+ }
+
+ /*
+	 * Configure IRQs on the BMC, clearing the H2B and HBUSY bits;
+	 * H2B will be asserted when the BMC has data for us; HBUSY
+	 * will be cleared (along with B2H) when we can write the next
+	 * message to the BT buffer.
+ */
+ reg = ioread32(bt_bmc->base + BT_CR1);
+ reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY;
+ iowrite32(reg, bt_bmc->base + BT_CR1);
+
+ return 0;
+}
+
+static int bt_bmc_probe(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc;
+ struct device *dev;
+ struct resource *res;
+ int rc;
+
+ if (!pdev || !pdev->dev.of_node)
+ return -ENODEV;
+
+ dev = &pdev->dev;
+ dev_info(dev, "Found bt bmc device\n");
+
+ bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL);
+ if (!bt_bmc)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, bt_bmc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bt_bmc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(bt_bmc->base))
+ return PTR_ERR(bt_bmc->base);
+
+ mutex_init(&bt_bmc->mutex);
+ init_waitqueue_head(&bt_bmc->queue);
+
+	bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
+	bt_bmc->miscdev.name = DEVICE_NAME;
+	bt_bmc->miscdev.fops = &bt_bmc_fops;
+ bt_bmc->miscdev.parent = dev;
+ rc = misc_register(&bt_bmc->miscdev);
+ if (rc) {
+ dev_err(dev, "Unable to register misc device\n");
+ return rc;
+ }
+
+ bt_bmc_config_irq(bt_bmc, pdev);
+
+ if (bt_bmc->irq) {
+ dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
+ } else {
+ dev_info(dev, "No IRQ; using timer\n");
+ setup_timer(&bt_bmc->poll_timer, poll_timer,
+ (unsigned long)bt_bmc);
+ bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
+ add_timer(&bt_bmc->poll_timer);
+ }
+
+ iowrite32((BT_IO_BASE << BT_CR0_IO_BASE) |
+ (BT_IRQ << BT_CR0_IRQ) |
+ BT_CR0_EN_CLR_SLV_RDP |
+ BT_CR0_EN_CLR_SLV_WRP |
+ BT_CR0_ENABLE_IBT,
+ bt_bmc->base + BT_CR0);
+
+ clr_b_busy(bt_bmc);
+
+ return 0;
+}
+
+static int bt_bmc_remove(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&bt_bmc->miscdev);
+ if (!bt_bmc->irq)
+ del_timer_sync(&bt_bmc->poll_timer);
+ return 0;
+}
+
+static const struct of_device_id bt_bmc_match[] = {
+ { .compatible = "aspeed,ast2400-bt-bmc" },
+ { },
+};
+
+static struct platform_driver bt_bmc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = bt_bmc_match,
+ },
+ .probe = bt_bmc_probe,
+ .remove = bt_bmc_remove,
+};
+
+module_platform_driver(bt_bmc_driver);
+
+MODULE_DEVICE_TABLE(of, bt_bmc_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
+MODULE_DESCRIPTION("Linux device interface to the BT interface");
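Putting the pieces together, a BMC-side daemon would loop reading requests
from the misc device and writing responses back. A skeletal sketch with
error handling elided; the device name comes from DEVICE_NAME above, and
the response convention (NetFn | 1, echoed seq/cmd, completion code) is
standard IPMI:

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    int fd = open("/dev/ipmi-bt-host", O_RDWR);
    uint8_t req[256], rsp[256];
    ssize_t n = read(fd, req, sizeof(req));	/* blocks until H2B_ATN */
    if (n >= 4) {
    	rsp[0] = 4;			/* bytes that follow the length */
    	rsp[1] = req[1] | (1 << 2);	/* response NetFn = request NetFn | 1 */
    	rsp[2] = req[2];		/* seq */
    	rsp[3] = req[3];		/* cmd */
    	rsp[4] = 0x00;			/* completion code: success */
    	write(fd, rsp, 5);
    }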
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index d8619998cfb5..fcdd886819f5 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2891,11 +2891,11 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
intf->curr_channel = IPMI_MAX_CHANNELS;
}
+ rv = ipmi_bmc_register(intf, i);
+
if (rv == 0)
rv = add_proc_entries(intf, i);
- rv = ipmi_bmc_register(intf, i);
-
out:
if (rv) {
if (intf->proc_dir)
@@ -2982,8 +2982,6 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
int intf_num = intf->intf_num;
ipmi_user_t user;
- ipmi_bmc_unregister(intf);
-
mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
intf->intf_num = -1;
@@ -3007,6 +3005,7 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
mutex_unlock(&ipmi_interfaces_mutex);
remove_proc_entries(intf);
+ ipmi_bmc_unregister(intf);
/*
* Call all the watcher interfaces to tell them that
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d131e152c8ce..d6876d506220 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -479,8 +479,8 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
-static __u32 input_pool_data[INPUT_POOL_WORDS];
-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 190122e64a3a..85a449cf61e3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -203,7 +203,7 @@ at91_clk_register_programmable(struct regmap *regmap,
ret = clk_hw_register(NULL, &prog->hw);
if (ret) {
kfree(prog);
- hw = &prog->hw;
+ hw = ERR_PTR(ret);
}
return hw;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index b68bf573dcfb..8c7763fd9efc 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -502,8 +502,12 @@ static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate,
static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ const struct bcm2835_pll_data *data = pll->data;
u32 ndiv, fdiv;
+ rate = clamp(rate, data->min_rate, data->max_rate);
+
bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv);
return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1);
@@ -608,13 +612,6 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
u32 ana[4];
int i;
- if (rate < data->min_rate || rate > data->max_rate) {
- dev_err(cprman->dev, "%s: rate out of spec: %lu vs (%lu, %lu)\n",
- clk_hw_get_name(hw), rate,
- data->min_rate, data->max_rate);
- return -EINVAL;
- }
-
if (rate > data->max_fb_rate) {
use_fb_prediv = true;
rate /= 2;
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index b637f5979023..eb953d3b0b69 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -216,6 +216,7 @@ static int max77686_clk_probe(struct platform_device *pdev)
return -EINVAL;
}
+ drv_data->num_clks = num_clks;
drv_data->max_clk_data = devm_kcalloc(dev, num_clks,
sizeof(*drv_data->max_clk_data),
GFP_KERNEL);
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index fe364e63f8de..c0e8e1f196aa 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -195,7 +195,7 @@ static void __init hi6220_clk_sys_init(struct device_node *np)
hi6220_clk_register_divider(hi6220_div_clks_sys,
ARRAY_SIZE(hi6220_div_clks_sys), clk_data);
}
-CLK_OF_DECLARE(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
+CLK_OF_DECLARE_DRIVER(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
/* clocks in media controller */
@@ -252,7 +252,7 @@ static void __init hi6220_clk_media_init(struct device_node *np)
hi6220_clk_register_divider(hi6220_div_clks_media,
ARRAY_SIZE(hi6220_div_clks_media), clk_data);
}
-CLK_OF_DECLARE(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
+CLK_OF_DECLARE_DRIVER(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
/* clocks in pmctrl */
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 380c372d528e..f042bd2a6a99 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -8,6 +8,7 @@ config COMMON_CLK_MEDIATEK
config COMMON_CLK_MT8135
bool "Clock driver for Mediatek MT8135"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
---help---
@@ -15,6 +16,7 @@ config COMMON_CLK_MT8135
config COMMON_CLK_MT8173
bool "Clock driver for Mediatek MT8173"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
---help---
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 45905fc0d75b..cecb0fdfaef6 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -305,7 +305,7 @@ static const struct of_device_id armada_3700_periph_clock_of_match[] = {
};
static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
void __iomem *reg, spinlock_t *lock,
- struct device *dev, struct clk_hw *hw)
+ struct device *dev, struct clk_hw **hw)
{
const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
*rate_ops = NULL;
@@ -329,6 +329,7 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
gate->lock = lock;
gate_ops = gate_hw->init->ops;
gate->reg = reg + (u64)gate->reg;
+ gate->flags = CLK_GATE_SET_TO_DISABLE;
}
if (data->rate_hw) {
@@ -353,13 +354,13 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
}
}
- hw = clk_hw_register_composite(dev, data->name, data->parent_names,
+ *hw = clk_hw_register_composite(dev, data->name, data->parent_names,
data->num_parents, mux_hw,
mux_ops, rate_hw, rate_ops,
gate_hw, gate_ops, CLK_IGNORE_UNUSED);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(*hw))
+ return PTR_ERR(*hw);
return 0;
}
@@ -400,7 +401,7 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev)
spin_lock_init(&driver_data->lock);
for (i = 0; i < num_periph; i++) {
- struct clk_hw *hw = driver_data->hw_data->hws[i];
+ struct clk_hw **hw = &driver_data->hw_data->hws[i];
if (armada_3700_add_composite_clk(&data[i], reg,
&driver_data->lock, dev, hw))
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 51d152f735cc..17e68a724945 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -106,6 +106,7 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, exynos_audss_clk_of_match);
static void exynos_audss_clk_teardown(void)
{
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
index 5ffb898d0839..26c53f7963a4 100644
--- a/drivers/clk/uniphier/clk-uniphier-core.c
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -79,7 +79,7 @@ static int uniphier_clk_probe(struct platform_device *pdev)
hw_data->num = clk_num;
/* avoid returning NULL for unused idx */
- for (; clk_num >= 0; clk_num--)
+ while (--clk_num >= 0)
hw_data->hws[clk_num] = ERR_PTR(-EINVAL);
for (p = data; p->name; p++) {
@@ -111,6 +111,10 @@ static int uniphier_clk_remove(struct platform_device *pdev)
static const struct of_device_id uniphier_clk_match[] = {
/* System clock */
{
+ .compatible = "socionext,uniphier-sld3-clock",
+ .data = uniphier_sld3_sys_clk_data,
+ },
+ {
.compatible = "socionext,uniphier-ld4-clock",
.data = uniphier_ld4_sys_clk_data,
},
@@ -138,7 +142,7 @@ static const struct of_device_id uniphier_clk_match[] = {
.compatible = "socionext,uniphier-ld20-clock",
.data = uniphier_ld20_sys_clk_data,
},
- /* Media I/O clock */
+ /* Media I/O clock, SD clock */
{
.compatible = "socionext,uniphier-sld3-mio-clock",
.data = uniphier_sld3_mio_clk_data,
@@ -156,20 +160,20 @@ static const struct of_device_id uniphier_clk_match[] = {
.data = uniphier_sld3_mio_clk_data,
},
{
- .compatible = "socionext,uniphier-pro5-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-pro5-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
{
- .compatible = "socionext,uniphier-pxs2-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-pxs2-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
{
.compatible = "socionext,uniphier-ld11-mio-clock",
.data = uniphier_sld3_mio_clk_data,
},
{
- .compatible = "socionext,uniphier-ld20-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-ld20-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
/* Peripheral clock */
{
diff --git a/drivers/clk/uniphier/clk-uniphier-mio.c b/drivers/clk/uniphier/clk-uniphier-mio.c
index 6aa7ec768d0b..218d20f099ce 100644
--- a/drivers/clk/uniphier/clk-uniphier-mio.c
+++ b/drivers/clk/uniphier/clk-uniphier-mio.c
@@ -93,7 +93,7 @@ const struct uniphier_clk_data uniphier_sld3_mio_clk_data[] = {
{ /* sentinel */ }
};
-const struct uniphier_clk_data uniphier_pro5_mio_clk_data[] = {
+const struct uniphier_clk_data uniphier_pro5_sd_clk_data[] = {
UNIPHIER_MIO_CLK_SD_FIXED,
UNIPHIER_MIO_CLK_SD(0, 0),
UNIPHIER_MIO_CLK_SD(1, 1),
diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
index 15a2f2cbe0d9..2c243a894f3b 100644
--- a/drivers/clk/uniphier/clk-uniphier-mux.c
+++ b/drivers/clk/uniphier/clk-uniphier-mux.c
@@ -42,7 +42,7 @@ static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
int num_parents = clk_hw_get_num_parents(hw);
int ret;
- u32 val;
+ unsigned int val;
u8 i;
ret = regmap_read(mux->regmap, mux->reg, &val);
diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h
index 3ae184062388..0244dba1f4cf 100644
--- a/drivers/clk/uniphier/clk-uniphier.h
+++ b/drivers/clk/uniphier/clk-uniphier.h
@@ -115,7 +115,7 @@ extern const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_sld3_mio_clk_data[];
-extern const struct uniphier_clk_data uniphier_pro5_mio_clk_data[];
+extern const struct uniphier_clk_data uniphier_pro5_sd_clk_data[];
extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[];
extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[];
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 245190839359..e2c6e43cf8ca 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -417,6 +417,16 @@ config SYS_SUPPORTS_SH_TMU
config SYS_SUPPORTS_EM_STI
bool
+config CLKSRC_JCORE_PIT
+ bool "J-Core PIT timer driver" if COMPILE_TEST
+ depends on OF
+ depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ help
+	  This enables building the clocksource and clockevent driver for
+	  the integrated PIT in the J-Core synthesizable, open source SoC.
+
config SH_TIMER_CMT
bool "Renesas CMT timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index fd9d6df0bbc0..cf87f407f1ad 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
+obj-$(CONFIG_CLKSRC_JCORE_PIT) += jcore-pit.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
new file mode 100644
index 000000000000..54e1665aa03c
--- /dev/null
+++ b/drivers/clocksource/jcore-pit.c
@@ -0,0 +1,249 @@
+/*
+ * J-Core SoC PIT/clocksource driver
+ *
+ * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define PIT_IRQ_SHIFT 12
+#define PIT_PRIO_SHIFT 20
+#define PIT_ENABLE_SHIFT 26
+#define PIT_PRIO_MASK 0xf
+
+#define REG_PITEN 0x00
+#define REG_THROT 0x10
+#define REG_COUNT 0x14
+#define REG_BUSPD 0x18
+#define REG_SECHI 0x20
+#define REG_SECLO 0x24
+#define REG_NSEC 0x28
+
+struct jcore_pit {
+ struct clock_event_device ced;
+ void __iomem *base;
+ unsigned long periodic_delta;
+ u32 enable_val;
+};
+
+static void __iomem *jcore_pit_base;
+static struct jcore_pit __percpu *jcore_pit_percpu;
+
+static notrace u64 jcore_sched_clock_read(void)
+{
+ u32 seclo, nsec, seclo0;
+ __iomem void *base = jcore_pit_base;
+
+ seclo = readl(base + REG_SECLO);
+ do {
+ seclo0 = seclo;
+ nsec = readl(base + REG_NSEC);
+ seclo = readl(base + REG_SECLO);
+ } while (seclo0 != seclo);
+
+ return seclo * NSEC_PER_SEC + nsec;
+}
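The double read of SECLO brackets the NSEC read, so a seconds rollover
between the two register reads is caught and retried: the usual consistency
loop for split high/low counters. The same idea in generic form, with
hypothetical read_hi()/read_lo() accessors:

    do {
    	hi = read_hi();
    	lo = read_lo();
    } while (hi != read_hi());	/* retry if the high word moved */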
+
+static cycle_t jcore_clocksource_read(struct clocksource *cs)
+{
+ return jcore_sched_clock_read();
+}
+
+static int jcore_pit_disable(struct jcore_pit *pit)
+{
+ writel(0, pit->base + REG_PITEN);
+ return 0;
+}
+
+static int jcore_pit_set(unsigned long delta, struct jcore_pit *pit)
+{
+ jcore_pit_disable(pit);
+ writel(delta, pit->base + REG_THROT);
+ writel(pit->enable_val, pit->base + REG_PITEN);
+ return 0;
+}
+
+static int jcore_pit_set_state_shutdown(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_disable(pit);
+}
+
+static int jcore_pit_set_state_oneshot(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_disable(pit);
+}
+
+static int jcore_pit_set_state_periodic(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_set(pit->periodic_delta, pit);
+}
+
+static int jcore_pit_set_next_event(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_set(delta, pit);
+}
+
+static int jcore_pit_local_init(unsigned cpu)
+{
+ struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
+ unsigned buspd, freq;
+
+ pr_info("Local J-Core PIT init on cpu %u\n", cpu);
+
+ buspd = readl(pit->base + REG_BUSPD);
+ freq = DIV_ROUND_CLOSEST(NSEC_PER_SEC, buspd);
+ pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
+
+ clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
+
+ return 0;
+}
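
REG_BUSPD reports the bus clock period in nanoseconds, so the two divisions above derive the tick rate and the per-jiffy reload value. A hedged worked example with an assumed 20 ns bus period (50 MHz) and HZ=100:

unsigned buspd = 20;	/* assumed: REG_BUSPD = 20 ns per bus cycle */
unsigned freq  = DIV_ROUND_CLOSEST(NSEC_PER_SEC, buspd);	/* 50000000 Hz */
unsigned delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, 100 * buspd);	/* 500000 ticks = one 10 ms jiffy at HZ=100 */
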
+
+static irqreturn_t jcore_timer_interrupt(int irq, void *dev_id)
+{
+ struct jcore_pit *pit = this_cpu_ptr(dev_id);
+
+ if (clockevent_state_oneshot(&pit->ced))
+ jcore_pit_disable(pit);
+
+ pit->ced.event_handler(&pit->ced);
+
+ return IRQ_HANDLED;
+}
+
+static int __init jcore_pit_init(struct device_node *node)
+{
+ int err;
+ unsigned pit_irq, cpu;
+ unsigned long hwirq;
+ u32 irqprio, enable_val;
+
+ jcore_pit_base = of_iomap(node, 0);
+ if (!jcore_pit_base) {
+ pr_err("Error: Cannot map base address for J-Core PIT\n");
+ return -ENXIO;
+ }
+
+ pit_irq = irq_of_parse_and_map(node, 0);
+ if (!pit_irq) {
+ pr_err("Error: J-Core PIT has no IRQ\n");
+ return -ENXIO;
+ }
+
+ pr_info("Initializing J-Core PIT at %p IRQ %d\n",
+ jcore_pit_base, pit_irq);
+
+ err = clocksource_mmio_init(jcore_pit_base, "jcore_pit_cs",
+ NSEC_PER_SEC, 400, 32,
+ jcore_clocksource_read);
+ if (err) {
+ pr_err("Error registering clocksource device: %d\n", err);
+ return err;
+ }
+
+ sched_clock_register(jcore_sched_clock_read, 32, NSEC_PER_SEC);
+
+ jcore_pit_percpu = alloc_percpu(struct jcore_pit);
+ if (!jcore_pit_percpu) {
+ pr_err("Failed to allocate memory for clock event device\n");
+ return -ENOMEM;
+ }
+
+ err = request_irq(pit_irq, jcore_timer_interrupt,
+ IRQF_TIMER | IRQF_PERCPU,
+ "jcore_pit", jcore_pit_percpu);
+ if (err) {
+ pr_err("pit irq request failed: %d\n", err);
+ free_percpu(jcore_pit_percpu);
+ return err;
+ }
+
+ /*
+ * The J-Core PIT is not hard-wired to a particular IRQ, but
+ * integrated with the interrupt controller such that the IRQ it
+ * generates is programmable, as follows:
+ *
+ * The bit layout of the PIT enable register is:
+ *
+ * .....e..ppppiiiiiiii............
+ *
+ * where the .'s indicate unrelated/unused bits, e is enable,
+ * p is priority, and i is hard irq number.
+ *
+ * For the PIT included in AIC1 (obsolete but still in use),
+ * any hard irq (trap number) can be programmed via the 8
+ * iiiiiiii bits, and a priority (0-15) is programmable
+ * separately in the pppp bits.
+ *
+ * For the PIT included in AIC2 (current), the programming
+ * interface is equivalent modulo interrupt mapping. This is
+ * why a different compatible tag was not used. However only
+ * traps 64-127 (the ones actually intended to be used for
+ * interrupts, rather than syscalls/exceptions/etc.) can be
+ * programmed (the high 2 bits of i are ignored) and the
+ * priority pppp is <<2'd and or'd onto the irq number. This
+ * choice seems to have been made on the hardware engineering
+ * side under an assumption that preserving old AIC1 priority
+ * mappings was important. Future models will likely ignore
+ * the pppp field.
+ */
+ hwirq = irq_get_irq_data(pit_irq)->hwirq;
+ irqprio = (hwirq >> 2) & PIT_PRIO_MASK;
+ enable_val = (1U << PIT_ENABLE_SHIFT)
+ | (hwirq << PIT_IRQ_SHIFT)
+ | (irqprio << PIT_PRIO_SHIFT);
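
A worked example of this packing, with the hwirq value assumed for illustration:

/* Assumed hwirq 72 under the AIC2 mapping described above */
unsigned long hwirq = 72;
u32 irqprio    = (hwirq >> 2) & PIT_PRIO_MASK;	/* (72 >> 2) & 0xf = 2 */
u32 enable_val = (1U << PIT_ENABLE_SHIFT)	/* 0x04000000 */
	       | (hwirq << PIT_IRQ_SHIFT)	/* 72 << 12 = 0x00048000 */
	       | (irqprio << PIT_PRIO_SHIFT);	/* 2 << 20  = 0x00200000 */
/* enable_val == 0x04248000 */
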
+
+ for_each_present_cpu(cpu) {
+ struct jcore_pit *pit = per_cpu_ptr(jcore_pit_percpu, cpu);
+
+ pit->base = of_iomap(node, cpu);
+ if (!pit->base) {
+ pr_err("Unable to map PIT for cpu %u\n", cpu);
+ continue;
+ }
+
+ pit->ced.name = "jcore_pit";
+ pit->ced.features = CLOCK_EVT_FEAT_PERIODIC
+ | CLOCK_EVT_FEAT_ONESHOT
+ | CLOCK_EVT_FEAT_PERCPU;
+ pit->ced.cpumask = cpumask_of(cpu);
+ pit->ced.rating = 400;
+ pit->ced.irq = pit_irq;
+ pit->ced.set_state_shutdown = jcore_pit_set_state_shutdown;
+ pit->ced.set_state_periodic = jcore_pit_set_state_periodic;
+ pit->ced.set_state_oneshot = jcore_pit_set_state_oneshot;
+ pit->ced.set_next_event = jcore_pit_set_next_event;
+
+ pit->enable_val = enable_val;
+ }
+
+ cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
+ "AP_JCORE_TIMER_STARTING",
+ jcore_pit_local_init, NULL);
+
+ return 0;
+}
+
+CLOCKSOURCE_OF_DECLARE(jcore_pit, "jcore,pit", jcore_pit_init);
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index c184eb84101e..4f87f3e76d83 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -152,6 +152,13 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static cycle_t sun5i_clksrc_read(struct clocksource *clksrc)
+{
+ struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
+
+ return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1));
+}
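
The new read callback inverts the down-counting hardware value so the clocksource core still sees a monotonically increasing count; embedding the clocksource in sun5i_timer_clksrc apparently lets the rate notifier recover the driver state via container_of. An illustration with assumed raw counter values:

u32 up0 = ~0xfffffffeu;	/* raw 0xfffffffe inverts to 1 */
u32 up1 = ~0xfffffffdu;	/* raw 0xfffffffd inverts to 2: still increasing */
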
+
static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -210,8 +217,13 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
base + TIMER_CTL_REG(1));
- ret = clocksource_mmio_init(base + TIMER_CNTVAL_LO_REG(1), node->name,
- rate, 340, 32, clocksource_mmio_readl_down);
+ cs->clksrc.name = node->name;
+ cs->clksrc.rating = 340;
+ cs->clksrc.read = sun5i_clksrc_read;
+ cs->clksrc.mask = CLOCKSOURCE_MASK(32);
+ cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ ret = clocksource_register_hz(&cs->clksrc, rate);
if (ret) {
pr_err("Couldn't register clock source.\n");
goto err_remove_notifier;
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 1b2f28f69a81..4852d9efe74e 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -80,11 +80,17 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
{
struct cppc_cpudata *cpu;
struct cpufreq_freqs freqs;
+ u32 desired_perf;
int ret = 0;
cpu = all_cpu_data[policy->cpu];
- cpu->perf_ctrls.desired_perf = (u64)target_freq * policy->max / cppc_dmi_max_khz;
+ desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz;
+ /* Return if it is exactly the same perf */
+ if (desired_perf == cpu->perf_ctrls.desired_perf)
+ return ret;
+
+ cpu->perf_ctrls.desired_perf = desired_perf;
freqs.old = policy->cur;
freqs.new = target_freq;
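
The new scaling maps the requested frequency onto the CPPC abstract performance range using highest_perf instead of policy->max. A hedged worked example with assumed platform numbers:

/* All values assumed for illustration */
u64 target_freq      = 1400000;		/* kHz requested by the governor */
u64 highest_perf     = 300;		/* CPPC abstract performance cap */
u64 cppc_dmi_max_khz = 2800000;		/* max frequency derived from DMI */
u32 desired_perf = target_freq * highest_perf / cppc_dmi_max_khz;	/* 150 */
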
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 18da4f8051d3..13475890d792 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -17,6 +17,7 @@
struct cs_policy_dbs_info {
struct policy_dbs_info policy_dbs;
unsigned int down_skip;
+ unsigned int requested_freq;
};
static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
@@ -61,6 +62,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ unsigned int requested_freq = dbs_info->requested_freq;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int load = dbs_update(policy);
@@ -72,10 +74,16 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
if (cs_tuners->freq_step == 0)
goto out;
+ /*
+ * If requested_freq is out of range, it is likely that the limits
+ * changed in the meantime, so fall back to current frequency in that
+ * case.
+ */
+ if (requested_freq > policy->max || requested_freq < policy->min)
+ requested_freq = policy->cur;
+
/* Check for frequency increase */
if (load > dbs_data->up_threshold) {
- unsigned int requested_freq = policy->cur;
-
dbs_info->down_skip = 0;
/* if we are already at full speed then break out early */
@@ -83,8 +91,11 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
goto out;
requested_freq += get_freq_target(cs_tuners, policy);
+ if (requested_freq > policy->max)
+ requested_freq = policy->max;
__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
+ dbs_info->requested_freq = requested_freq;
goto out;
}
@@ -95,7 +106,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
/* Check for frequency decrease */
if (load < cs_tuners->down_threshold) {
- unsigned int freq_target, requested_freq = policy->cur;
+ unsigned int freq_target;
/*
* if we cannot reduce the frequency anymore, break out early
*/
@@ -109,6 +120,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
requested_freq = policy->min;
__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
+ dbs_info->requested_freq = requested_freq;
}
out:
@@ -287,6 +299,7 @@ static void cs_start(struct cpufreq_policy *policy)
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
dbs_info->down_skip = 0;
+ dbs_info->requested_freq = policy->cur;
}
static struct dbs_governor cs_governor = {
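
The governor now steps from its own last requested frequency rather than from policy->cur, which the driver may have rounded to a different supported frequency; stale requests outside the current limits fall back to policy->cur. A minimal sketch of that stepping logic, with the thresholds and the cs_step helper name assumed:

static unsigned int cs_step(unsigned int requested, unsigned int cur,
			    unsigned int load, unsigned int step,
			    unsigned int lo, unsigned int hi)
{
	/* limits changed since the last sample: restart from current freq */
	if (requested > hi || requested < lo)
		requested = cur;

	if (load > 80)			/* assumed up_threshold */
		requested = min(requested + step, hi);
	else if (load < 20)		/* assumed down_threshold */
		requested = requested > lo + step ? requested - step : lo;

	return requested;
}
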
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 806f2039571e..4737520ec823 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct _pid {
/**
* struct cpudata - Per CPU instance data storage
* @cpu: CPU number for this instance data
+ * @policy: CPUFreq policy value
* @update_util: CPUFreq utility callback information
* @update_util_set: CPUFreq utility callback is set
* @iowait_boost: iowait-related boost fraction
@@ -201,6 +202,7 @@ struct _pid {
struct cpudata {
int cpu;
+ unsigned int policy;
struct update_util_data update_util;
bool update_util_set;
@@ -225,7 +227,7 @@ struct cpudata {
static struct cpudata **all_cpu_data;
/**
- * struct pid_adjust_policy - Stores static PID configuration data
+ * struct pstate_adjust_policy - Stores static PID configuration data
* @sample_rate_ms: PID calculation sample rate in ms
* @sample_rate_ns: Sample rate calculation in ns
* @deadband: PID deadband
@@ -562,12 +564,12 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
int min, hw_min, max, hw_max, cpu, range, adj_range;
u64 value, cap;
- rdmsrl(MSR_HWP_CAPABILITIES, cap);
- hw_min = HWP_LOWEST_PERF(cap);
- hw_max = HWP_HIGHEST_PERF(cap);
- range = hw_max - hw_min;
-
for_each_cpu(cpu, cpumask) {
+ rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+ hw_min = HWP_LOWEST_PERF(cap);
+ hw_max = HWP_HIGHEST_PERF(cap);
+ range = hw_max - hw_min;
+
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
adj_range = limits->min_perf_pct * range / 100;
min = hw_min + adj_range;
@@ -1142,10 +1144,8 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
-static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
- int pstate = cpu->pstate.min_pstate;
-
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
cpu->pstate.current_pstate = pstate;
/*
@@ -1157,6 +1157,20 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
pstate_funcs.get_val(cpu, pstate));
}
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+}
+
+static void intel_pstate_max_within_limits(struct cpudata *cpu)
+{
+ int min_pstate, max_pstate;
+
+ update_turbo_state();
+ intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
+ intel_pstate_set_pstate(cpu, max_pstate);
+}
+
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -1232,6 +1246,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
int32_t busy_frac, boost;
+ int target, avg_pstate;
busy_frac = div_fp(sample->mperf, sample->tsc);
@@ -1242,7 +1257,26 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
busy_frac = boost;
sample->busy_scaled = busy_frac * 100;
- return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled);
+
+ target = limits->no_turbo || limits->turbo_disabled ?
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+ target += target >> 2;
+ target = mul_fp(target, busy_frac);
+ if (target < cpu->pstate.min_pstate)
+ target = cpu->pstate.min_pstate;
+
+ /*
+ * If the average P-state during the previous cycle was higher than the
+ * current target, add 50% of the difference to the target to reduce
+ * possible performance oscillations and offset possible performance
+ * loss related to moving the workload from one CPU to another within
+ * a package/module.
+ */
+ avg_pstate = get_avg_pstate(cpu);
+ if (avg_pstate > target)
+ target += (avg_pstate - target) >> 1;
+
+ return target;
}
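
A hedged worked example of the new target computation, assuming 8-bit fixed point (FRAC_BITS == 8) and illustrative pstate numbers:

int target = 30, avg_pstate = 24, min_pstate = 8;
int32_t busy_frac = 128;			/* 0.5 in 8.8 fixed point */

target += target >> 2;				/* +25% headroom -> 37 */
target = (target * busy_frac) >> 8;		/* mul_fp(37, 0.5) -> 18 */
if (target < min_pstate)
	target = min_pstate;			/* not taken here */
if (avg_pstate > target)
	target += (avg_pstate - target) >> 1;	/* 18 + (24 - 18)/2 = 21 */
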
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
@@ -1251,10 +1285,11 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
u64 duration_ns;
/*
- * perf_scaled is the average performance during the last sampling
- * period scaled by the ratio of the maximum P-state to the P-state
- * requested last time (in percent). That measures the system's
- * response to the previous P-state selection.
+ * perf_scaled is the ratio of the average P-state during the last
+ * sampling period to the P-state requested last time (in percent).
+ *
+ * That measures the system's response to the previous P-state
+ * selection.
*/
max_pstate = cpu->pstate.max_pstate_physical;
current_pstate = cpu->pstate.current_pstate;
@@ -1304,7 +1339,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
from = cpu->pstate.current_pstate;
- target_pstate = pstate_funcs.get_target_pstate(cpu);
+ target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
+ cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
intel_pstate_update_pstate(cpu, target_pstate);
@@ -1470,7 +1506,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
policy->cpuinfo.max_freq, policy->max);
- cpu = all_cpu_data[0];
+ cpu = all_cpu_data[policy->cpu];
+ cpu->policy = policy->policy;
+
if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&
policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
@@ -1478,7 +1516,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
policy->max = policy->cpuinfo.max_freq;
}
- if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
limits = &performance_limits;
if (policy->max >= policy->cpuinfo.max_freq) {
pr_debug("set performance\n");
@@ -1514,6 +1552,15 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
out:
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ /*
+ * NOHZ_FULL CPUs need this as the governor callback may not
+ * be invoked on them.
+ */
+ intel_pstate_clear_update_util_hook(policy->cpu);
+ intel_pstate_max_within_limits(cpu);
+ }
+
intel_pstate_set_update_util_hook(policy->cpu);
intel_pstate_hwp_set_policy(policy);
diff --git a/drivers/cpuidle/Kconfig.mips b/drivers/cpuidle/Kconfig.mips
index 4102be01d06a..512ee37b374b 100644
--- a/drivers/cpuidle/Kconfig.mips
+++ b/drivers/cpuidle/Kconfig.mips
@@ -5,7 +5,7 @@ config MIPS_CPS_CPUIDLE
bool "CPU Idle driver for MIPS CPS platforms"
depends on CPU_IDLE && MIPS_CPS
depends on SYS_SUPPORTS_MIPS_CPS
- select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT
+ select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT || CPU_MIPSR6
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select MIPS_CPS_PM
default y
diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c
index 1adb6980b707..926ba9871c62 100644
--- a/drivers/cpuidle/cpuidle-cps.c
+++ b/drivers/cpuidle/cpuidle-cps.c
@@ -163,7 +163,7 @@ static int __init cps_cpuidle_init(void)
core = cpu_data[cpu].core;
device = &per_cpu(cpuidle_dev, cpu);
device->cpu = cpu;
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
#endif
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index daadd20aa936..3e2ab3b14eea 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -14,7 +14,7 @@ if DEV_DAX
config DEV_DAX_PMEM
tristate "PMEM DAX: direct access to persistent memory"
- depends on NVDIMM_DAX
+ depends on LIBNVDIMM && NVDIMM_DAX
default DEV_DAX
help
Support raw access to persistent memory. Note that this
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 9630d8837ba9..4a15fa5df98b 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -44,7 +44,6 @@ static void dax_pmem_percpu_exit(void *data)
dev_dbg(dax_pmem->dev, "%s\n", __func__);
percpu_ref_exit(ref);
- wait_for_completion(&dax_pmem->cmp);
}
static void dax_pmem_percpu_kill(void *data)
@@ -54,6 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
dev_dbg(dax_pmem->dev, "%s\n", __func__);
percpu_ref_kill(ref);
+ wait_for_completion(&dax_pmem->cmp);
}
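
Moving the wait from the exit path to the kill path ensures all outstanding percpu references are drained before teardown proceeds, rather than only just before the ref state is freed. A minimal sketch of the corrected ordering (the release callback is presumed to signal the completion):

percpu_ref_kill(&dax_pmem->ref);	/* drop the initial reference */
wait_for_completion(&dax_pmem->cmp);	/* release fires once all users are gone */
/* ... teardown that must not race with in-flight users ... */
percpu_ref_exit(&dax_pmem->ref);	/* now merely frees the ref state */
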
static int dax_pmem_probe(struct device *dev)
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 478006b7764a..bf3ea7603a58 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -137,6 +137,10 @@ static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
cur_time = jiffies;
+ /* Immediately exit if previous_freq is not initialized yet. */
+ if (!devfreq->previous_freq)
+ goto out;
+
prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
if (prev_lev < 0) {
ret = prev_lev;
@@ -594,17 +598,19 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (devfreq->governor)
err = devfreq->governor->event_handler(devfreq,
DEVFREQ_GOV_START, NULL);
- mutex_unlock(&devfreq_list_lock);
if (err) {
dev_err(dev, "%s: Unable to start governor for the device\n",
__func__);
goto err_init;
}
+ mutex_unlock(&devfreq_list_lock);
return devfreq;
err_init:
list_del(&devfreq->node);
+ mutex_unlock(&devfreq_list_lock);
+
device_unregister(&devfreq->dev);
err_out:
return ERR_PTR(err);
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index 0fdae8608961..cd949800eed9 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -17,6 +17,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP
tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select PM_OPP
+ select REGMAP_MMIO
help
This adds the devfreq-event driver for Exynos SoC. It provides NoC
(Network on Chip) Probe counters to measure the bandwidth of AXI bus.
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index a5841403bde8..49e712aca0c1 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -176,9 +176,6 @@ static int exynos_nocp_get_event(struct devfreq_event_dev *edev,
return 0;
out:
- edata->load_count = 0;
- edata->total_count = 0;
-
dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n");
return ret;
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index ca957a5f4291..b8cde096a808 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -51,7 +51,7 @@ static void qcom_usb_extcon_detect_cable(struct work_struct *work)
if (ret)
return;
- extcon_set_state(info->edev, EXTCON_USB_HOST, !id);
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, !id);
}
static irqreturn_t qcom_usb_irq_handler(int irq, void *dev_id)
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 631c977b0da5..180f0a96528c 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -566,6 +566,11 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
PCILYNX_MAX_REGISTER);
+ if (lynx->registers == NULL) {
+ dev_err(&dev->dev, "Failed to map registers\n");
+ ret = -ENOMEM;
+ goto fail_deallocate_lynx;
+ }
lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
@@ -578,7 +583,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
lynx->rcv_buffer == NULL) {
dev_err(&dev->dev, "Failed to allocate receive buffer\n");
ret = -ENOMEM;
- goto fail_deallocate;
+ goto fail_deallocate_buffers;
}
lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus);
lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID);
@@ -641,7 +646,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
dev_err(&dev->dev,
"Failed to allocate shared interrupt %d\n", dev->irq);
ret = -EIO;
- goto fail_deallocate;
+ goto fail_deallocate_buffers;
}
lynx->misc.parent = &dev->dev;
@@ -668,7 +673,7 @@ fail_free_irq:
reg_write(lynx, PCI_INT_ENABLE, 0);
free_irq(lynx->pci_device->irq, lynx);
-fail_deallocate:
+fail_deallocate_buffers:
if (lynx->rcv_start_pcl)
pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
@@ -679,6 +684,8 @@ fail_deallocate:
pci_free_consistent(lynx->pci_device, PAGE_SIZE,
lynx->rcv_buffer, lynx->rcv_buffer_bus);
iounmap(lynx->registers);
+
+fail_deallocate_lynx:
kfree(lynx);
fail_disable:
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c06945160a41..5e23e2d305e7 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
-mno-mmx -mno-sse
cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
-cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
+cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \
-fno-builtin -fpic -mno-single-pic-base
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
@@ -79,5 +79,6 @@ quiet_cmd_stubcopy = STUBCPY $@
# decompressor. So move our .data to .data.efistub, which is preserved
# explicitly by the decompressor linker script.
#
-STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
+STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \
+ -R ___ksymtab+sort -R ___kcrctab+sort
STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 26ee00f6bd58..d011cb89d25e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -284,7 +284,7 @@ config GPIO_MM_LANTIQ
config GPIO_MOCKUP
tristate "GPIO Testing Driver"
- depends on GPIOLIB
+ depends on GPIOLIB && SYSFS
select GPIO_SYSFS
help
This enables GPIO Testing driver, which provides a way to test GPIO
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 9457e2022bf6..dc37dbe4b46d 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -219,6 +219,7 @@ static const struct of_device_id ath79_gpio_of_match[] = {
{ .compatible = "qca,ar9340-gpio" },
{},
};
+MODULE_DEVICE_TABLE(of, ath79_gpio_of_match);
static int ath79_gpio_probe(struct platform_device *pdev)
{
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 425501c39527..793518a30afe 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -239,7 +239,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(irq, h->host_data);
- irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq);
return 0;
}
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b9daa0bf32a4..ee1724806f46 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -308,8 +308,10 @@ static int mxs_gpio_probe(struct platform_device *pdev)
writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
- if (irq_base < 0)
- return irq_base;
+ if (irq_base < 0) {
+ err = irq_base;
+ goto out_iounmap;
+ }
port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
&irq_domain_simple_ops, NULL);
@@ -349,6 +351,8 @@ out_irqdomain_remove:
irq_domain_remove(port->domain);
out_irqdesc_free:
irq_free_descs(irq_base, 32);
+out_iounmap:
+ iounmap(port->base);
return err;
}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 45c8817d068c..e422568e14ad 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -794,6 +794,22 @@ static int pca953x_probe(struct i2c_client *client,
}
mutex_init(&chip->i2c_lock);
+ /*
+ * In case we have an i2c-mux controlled by a GPIO provided by an
+ * expander using the same driver higher on the device tree, read the
+ * i2c adapter nesting depth and use the retrieved value as lockdep
+ * subclass for chip->i2c_lock.
+ *
+ * REVISIT: This solution is not complete. It protects us from lockdep
+ * false positives when the expander controlling the i2c-mux is on
+ * a different level on the device tree, but not when it's on the same
+ * level on a different branch (in which case the subclass number
+ * would be the same).
+ *
+ * TODO: Once a correct solution is developed, a similar fix should be
+ * applied to all other i2c-controlled GPIO expanders (and potentially
+ * regmap-i2c).
+ */
lockdep_set_subclass(&chip->i2c_lock,
i2c_adapter_depth(client->adapter));
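
A hypothetical topology showing why the nesting depth works as a lockdep subclass; the chip_a/chip_b names are illustrative only:

lockdep_set_subclass(&chip_a->i2c_lock, 0);	/* expander on the root adapter */
lockdep_set_subclass(&chip_b->i2c_lock, 1);	/* expander behind the GPIO-driven mux */
/* a transfer to chip_b nests chip_a's lock inside chip_b's; distinct
 * subclasses keep lockdep from reporting recursion on one lock class */
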
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index e7d422a6b90b..5b0042776ec7 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -409,7 +409,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
* 801/1801/1600, bits are cleared when read.
* Edge detect register is not present on 801/1600/1801
*/
- if (stmpe->partnum != STMPE801 || stmpe->partnum != STMPE1600 ||
+ if (stmpe->partnum != STMPE801 && stmpe->partnum != STMPE1600 &&
stmpe->partnum != STMPE1801) {
stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
stmpe_reg_write(stmpe,
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index 99256115bea5..c2a80b4cbf32 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -66,6 +66,7 @@ static const struct of_device_id ts4800_gpio_of_match[] = {
{ .compatible = "technologic,ts4800-gpio", },
{},
};
+MODULE_DEVICE_TABLE(of, ts4800_gpio_of_match);
static struct platform_driver ts4800_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 58ece201b8e6..72a4b326fd0d 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -653,14 +653,17 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
int idx, i;
unsigned int irq_flags;
+ int ret = -ENOENT;
for (i = 0, idx = 0; idx <= index; i++) {
struct acpi_gpio_info info;
struct gpio_desc *desc;
desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
- if (IS_ERR(desc))
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
break;
+ }
if (info.gpioint && idx++ == index) {
int irq = gpiod_to_irq(desc);
@@ -679,7 +682,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
}
}
- return -ENOENT;
+ return ret;
}
EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f0fc3a0d37c8..20e09b7c2de3 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -333,6 +333,13 @@ struct linehandle_state {
u32 numdescs;
};
+#define GPIOHANDLE_REQUEST_VALID_FLAGS \
+ (GPIOHANDLE_REQUEST_INPUT | \
+ GPIOHANDLE_REQUEST_OUTPUT | \
+ GPIOHANDLE_REQUEST_ACTIVE_LOW | \
+ GPIOHANDLE_REQUEST_OPEN_DRAIN | \
+ GPIOHANDLE_REQUEST_OPEN_SOURCE)
+
static long linehandle_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
@@ -344,6 +351,8 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
int val;
+ memset(&ghd, 0, sizeof(ghd));
+
/* TODO: check if descriptors are really input */
for (i = 0; i < lh->numdescs; i++) {
val = gpiod_get_value_cansleep(lh->descs[i]);
@@ -444,6 +453,17 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
u32 lflags = handlereq.flags;
struct gpio_desc *desc;
+ if (offset >= gdev->ngpio) {
+ ret = -EINVAL;
+ goto out_free_descs;
+ }
+
+ /* Return an error if an unknown flag is set */
+ if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) {
+ ret = -EINVAL;
+ goto out_free_descs;
+ }
+
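
Validating against the whitelist means a request carrying a flag this kernel does not know is rejected up front instead of being silently ignored. A small illustration with an assumed unknown bit:

u32 lflags = GPIOHANDLE_REQUEST_INPUT | (1U << 30);	/* bit 30: not a known flag */

if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
	return -EINVAL;		/* taken: the unknown bit is refused */
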
desc = &gdev->descs[offset];
ret = gpiod_request(desc, lh->label);
if (ret)
@@ -536,6 +556,10 @@ struct lineevent_state {
struct mutex read_lock;
};
+#define GPIOEVENT_REQUEST_VALID_FLAGS \
+ (GPIOEVENT_REQUEST_RISING_EDGE | \
+ GPIOEVENT_REQUEST_FALLING_EDGE)
+
static unsigned int lineevent_poll(struct file *filep,
struct poll_table_struct *wait)
{
@@ -623,6 +647,8 @@ static long lineevent_ioctl(struct file *filep, unsigned int cmd,
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
int val;
+ memset(&ghd, 0, sizeof(ghd));
+
val = gpiod_get_value_cansleep(le->desc);
if (val < 0)
return val;
@@ -726,6 +752,18 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
lflags = eventreq.handleflags;
eflags = eventreq.eventflags;
+ if (offset >= gdev->ngpio) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+
+ /* Return an error if an unknown flag is set */
+ if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
+ (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+
/* This is just wrong: we don't look for events on output lines */
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
ret = -EINVAL;
@@ -823,6 +861,8 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
struct gpiochip_info chipinfo;
+ memset(&chipinfo, 0, sizeof(chipinfo));
+
strncpy(chipinfo.name, dev_name(&gdev->dev),
sizeof(chipinfo.name));
chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
@@ -839,7 +879,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
- if (lineinfo.line_offset > gdev->ngpio)
+ if (lineinfo.line_offset >= gdev->ngpio)
return -EINVAL;
desc = &gdev->descs[lineinfo.line_offset];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 2e3a0543760d..e3281d4e3e41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -765,7 +765,7 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
return ret;
}
-static void amdgpu_connector_destroy(struct drm_connector *connector)
+static void amdgpu_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
@@ -773,6 +773,12 @@ static void amdgpu_connector_destroy(struct drm_connector *connector)
drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
amdgpu_connector->ddc_bus->has_aux = false;
}
+}
+
+static void amdgpu_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
amdgpu_connector_free_edid(connector);
kfree(amdgpu_connector->con_priv);
drm_connector_unregister(connector);
@@ -826,6 +832,7 @@ static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.set_property = amdgpu_connector_set_lcd_property,
};
@@ -936,6 +943,7 @@ static const struct drm_connector_funcs amdgpu_connector_vga_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.set_property = amdgpu_connector_set_property,
};
@@ -1203,6 +1211,7 @@ static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = {
.detect = amdgpu_connector_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
@@ -1493,6 +1502,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
.detect = amdgpu_connector_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
@@ -1502,6 +1512,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.detect = amdgpu_connector_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_lcd_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e203e5561107..a5e2fcbef0f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,6 +43,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
ctx->rings[i].sequence = 1;
ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
}
+
+ ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7dbe85d67d26..b4f4a9239069 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1408,16 +1408,6 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
- adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
- continue;
- /* enable clockgating to save power */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_GATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
- return r;
- }
if (adev->ip_blocks[i].funcs->late_init) {
r = adev->ip_blocks[i].funcs->late_init((void *)adev);
if (r) {
@@ -1426,6 +1416,18 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
}
adev->ip_block_status[i].late_initialized = true;
}
+ /* skip CG for VCE/UVD, it's handled specially */
+ if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+ /* enable clockgating to save power */
+ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_GATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ return r;
+ }
+ }
}
return 0;
@@ -1435,6 +1437,30 @@ static int amdgpu_fini(struct amdgpu_device *adev)
{
int i, r;
+ /* need to disable SMC first */
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].hw)
+ continue;
+ if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+ /* ungate blocks before hw fini so that we can shut down the blocks safely */
+ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ return r;
+ }
+ r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+ /* XXX handle errors */
+ if (r) {
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ }
+ adev->ip_block_status[i].hw = false;
+ break;
+ }
+ }
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_status[i].hw)
continue;
@@ -2073,7 +2099,8 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
if (!adev->ip_block_status[i].valid)
continue;
if (adev->ip_blocks[i].funcs->check_soft_reset)
- adev->ip_blocks[i].funcs->check_soft_reset(adev);
+ adev->ip_block_status[i].hang =
+ adev->ip_blocks[i].funcs->check_soft_reset(adev);
if (adev->ip_block_status[i].hang) {
DRM_INFO("IP block:%d is hang!\n", i);
asic_hang = true;
@@ -2102,12 +2129,20 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
{
- if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
- DRM_INFO("Some block need full reset!\n");
- return true;
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
+ if (adev->ip_block_status[i].hang) {
+ DRM_INFO("Some block need full reset!\n");
+ return true;
+ }
+ }
}
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index fe36caf1b7d7..14f57d9915e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
printk("\n");
}
+
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
- u32 line_time_us, vblank_lines;
+ u32 vblank_in_pixels;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
- amdgpu_crtc->hw_mode.clock;
- vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+ vblank_in_pixels =
+ amdgpu_crtc->hw_mode.crtc_htotal *
+ (amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
+ (amdgpu_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
break;
}
}
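
Multiplying before dividing avoids the truncation the old per-line calculation suffered. A worked example with assumed 1080p-like timings:

u32 htotal = 2200, clock_khz = 148500, vblank_lines = 45;
u32 old_way = (htotal * 1000 / clock_khz) * vblank_lines;	/* 14 us/line * 45 = 630 us */
u32 new_way = htotal * vblank_lines * 1000 / clock_khz;		/* 99000000 / 148500 = 666 us */
/* the exact value is ~666.7 us; the old ordering truncated ~5% away */
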
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index aa074fac0c7f..f3efb1c5dae9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -754,6 +754,10 @@ static const char *amdgpu_vram_names[] = {
int amdgpu_bo_init(struct amdgpu_device *adev)
{
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(adev->mc.aper_base,
+ adev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
adev->mc.aper_size);
@@ -769,6 +773,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
arch_phys_wc_del(adev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e1fa8731d1e2..3cb5e903cd62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -345,8 +345,8 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
ent = debugfs_create_file(name,
S_IFREG | S_IRUGO, root,
ring, &amdgpu_debugfs_ring_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
+ if (!ent)
+ return -ENOMEM;
i_size_write(ent->d_inode, ring->ring_size + 12);
ring->ent = ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 887483b8b818..dcaf691f56b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -555,10 +555,13 @@ struct amdgpu_ttm_tt {
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+ unsigned int flags = 0;
unsigned pinned = 0;
int r;
+ if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+ flags |= FOLL_WRITE;
+
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
/* check that we only use anonymous memory
to prevent problems with writeback */
@@ -581,7 +584,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
list_add(&guptask.list, &gtt->guptasks);
spin_unlock(&gtt->guptasklock);
- r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+ r = get_user_pages(userptr, num_pages, flags, p, NULL);
spin_lock(&gtt->guptasklock);
list_del(&guptask.list);
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index f80a0834e889..3c082e143730 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1514,14 +1514,16 @@ static int cz_dpm_set_powergating_state(void *handle,
return 0;
}
-/* borrowed from KV, need future unify */
static int cz_dpm_get_temperature(struct amdgpu_device *adev)
{
int actual_temp = 0;
- uint32_t temp = RREG32_SMC(0xC0300E0C);
+ uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP);
+ uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
- if (temp)
+ if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
actual_temp = 1000 * ((temp / 8) - 49);
+ else
+ actual_temp = 1000 * (temp / 8);
return actual_temp;
}
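
The sensor counts in 1/8 degree steps, and CUR_TEMP_RANGE_SEL selects whether a -49 degree offset applies; the old code applied the offset whenever the raw value was nonzero. A worked example with an assumed raw reading:

u32 temp_field = 520;					/* assumed CUR_TEMP reading */
int with_offset    = 1000 * ((temp_field / 8) - 49);	/* 16000 millidegrees when RANGE_SEL is set */
int without_offset = 1000 * (temp_field / 8);		/* 65000 millidegrees otherwise */
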
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 613ebb7ed50f..4108c686aa7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3188,16 +3188,11 @@ static int dce_v10_0_wait_for_idle(void *handle)
return 0;
}
-static int dce_v10_0_check_soft_reset(void *handle)
+static bool dce_v10_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (dce_v10_0_is_display_hung(adev))
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true;
- else
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false;
-
- return 0;
+ return dce_v10_0_is_display_hung(adev);
}
static int dce_v10_0_soft_reset(void *handle)
@@ -3205,9 +3200,6 @@ static int dce_v10_0_soft_reset(void *handle)
u32 srbm_soft_reset = 0, tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
- return 0;
-
if (dce_v10_0_is_display_hung(adev))
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6c6ff57b1c95..ee6a48a09214 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4087,14 +4087,21 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
int r;
+ u32 tmp;
gfx_v8_0_rlc_stop(adev);
/* disable CG */
- WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
+ tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
+ tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
+ RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+ WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
if (adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10)
- WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
+ adev->asic_type == CHIP_POLARIS10) {
+ tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
+ tmp &= ~0x3;
+ WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
+ }
/* disable PG */
WREG32(mmRLC_PG_CNTL, 0);
@@ -5137,7 +5144,7 @@ static int gfx_v8_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v8_0_check_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -5189,16 +5196,14 @@ static int gfx_v8_0_check_soft_reset(void *handle)
SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
if (grbm_soft_reset || srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true;
adev->gfx.grbm_soft_reset = grbm_soft_reset;
adev->gfx.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false;
adev->gfx.grbm_soft_reset = 0;
adev->gfx.srbm_soft_reset = 0;
+ return false;
}
-
- return 0;
}
static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
@@ -5226,7 +5231,8 @@ static int gfx_v8_0_pre_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5264,7 +5270,8 @@ static int gfx_v8_0_soft_reset(void *handle)
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5334,7 +5341,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1b319f5bc696..c22ef140a542 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1099,7 +1099,7 @@ static int gmc_v8_0_wait_for_idle(void *handle)
}
-static int gmc_v8_0_check_soft_reset(void *handle)
+static bool gmc_v8_0_check_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1116,20 +1116,19 @@ static int gmc_v8_0_check_soft_reset(void *handle)
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true;
adev->mc.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false;
adev->mc.srbm_soft_reset = 0;
+ return false;
}
- return 0;
}
static int gmc_v8_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ if (!adev->mc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_stop(adev, &adev->mc.save);
@@ -1145,7 +1144,7 @@ static int gmc_v8_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ if (!adev->mc.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->mc.srbm_soft_reset;
@@ -1175,7 +1174,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ if (!adev->mc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_resume(adev, &adev->mc.save);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index f325fd86430b..a9d10941fb53 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1268,7 +1268,7 @@ static int sdma_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v3_0_check_soft_reset(void *handle)
+static bool sdma_v3_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -1281,14 +1281,12 @@ static int sdma_v3_0_check_soft_reset(void *handle)
}
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true;
adev->sdma.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false;
adev->sdma.srbm_soft_reset = 0;
+ return false;
}
-
- return 0;
}
static int sdma_v3_0_pre_soft_reset(void *handle)
@@ -1296,7 +1294,7 @@ static int sdma_v3_0_pre_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1315,7 +1313,7 @@ static int sdma_v3_0_post_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1335,7 +1333,7 @@ static int sdma_v3_0_soft_reset(void *handle)
u32 srbm_soft_reset = 0;
u32 tmp;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 8bd08925b370..3de7bca5854b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3499,6 +3499,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
max_sclk = 75000;
max_mclk = 80000;
}
+ /* Limit clocks for some HD8600 parts */
+ if (adev->pdev->device == 0x6660 &&
+ adev->pdev->revision == 0x83) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index d127d59f953a..b4ea229bb449 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -373,7 +373,7 @@ static int tonga_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int tonga_ih_check_soft_reset(void *handle)
+static bool tonga_ih_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -384,21 +384,19 @@ static int tonga_ih_check_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true;
adev->irq.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false;
adev->irq.srbm_soft_reset = 0;
+ return false;
}
-
- return 0;
}
static int tonga_ih_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ if (!adev->irq.srbm_soft_reset)
return 0;
return tonga_ih_hw_fini(adev);
@@ -408,7 +406,7 @@ static int tonga_ih_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ if (!adev->irq.srbm_soft_reset)
return 0;
return tonga_ih_hw_init(adev);
@@ -419,7 +417,7 @@ static int tonga_ih_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ if (!adev->irq.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->irq.srbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index e0fd9f21ed95..ab3df6d75656 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -770,7 +770,7 @@ static int uvd_v6_0_wait_for_idle(void *handle)
}
#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
-static int uvd_v6_0_check_soft_reset(void *handle)
+static bool uvd_v6_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -782,19 +782,19 @@ static int uvd_v6_0_check_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true;
adev->uvd.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false;
adev->uvd.srbm_soft_reset = 0;
+ return false;
}
- return 0;
}
+
static int uvd_v6_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ if (!adev->uvd.srbm_soft_reset)
return 0;
uvd_v6_0_stop(adev);
@@ -806,7 +806,7 @@ static int uvd_v6_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ if (!adev->uvd.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->uvd.srbm_soft_reset;
@@ -836,7 +836,7 @@ static int uvd_v6_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ if (!adev->uvd.srbm_soft_reset)
return 0;
mdelay(5);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 3f6db4ec0102..8533269ec160 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -561,7 +561,7 @@ static int vce_v3_0_wait_for_idle(void *handle)
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
-static int vce_v3_0_check_soft_reset(void *handle)
+static bool vce_v3_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -591,16 +591,15 @@ static int vce_v3_0_check_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ mutex_unlock(&adev->grbm_idx_mutex);
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
adev->vce.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
adev->vce.srbm_soft_reset = 0;
+ return false;
}
- mutex_unlock(&adev->grbm_idx_mutex);
- return 0;
}
static int vce_v3_0_soft_reset(void *handle)
@@ -608,7 +607,7 @@ static int vce_v3_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ if (!adev->vce.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->vce.srbm_soft_reset;
@@ -638,7 +637,7 @@ static int vce_v3_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
@@ -651,7 +650,7 @@ static int vce_v3_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index c934b78c9e2f..bec8125bceb0 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -165,7 +165,7 @@ struct amd_ip_funcs {
/* poll for idle */
int (*wait_for_idle)(void *handle);
/* check soft reset the IP block */
- int (*check_soft_reset)(void *handle);
+ bool (*check_soft_reset)(void *handle);
/* pre soft reset the IP block */
int (*pre_soft_reset)(void *handle);
/* soft reset the IP block */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 92b117843875..8cee4e0f9fde 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -49,6 +49,7 @@ static const pem_event_action * const uninitialize_event[] = {
uninitialize_display_phy_access_tasks,
disable_gfx_voltage_island_power_gating_tasks,
disable_gfx_clock_gating_tasks,
+ uninitialize_thermal_controller_tasks,
set_boot_state_tasks,
adjust_power_state_tasks,
disable_dynamic_state_management_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 7e4fcbbbe086..960424913496 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1785,6 +1785,21 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
return 0;
}
+static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+ int actual_temp = 0;
+ uint32_t val = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
+ uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
+
+ if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
+ actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ else
+ actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return actual_temp;
+}
+
static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1881,6 +1896,9 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
case AMDGPU_PP_SENSOR_VCE_POWER:
*value = cz_hwmgr->vce_power_gated ? 0 : 1;
return 0;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ *value = cz_thermal_get_temperature(hwmgr);
+ return 0;
default:
return -EINVAL;
}
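[Editor's sketch] A self-contained version of the decode logic added above. The register field layout follows the hunk; the value 1000 for PP_TEMPERATURE_UNITS_PER_CENTIGRADES is an assumption for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define UNITS_PER_DEGREE 1000  /* assumed PP_TEMPERATURE_UNITS_PER_CENTIGRADES */

    /* CUR_TEMP is in 1/8-degree steps; when the range-select bit is set,
     * the reading carries a 49-degree offset. */
    static int decode_temp(uint32_t cur_temp, int range_sel)
    {
            int deg = cur_temp / 8;

            if (range_sel)
                    deg -= 49;
            return deg * UNITS_PER_DEGREE;
    }

    int main(void)
    {
            /* raw field 520 with range select set: 520/8 - 49 = 16 degrees */
            printf("%d\n", decode_temp(520, 1));    /* prints 16000 */
            return 0;
    }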
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 508245d49d33..609996c84ad5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1030,20 +1030,19 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
/* disable SCLK dpm */
- if (!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Disable) == 0),
- "Failed to disable SCLK DPM!",
- return -EINVAL);
+ if (!data->sclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable SCLK DPM when DPM is disabled",
+ return 0);
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
+ }
/* disable MCLK dpm */
if (!data->mclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Disable) == 0),
- "Failed to disable MCLK DPM!",
- return -EINVAL);
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable MCLK DPM when DPM is disabled",
+ return 0);
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
}
return 0;
@@ -1069,10 +1068,13 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
return -EINVAL);
}
- if (smu7_disable_sclk_mclk_dpm(hwmgr)) {
- printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
- return -EINVAL;
- }
+ smu7_disable_sclk_mclk_dpm(hwmgr);
+
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable voltage DPM when DPM is disabled",
+ return 0);
+
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);
return 0;
}
@@ -1226,7 +1228,7 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
- smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+ smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);
tmp_result = smu7_enable_sclk_control(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1306,6 +1308,12 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable thermal auto throttle!", result = tmp_result);
+ if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)),
+ "Failed to disable AVFS!",
+ return -EINVAL);
+ }
+
tmp_result = smu7_stop_dpm(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to stop DPM!", result = tmp_result);
@@ -1452,8 +1460,10 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
- if (table_info != NULL)
- sclk_table = table_info->vdd_dep_on_sclk;
+ if (table_info == NULL)
+ return -EINVAL;
+
+ sclk_table = table_info->vdd_dep_on_sclk;
for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
@@ -3802,13 +3812,15 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
{
- const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1);
- const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2);
+ const struct smu7_power_state *psa;
+ const struct smu7_power_state *psb;
int i;
if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
return -EINVAL;
+ psa = cast_const_phw_smu7_power_state(pstate1);
+ psb = cast_const_phw_smu7_power_state(pstate2);
/* If the two states don't even have the same number of performance levels they cannot be the same state. */
if (psa->performance_level_count != psb->performance_level_count) {
*equal = false;
@@ -4324,6 +4336,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.set_mclk_od = smu7_set_mclk_od,
.get_clock_by_type = smu7_get_clock_by_type,
.read_sensor = smu7_read_sensor,
+ .dynamic_state_management_disable = smu7_disable_dpm_tasks,
};
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index eda802bc63c8..8c889caba420 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -2458,7 +2458,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
- if (!data->is_memory_gddr5) {
+ if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
for (k = 0; k < table->num_entries; k++) {
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2f58e9e2a59c..a51f8cbcfe26 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -332,17 +332,19 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- if (dcrtc->dpms != dpms) {
- dcrtc->dpms = dpms;
- if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms))
- WARN_ON(clk_prepare_enable(dcrtc->clk));
- armada_drm_crtc_update(dcrtc);
- if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms))
- clk_disable_unprepare(dcrtc->clk);
+ if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
if (dpms_blanked(dpms))
armada_drm_vblank_off(dcrtc);
- else
+ else if (!IS_ERR(dcrtc->clk))
+ WARN_ON(clk_prepare_enable(dcrtc->clk));
+ dcrtc->dpms = dpms;
+ armada_drm_crtc_update(dcrtc);
+ if (!dpms_blanked(dpms))
drm_crtc_vblank_on(&dcrtc->crtc);
+ else if (!IS_ERR(dcrtc->clk))
+ clk_disable_unprepare(dcrtc->clk);
+ } else if (dcrtc->dpms != dpms) {
+ dcrtc->dpms = dpms;
}
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 608df4c90520..0743e65cb240 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -267,6 +267,8 @@ int ast_mm_init(struct ast_private *ast)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -275,11 +277,15 @@ int ast_mm_init(struct ast_private *ast)
void ast_mm_fini(struct ast_private *ast)
{
+ struct drm_device *dev = ast->dev;
+
ttm_bo_device_release(&ast->ttm.bdev);
ast_ttm_global_release(ast);
arch_phys_wc_del(ast->fb_mtrr);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
void ast_ttm_placement(struct ast_bo *bo, int domain)
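[Editor's sketch] The same reserve/free pairing recurs in the cirrus, mgag200, nouveau, and radeon hunks below; a condensed sketch of the pattern (vram_wc_init/vram_wc_fini are illustrative names, not driver functions):

    static int fb_mtrr;     /* cookie returned by arch_phys_wc_add() */

    /* Reserve the VRAM BAR as write-combined before adding the MTRR... */
    static void vram_wc_init(struct pci_dev *pdev)
    {
            arch_io_reserve_memtype_wc(pci_resource_start(pdev, 0),
                                       pci_resource_len(pdev, 0));
            fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
                                       pci_resource_len(pdev, 0));
    }

    /* ...and release both again on teardown. */
    static void vram_wc_fini(struct pci_dev *pdev)
    {
            arch_phys_wc_del(fb_mtrr);
            arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
                                    pci_resource_len(pdev, 0));
    }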
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bb2438dd8733..5e7e63ce7bce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -267,6 +267,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -276,6 +279,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
void cirrus_mm_fini(struct cirrus_device *cirrus)
{
+ struct drm_device *dev = cirrus->dev;
+
if (!cirrus->mm_inited)
return;
@@ -285,6 +290,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
arch_phys_wc_del(cirrus->fb_mtrr);
cirrus->fb_mtrr = 0;
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 1df2d33d0b40..ffb2ab389d1d 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -54,9 +54,6 @@ int drm_name_info(struct seq_file *m, void *data)
mutex_lock(&dev->master_mutex);
master = dev->master;
- if (!master)
- goto out_unlock;
-
seq_printf(m, "%s", dev->driver->name);
if (dev->dev)
seq_printf(m, " dev=%s", dev_name(dev->dev));
@@ -65,7 +62,6 @@ int drm_name_info(struct seq_file *m, void *data)
if (dev->unique)
seq_printf(m, " unique=%s", dev->unique);
seq_printf(m, "\n");
-out_unlock:
mutex_unlock(&dev->master_mutex);
return 0;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index cb86c7e5495c..d9230132dfbc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -329,20 +329,34 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
/*
* Append a LINK to the submitted command buffer to return to
* the ring buffer. return_target is the ring target address.
- * We need three dwords: event, wait, link.
+ * We need at most 7 dwords in the return target: 2 cache flush +
+ * 2 semaphore stall + 1 event + 1 wait + 1 link.
*/
- return_dwords = 3;
+ return_dwords = 7;
return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
CMD_LINK(cmdbuf, return_dwords, return_target);
/*
- * Append event, wait and link pointing back to the wait
- * command to the ring buffer.
+ * Append a cache flush, stall, event, wait and link pointing back to
+ * the wait command to the ring buffer.
*/
+ if (gpu->exec_state == ETNA_PIPE_2D) {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+ VIVS_GL_FLUSH_CACHE_PE2D);
+ } else {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+ VIVS_GL_FLUSH_CACHE_DEPTH |
+ VIVS_GL_FLUSH_CACHE_COLOR);
+ CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+ VIVS_TS_FLUSH_CACHE_FLUSH);
+ }
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, return_target + 8);
+ CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+ buffer->user_size - 4);
if (drm_debug & DRM_UT_DRIVER)
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5ce3603e6eac..0370b842d9cc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct page **pvec;
uintptr_t ptr;
+ unsigned int flags = 0;
pvec = drm_malloc_ab(npages, sizeof(struct page *));
if (!pvec)
return ERR_PTR(-ENOMEM);
+ if (!etnaviv_obj->userptr.ro)
+ flags |= FOLL_WRITE;
+
pinned = 0;
ptr = etnaviv_obj->userptr.ptr;
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
- !etnaviv_obj->userptr.ro, 0,
- pvec + pinned, NULL);
+ flags, pvec + pinned, NULL);
if (ret < 0)
break;
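[Editor's sketch] The same conversion from a write/force argument pair to a gup flags word recurs in the exynos, i915, radeon, and via hunks below. The common shape, sketched (read_only is an illustrative variable):

    /* Writability is now requested via FOLL_WRITE in a single flags
     * argument rather than separate write/force ints. */
    unsigned int gup_flags = 0;

    if (!read_only)
            gup_flags |= FOLL_WRITE;

    ret = get_user_pages_remote(task, mm, start, nr_pages,
                                gup_flags, pages, NULL);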
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index d3796ed8d8c5..169ac96e8f08 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -330,7 +330,8 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
return (u32)buf->vram_node.start;
mutex_lock(&mmu->lock);
- ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size);
+ ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
+ buf->size + SZ_64K);
if (ret < 0) {
mutex_unlock(&mmu->lock);
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index aa92decf4233..fbd13fabdf2d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -488,7 +488,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
goto err_free;
}
- ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+ ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+ g2d_userptr->vec);
if (ret != npages) {
DRM_ERROR("failed to get user pages from userptr.\n");
if (ret < 0)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 3371635cd4d7..b2d5e188b1b8 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -51,6 +51,7 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
DCU_MODE_DCU_MODE(DCU_MODE_OFF));
regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
DCU_UPDATE_MODE_READREG);
+ clk_disable_unprepare(fsl_dev->pix_clk);
}
static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
@@ -58,6 +59,7 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ clk_prepare_enable(fsl_dev->pix_clk);
regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
DCU_MODE_DCU_MODE_MASK,
DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
@@ -116,8 +118,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
return;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0884c45aefe8..e04efbed1a54 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -267,12 +267,8 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
return ret;
}
- ret = clk_prepare_enable(fsl_dev->pix_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable pix clk\n");
- goto disable_dcu_clk;
- }
-
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_enable(fsl_dev->tcon);
fsl_dcu_drm_init_planes(fsl_dev->drm);
drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
@@ -284,10 +280,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
enable_irq(fsl_dev->irq);
return 0;
-
-disable_dcu_clk:
- clk_disable_unprepare(fsl_dev->clk);
- return ret;
}
#endif
@@ -401,18 +393,12 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
goto disable_clk;
}
- ret = clk_prepare_enable(fsl_dev->pix_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable pix clk\n");
- goto unregister_pix_clk;
- }
-
fsl_dev->tcon = fsl_tcon_init(dev);
drm = drm_dev_alloc(driver, dev);
if (IS_ERR(drm)) {
ret = PTR_ERR(drm);
- goto disable_pix_clk;
+ goto unregister_pix_clk;
}
fsl_dev->dev = dev;
@@ -433,8 +419,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
unref:
drm_dev_unref(drm);
-disable_pix_clk:
- clk_disable_unprepare(fsl_dev->pix_clk);
unregister_pix_clk:
clk_unregister(fsl_dev->pix_clk);
disable_clk:
@@ -447,7 +431,6 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
clk_disable_unprepare(fsl_dev->clk);
- clk_disable_unprepare(fsl_dev->pix_clk);
clk_unregister(fsl_dev->pix_clk);
drm_put_dev(fsl_dev->drm);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index a7e5486bd1e9..9e6f7d8112b3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -211,11 +211,6 @@ void fsl_dcu_drm_init_planes(struct drm_device *dev)
for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
}
- regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_OFF));
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
}
struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 26edcc899712..e1dd75b18118 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -20,38 +20,6 @@
#include "fsl_dcu_drm_drv.h"
#include "fsl_tcon.h"
-static int
-fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return 0;
-}
-
-static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
- if (fsl_dev->tcon)
- fsl_tcon_bypass_disable(fsl_dev->tcon);
-}
-
-static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
- if (fsl_dev->tcon)
- fsl_tcon_bypass_enable(fsl_dev->tcon);
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .atomic_check = fsl_dcu_drm_encoder_atomic_check,
- .disable = fsl_dcu_drm_encoder_disable,
- .enable = fsl_dcu_drm_encoder_enable,
-};
-
static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -68,13 +36,16 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
int ret;
encoder->possible_crtcs = 1;
+
+ /* Use bypass mode for parallel RGB/LVDS encoder */
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_enable(fsl_dev->tcon);
+
ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
DRM_MODE_ENCODER_LVDS, NULL);
if (ret < 0)
return ret;
- drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e537930c64b5..c6f780f5abc9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
+ unsigned int flags = 0;
+
+ if (!obj->userptr.read_only)
+ flags |= FOLL_WRITE;
ret = -EFAULT;
if (atomic_inc_not_zero(&mm->mm_users)) {
@@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
- !obj->userptr.read_only, 0,
+ flags,
pvec + pinned, NULL);
if (ret < 0)
break;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 919b35f2ad24..dcf7d11ac380 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -266,6 +266,9 @@ int mgag200_mm_init(struct mga_device *mdev)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -274,10 +277,14 @@ int mgag200_mm_init(struct mga_device *mdev)
void mgag200_mm_fini(struct mga_device *mdev)
{
+ struct drm_device *dev = mdev->dev;
+
ttm_bo_device_release(&mdev->ttm.bdev);
mgag200_ttm_global_release(mdev);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
mdev->fb_mtrr = 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1825dbc33192..a6dbe8258040 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -398,6 +398,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* VRAM init */
drm->gem.vram_available = drm->device.info.ram_user;
+ arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
+
ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
drm->gem.vram_available >> PAGE_SHIFT);
if (ret) {
@@ -430,6 +433,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
+ struct nvkm_device *device = nvxx_device(&drm->device);
+
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
@@ -439,4 +444,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
+ arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
+
}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 6a4b020dd0b4..5a26eb4545aa 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
struct drm_device *dev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
- u32 line_time_us, vblank_lines;
+ u32 vblank_in_pixels;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
- line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
- radeon_crtc->hw_mode.clock;
- vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
- radeon_crtc->hw_mode.crtc_vdisplay +
- (radeon_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
+ vblank_in_pixels =
+ radeon_crtc->hw_mode.crtc_htotal *
+ (radeon_crtc->hw_mode.crtc_vblank_end -
+ radeon_crtc->hw_mode.crtc_vdisplay +
+ (radeon_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
break;
}
}
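[Editor's sketch] Multiplying first and dividing once avoids compounding the truncation of line_time_us. A standalone check with 1080p-like timings; the numbers are illustrative, not taken from the driver:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t htotal = 2200, vblank_lines = 45, clock_khz = 148500;

            /* old: per-line time truncated to whole microseconds first */
            uint32_t line_time_us = htotal * 1000 / clock_khz;  /* 14, really 14.81 */
            uint32_t old_us = vblank_lines * line_time_us;      /* 630 */

            /* new: one division over the whole vblank in pixels */
            uint32_t vblank_in_pixels = htotal * vblank_lines;  /* 99000 */
            uint32_t new_us = (uint64_t)vblank_in_pixels * 1000 / clock_khz; /* 666 */

            printf("old=%u new=%u (exact ~666.67)\n", old_us, new_us);
            return 0;
    }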
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 50e96d2c593d..e18839d52e3e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -927,6 +927,16 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
return ret;
}
+static void radeon_connector_unregister(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->ddc_bus->has_aux) {
+ drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
+ radeon_connector->ddc_bus->has_aux = false;
+ }
+}
+
static void radeon_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -984,6 +994,7 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_lvds_set_property,
};
@@ -1111,6 +1122,7 @@ static const struct drm_connector_funcs radeon_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_connector_set_property,
};
@@ -1188,6 +1200,7 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_tv_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_connector_set_property,
};
@@ -1519,6 +1532,7 @@ static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
.detect = radeon_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1832,6 +1846,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1841,6 +1856,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1850,6 +1866,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b8ab30a7dd6d..cdb8cb568c15 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1675,20 +1675,20 @@ int radeon_modeset_init(struct radeon_device *rdev)
void radeon_modeset_fini(struct radeon_device *rdev)
{
- radeon_fbdev_fini(rdev);
- kfree(rdev->mode_info.bios_hardcoded_edid);
-
- /* free i2c buses */
- radeon_i2c_fini(rdev);
-
if (rdev->mode_info.mode_config_initialized) {
- radeon_afmt_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
drm_crtc_force_disable_all(rdev->ddev);
+ radeon_fbdev_fini(rdev);
+ radeon_afmt_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
+
+ kfree(rdev->mode_info.bios_hardcoded_edid);
+
+ /* free i2c buses */
+ radeon_i2c_fini(rdev);
}
static bool is_hdtv_mode(const struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 91c8f4339566..00ea0002b539 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -96,9 +96,10 @@
* 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
* 2.46.0 - Add PFP_SYNC_ME support on evergreen
* 2.47.0 - Add UVD_NO_OP register support
+ * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 47
+#define KMS_DRIVER_MINOR 48
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 021aa005623f..29f7817af821 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -982,9 +982,8 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
return;
+ WARN_ON(i2c->has_aux);
i2c_del_adapter(&i2c->adapter);
- if (i2c->has_aux)
- drm_dp_aux_unregister(&i2c->aux);
kfree(i2c);
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index be30861afae9..41b72ce6613f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -446,6 +446,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
int radeon_bo_init(struct radeon_device *rdev)
{
+ /* reserve the VRAM aperture as write-combined (WC) in PAT */
+ arch_io_reserve_memtype_wc(rdev->mc.aper_base,
+ rdev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
if (!rdev->fastfb_working) {
rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
@@ -463,6 +467,7 @@ void radeon_bo_fini(struct radeon_device *rdev)
{
radeon_ttm_fini(rdev);
arch_phys_wc_del(rdev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}
/* Returns how many bytes TTM can move per IB.
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 455268214b89..3de5e6e21662 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -566,7 +566,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
struct page **pages = ttm->pages + pinned;
- r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
+ r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
+ pages, NULL);
if (r < 0)
goto release_pages;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 7ee9aafbdf74..e402be8821c4 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4431,6 +4431,7 @@ static bool si_vm_reg_valid(u32 reg)
case SPI_CONFIG_CNTL:
case SPI_CONFIG_CNTL_1:
case TA_CNTL_AUX:
+ case TA_CS_BC_BASE_ADDR:
return true;
default:
DRM_ERROR("Invalid register 0x%x in CS\n", reg);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index eb220eecba78..65a911ddd509 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1145,6 +1145,7 @@
#define SPI_LB_CU_MASK 0x9354
#define TA_CNTL_AUX 0x9508
+#define TA_CS_BC_BASE_ADDR 0x950C
#define CC_RB_BACKEND_DISABLE 0x98F4
#define BACKEND_DISABLE(x) ((x) << 16)
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 7e2a12c4fed2..1a3ad769f8c8 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -241,8 +241,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
down_read(&current->mm->mmap_sem);
ret = get_user_pages((unsigned long)xfer->mem_addr,
vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE),
- 0, vsg->pages, NULL);
+ (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+ vsg->pages, NULL);
up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e8ae3dc476d1..18061a4bc2f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -241,15 +241,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr);
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
-module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
-module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
-module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
-module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
-module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 070d750af16d..1e59a486bba8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -43,7 +43,7 @@
#define VMWGFX_DRIVER_DATE "20160210"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 10
+#define VMWGFX_DRIVER_MINOR 11
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dc5beff2b4aa..c7b53d987f06 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -35,17 +35,37 @@
#define VMW_RES_HT_ORDER 12
/**
+ * enum vmw_resource_relocation_type - Relocation type for resources
+ *
+ * @vmw_res_rel_normal: Traditional relocation. The resource id in the
+ * command stream is replaced with the actual id after validation.
+ * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
+ * with a NOP.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
+ * after validation is -1, the command is replaced with a NOP. Otherwise no
+ * action.
+ */
+enum vmw_resource_relocation_type {
+ vmw_res_rel_normal,
+ vmw_res_rel_nop,
+ vmw_res_rel_cond_nop,
+ vmw_res_rel_max
+};
+
+/**
* struct vmw_resource_relocation - Relocation info for resources
*
* @head: List head for the software context's relocation list.
* @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of 4 byte entries into the command buffer where the
+ * @offset: Offset in bytes into the command buffer where the
* id that needs fixup is located.
+ * @rel_type: Type of relocation.
*/
struct vmw_resource_relocation {
struct list_head head;
const struct vmw_resource *res;
- unsigned long offset;
+ u32 offset:29;
+ enum vmw_resource_relocation_type rel_type:3;
};
/**
@@ -109,7 +129,18 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct vmw_dma_buffer *vbo,
bool validate_as_mob,
uint32_t *p_val_node);
-
+/**
+ * vmw_ptr_diff - Compute the offset from a to b in bytes
+ *
+ * @a: A starting pointer.
+ * @b: A pointer offset in the same address space.
+ *
+ * Returns: The offset in bytes between the two pointers.
+ */
+static size_t vmw_ptr_diff(void *a, void *b)
+{
+ return (unsigned long) b - (unsigned long) a;
+}
/**
* vmw_resources_unreserve - unreserve resources previously reserved for
@@ -409,11 +440,14 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
* @list: Pointer to head of relocation list.
* @res: The resource.
* @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is 4 bytes.
+ * id that needs fixup is located. Granularity is one byte.
+ * @rel_type: Relocation type.
*/
static int vmw_resource_relocation_add(struct list_head *list,
const struct vmw_resource *res,
- unsigned long offset)
+ unsigned long offset,
+ enum vmw_resource_relocation_type
+ rel_type)
{
struct vmw_resource_relocation *rel;
@@ -425,6 +459,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
rel->res = res;
rel->offset = offset;
+ rel->rel_type = rel_type;
list_add_tail(&rel->head, list);
return 0;
@@ -459,11 +494,24 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
{
struct vmw_resource_relocation *rel;
+ /* Validate the struct vmw_resource_relocation member size */
+ BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
+ BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
+
list_for_each_entry(rel, list, head) {
- if (likely(rel->res != NULL))
- cb[rel->offset] = rel->res->id;
- else
- cb[rel->offset] = SVGA_3D_CMD_NOP;
+ u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
+ switch (rel->rel_type) {
+ case vmw_res_rel_normal:
+ *addr = rel->res->id;
+ break;
+ case vmw_res_rel_nop:
+ *addr = SVGA_3D_CMD_NOP;
+ break;
+ default:
+ if (rel->res->id == -1)
+ *addr = SVGA_3D_CMD_NOP;
+ break;
+ }
}
}
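[Editor's sketch] The 29/3 bitfield split is guarded by the BUILD_BUG_ONs above. A standalone C11 sketch of the same packing check; CB_MAX_SIZE here is an assumed stand-in for SVGA_CB_MAX_SIZE:

    #include <assert.h>
    #include <stdio.h>

    enum rel_type { REL_NORMAL, REL_NOP, REL_COND_NOP, REL_MAX };

    #define CB_MAX_SIZE (512 * 1024)    /* assumed for illustration */

    struct reloc {
            unsigned int offset:29;     /* byte offset into the command buffer */
            unsigned int type:3;        /* one of enum rel_type */
    };

    int main(void)
    {
            static_assert(CB_MAX_SIZE < (1u << 29), "offset must fit in 29 bits");
            static_assert(REL_MAX < (1 << 3), "type must fit in 3 bits");

            struct reloc r = { .offset = 4096, .type = REL_COND_NOP };

            printf("offset=%u type=%u size=%zu\n", r.offset, r.type, sizeof(r));
            return 0;
    }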
@@ -655,7 +703,9 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
*p_val = NULL;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
- id_loc - sw_context->buf_start);
+ vmw_ptr_diff(sw_context->buf_start,
+ id_loc),
+ vmw_res_rel_normal);
if (unlikely(ret != 0))
return ret;
@@ -721,7 +771,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
- id_loc - sw_context->buf_start);
+ vmw_ptr_diff(sw_context->buf_start, id_loc),
+ vmw_res_rel_normal);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
@@ -2143,10 +2194,10 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations,
- NULL, &cmd->header.id -
- sw_context->buf_start);
-
- return 0;
+ NULL,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_nop);
}
/**
@@ -2188,10 +2239,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations,
- NULL, &cmd->header.id -
- sw_context->buf_start);
-
- return 0;
+ NULL,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_nop);
}
/**
@@ -2848,8 +2899,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
* @header: Pointer to the command header in the command stream.
*
* Check that the view exists, and if it was not created using this
- * command batch, make sure it's validated (present in the device) so that
- * the remove command will not confuse the device.
+ * command batch, conditionally make this command a NOP.
*/
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -2877,10 +2927,16 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
return ret;
/*
- * Add view to the validate list iff it was not created using this
- * command batch.
+ * If the view wasn't created during this command batch, it might
+ * have been removed due to a context swapout, so add a
+ * relocation to conditionally make this command a NOP to avoid
+ * device errors.
*/
- return vmw_view_res_val_add(sw_context, view);
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ view,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_cond_nop);
}
/**
@@ -3029,6 +3085,35 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
cmd->body.shaderResourceViewId);
}
+/**
+ * vmw_cmd_dx_transfer_from_buffer -
+ * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXTransferFromBuffer body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.srcSid, NULL);
+ if (ret != 0)
+ return ret;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.destSid, NULL);
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -3379,6 +3464,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_buffer_copy_check, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
&vmw_cmd_pred_copy_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
+ &vmw_cmd_dx_transfer_from_buffer,
+ true, false, true),
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -3848,14 +3936,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
int ret;
*header = NULL;
- if (!dev_priv->cman || kernel_commands)
- return kernel_commands;
-
if (command_size > SVGA_CB_MAX_SIZE) {
DRM_ERROR("Command buffer is too large.\n");
return ERR_PTR(-EINVAL);
}
+ if (!dev_priv->cman || kernel_commands)
+ return kernel_commands;
+
/* If possible, add a little space for fencing. */
cmdbuf_size = command_size + 512;
cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
@@ -4232,9 +4320,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
vmw_dmabuf_unreference(&dev_priv->pinned_bo);
- DRM_INFO("Dummy query bo pin count: %d\n",
- dev_priv->dummy_query_bo->pin_count);
-
out_unlock:
return;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6a328d507a28..52ca1c9d070e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -574,10 +574,8 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
- if (nonblock)
- return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
-
- lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
+ lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
+ nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index c2a721a8cef9..b445ce9b9757 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -324,7 +324,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
if (res->id != -1) {
cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
return;
@@ -397,7 +397,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
submit_size = vmw_surface_define_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
ret = -ENOMEM;
@@ -446,11 +446,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
uint8_t *cmd;
struct vmw_private *dev_priv = res->dev_priv;
- BUG_ON(val_buf->bo == NULL);
-
+ BUG_ON(!val_buf->bo);
submit_size = vmw_surface_dma_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"DMA.\n");
return -ENOMEM;
@@ -538,7 +537,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
submit_size = vmw_surface_destroy_size();
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"eviction.\n");
return -ENOMEM;
@@ -578,7 +577,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
int ret;
struct vmw_resource *res = &srf->res;
- BUG_ON(res_free == NULL);
+ BUG_ON(!res_free);
if (!dev_priv->has_mob)
vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_init(dev_priv, res, true, res_free,
@@ -700,7 +699,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_create_req *req = &arg->req;
struct drm_vmw_surface_arg *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct drm_vmw_size __user *user_sizes;
int ret;
int i, j;
uint32_t cur_bo_offset;
@@ -748,7 +746,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
+ if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_no_user_srf;
}
@@ -763,29 +761,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
srf->num_sizes = num_sizes;
user_srf->size = size;
-
- srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
+ srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr,
+ sizeof(*srf->sizes) * srf->num_sizes);
+ if (IS_ERR(srf->sizes)) {
+ ret = PTR_ERR(srf->sizes);
goto out_no_sizes;
}
- srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
- GFP_KERNEL);
- if (unlikely(srf->offsets == NULL)) {
+ srf->offsets = kmalloc_array(srf->num_sizes,
+ sizeof(*srf->offsets),
+ GFP_KERNEL);
+ if (unlikely(!srf->offsets)) {
ret = -ENOMEM;
goto out_no_offsets;
}
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- req->size_addr;
-
- ret = copy_from_user(srf->sizes, user_sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- goto out_no_copy;
- }
-
srf->base_size = *srf->sizes;
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
srf->multisample_count = 0;
@@ -923,7 +913,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
ret = -EINVAL;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
- if (unlikely(base == NULL)) {
+ if (unlikely(!base)) {
DRM_ERROR("Could not find surface to reference.\n");
goto out_no_lookup;
}
@@ -1069,7 +1059,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd = vmw_fifo_reserve(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
ret = -ENOMEM;
@@ -1135,7 +1125,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd1 == NULL)) {
+ if (unlikely(!cmd1)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"binding.\n");
return -ENOMEM;
@@ -1185,7 +1175,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"unbinding.\n");
return -ENOMEM;
@@ -1244,7 +1234,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
vmw_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
mutex_unlock(&dev_priv->binding_mutex);
@@ -1410,7 +1400,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- if (srf->res.backup == NULL) {
+ if (!srf->res.backup) {
DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
goto out_bad_resource;
}
@@ -1524,7 +1514,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
}
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
+ if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_no_user_srf;
}
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index 8fd4bf77f264..818ea7d93533 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -234,58 +234,6 @@ static __u8 pid0011_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 pid0006_rdesc_fixed[] = {
- 0x05, 0x01, /* Usage Page (Generic Desktop) */
- 0x09, 0x04, /* Usage (Joystick) */
- 0xA1, 0x01, /* Collection (Application) */
- 0xA1, 0x02, /* Collection (Logical) */
- 0x75, 0x08, /* Report Size (8) */
- 0x95, 0x05, /* Report Count (5) */
- 0x15, 0x00, /* Logical Minimum (0) */
- 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
- 0x35, 0x00, /* Physical Minimum (0) */
- 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
- 0x09, 0x30, /* Usage (X) */
- 0x09, 0x33, /* Usage (Ry) */
- 0x09, 0x32, /* Usage (Z) */
- 0x09, 0x31, /* Usage (Y) */
- 0x09, 0x34, /* Usage (Ry) */
- 0x81, 0x02, /* Input (Variable) */
- 0x75, 0x04, /* Report Size (4) */
- 0x95, 0x01, /* Report Count (1) */
- 0x25, 0x07, /* Logical Maximum (7) */
- 0x46, 0x3B, 0x01, /* Physical Maximum (315) */
- 0x65, 0x14, /* Unit (Centimeter) */
- 0x09, 0x39, /* Usage (Hat switch) */
- 0x81, 0x42, /* Input (Variable) */
- 0x65, 0x00, /* Unit (None) */
- 0x75, 0x01, /* Report Size (1) */
- 0x95, 0x0C, /* Report Count (12) */
- 0x25, 0x01, /* Logical Maximum (1) */
- 0x45, 0x01, /* Physical Maximum (1) */
- 0x05, 0x09, /* Usage Page (Button) */
- 0x19, 0x01, /* Usage Minimum (0x01) */
- 0x29, 0x0C, /* Usage Maximum (0x0C) */
- 0x81, 0x02, /* Input (Variable) */
- 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined) */
- 0x75, 0x01, /* Report Size (1) */
- 0x95, 0x08, /* Report Count (8) */
- 0x25, 0x01, /* Logical Maximum (1) */
- 0x45, 0x01, /* Physical Maximum (1) */
- 0x09, 0x01, /* Usage (0x01) */
- 0x81, 0x02, /* Input (Variable) */
- 0xC0, /* End Collection */
- 0xA1, 0x02, /* Collection (Logical) */
- 0x75, 0x08, /* Report Size (8) */
- 0x95, 0x07, /* Report Count (7) */
- 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
- 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
- 0x09, 0x02, /* Usage (0x02) */
- 0x91, 0x02, /* Output (Variable) */
- 0xC0, /* End Collection */
- 0xC0 /* End Collection */
-};
-
static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -296,16 +244,34 @@ static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*rsize = sizeof(pid0011_rdesc_fixed);
}
break;
- case 0x0006:
- if (*rsize == sizeof(pid0006_rdesc_fixed)) {
- rdesc = pid0006_rdesc_fixed;
- *rsize = sizeof(pid0006_rdesc_fixed);
- }
- break;
}
return rdesc;
}
+#define map_abs(c) hid_map_usage(hi, usage, bit, max, EV_ABS, (c))
+#define map_rel(c) hid_map_usage(hi, usage, bit, max, EV_REL, (c))
+
+static int dr_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid) {
+ /*
+ * revert to the old hid-input behavior where axes
+ * can be randomly assigned when hid->usage is reused.
+ */
+ case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
+ case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ:
+ if (field->flags & HID_MAIN_ITEM_RELATIVE)
+ map_rel(usage->hid & 0xf);
+ else
+ map_abs(usage->hid & 0xf);
+ return 1;
+ }
+
+ return 0;
+}
+
static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
@@ -352,6 +318,7 @@ static struct hid_driver dr_driver = {
.id_table = dr_devices,
.report_fixup = dr_report_fixup,
.probe = dr_probe,
+ .input_mapping = dr_input_mapping,
};
module_hid_driver(dr_driver);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index cd59c79eebdd..6cfb5cacc253 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -64,6 +64,9 @@
#define USB_VENDOR_ID_AKAI 0x2011
#define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715
+#define USB_VENDOR_ID_AKAI_09E8 0x09E8
+#define USB_DEVICE_ID_AKAI_09E8_MIDIMIX 0x0031
+
#define USB_VENDOR_ID_ALCOR 0x058f
#define USB_DEVICE_ID_ALCOR_USBRS232 0x9720
diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c
index d8d55f37b4f5..d3e1ab162f7c 100644
--- a/drivers/hid/hid-led.c
+++ b/drivers/hid/hid-led.c
@@ -100,6 +100,7 @@ struct hidled_device {
const struct hidled_config *config;
struct hid_device *hdev;
struct hidled_rgb *rgb;
+ u8 *buf;
struct mutex lock;
};
@@ -118,13 +119,19 @@ static int hidled_send(struct hidled_device *ldev, __u8 *buf)
mutex_lock(&ldev->lock);
+ /*
+ * buffer provided to hid_hw_raw_request must not be on the stack
+ * and must not be embedded in a larger data structure
+ */
+ memcpy(ldev->buf, buf, ldev->config->report_size);
+
if (ldev->config->report_type == RAW_REQUEST)
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
else if (ldev->config->report_type == OUTPUT_REPORT)
- ret = hid_hw_output_report(ldev->hdev, buf,
+ ret = hid_hw_output_report(ldev->hdev, ldev->buf,
ldev->config->report_size);
else
ret = -EINVAL;
@@ -147,17 +154,21 @@ static int hidled_recv(struct hidled_device *ldev, __u8 *buf)
mutex_lock(&ldev->lock);
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ memcpy(ldev->buf, buf, ldev->config->report_size);
+
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
if (ret < 0)
goto err;
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
+
+ memcpy(buf, ldev->buf, ldev->config->report_size);
err:
mutex_unlock(&ldev->lock);
@@ -447,6 +458,10 @@ static int hidled_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!ldev)
return -ENOMEM;
+ ldev->buf = devm_kmalloc(&hdev->dev, MAX_REPORT_SIZE, GFP_KERNEL);
+ if (!ldev->buf)
+ return -ENOMEM;
+
ret = hid_parse(hdev);
if (ret)
return ret;
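[Editor's sketch] The staging buffer exists because data handed to hid_hw_raw_request()/hid_hw_output_report() may be used for DMA, so it has to be a standalone heap allocation. The shape of the fix, condensed from the hunks above:

    /* Allocate one dedicated, kmalloc-backed transfer buffer... */
    ldev->buf = devm_kmalloc(&hdev->dev, MAX_REPORT_SIZE, GFP_KERNEL);
    if (!ldev->buf)
            return -ENOMEM;

    /* ...and stage every report through it instead of passing stack or
     * struct-embedded memory to the transport. */
    memcpy(ldev->buf, buf, ldev->config->report_size);
    ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
                             ldev->config->report_size,
                             HID_FEATURE_REPORT, HID_REQ_SET_REPORT);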
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0a0eca5da47d..354d49ea36dd 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -56,6 +56,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 4aa3cb63fd41..bcd06306f3e8 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -314,10 +314,14 @@ static void heartbeat_onchannelcallback(void *context)
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
struct icmsg_negotiate *negop = NULL;
- vmbus_recvpacket(channel, hbeat_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
+ while (1) {
+
+ vmbus_recvpacket(channel, hbeat_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
+
+ if (!recvlen)
+ break;
- if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
sizeof(struct vmbuspipe_hdr)];
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 98114cef1e43..2fe1828bd10b 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -194,10 +194,10 @@ static struct adm9240_data *adm9240_update_device(struct device *dev)
* 0.5'C per two measurement cycles thus ignore possible
* but unlikely aliasing error on lsb reading. --Grant
*/
- data->temp = ((i2c_smbus_read_byte_data(client,
+ data->temp = (i2c_smbus_read_byte_data(client,
ADM9240_REG_TEMP) << 8) |
i2c_smbus_read_byte_data(client,
- ADM9240_REG_TEMP_CONF)) / 128;
+ ADM9240_REG_TEMP_CONF);
for (i = 0; i < 2; i++) { /* read fans */
data->fan[i] = i2c_smbus_read_byte_data(client,
@@ -263,7 +263,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *dummy,
char *buf)
{
struct adm9240_data *data = adm9240_update_device(dev);
- return sprintf(buf, "%d\n", data->temp * 500); /* 9-bit value */
+ return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */
}
static ssize_t show_max(struct device *dev, struct device_attribute *devattr,
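[Editor's sketch] Storing the raw 16-bit word and dividing at read time keeps the arithmetic signed, which matters for readings below 0 degC (assuming data->temp is a signed 16-bit field, as the /128 at display time suggests). A standalone check:

    #include <stdint.h>
    #include <stdio.h>

    /* Temperature is a signed 9-bit value in the top bits of a 16-bit
     * word: high byte = MSBs, bit 7 of the config byte = 0.5-degree LSB. */
    static int temp_millideg(int16_t raw)
    {
            return raw / 128 * 500;     /* arithmetic division keeps the sign */
    }

    int main(void)
    {
            /* 0xFF80 reinterpreted as int16_t is -128, i.e. -0.5 degC */
            int16_t below_zero = (int16_t)((0xFF << 8) | 0x80);

            printf("%d\n", temp_millideg(below_zero));  /* prints -500 */
            return 0;
    }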
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index bef84e085973..c1b9275978f9 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -268,11 +268,13 @@ static int max31790_read_pwm(struct device *dev, u32 attr, int channel,
long *val)
{
struct max31790_data *data = max31790_update_device(dev);
- u8 fan_config = data->fan_config[channel];
+ u8 fan_config;
if (IS_ERR(data))
return PTR_ERR(data);
+ fan_config = data->fan_config[channel];
+
switch (attr) {
case hwmon_pwm_input:
*val = data->pwm[channel] >> 8;
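[Editor's note] The reordering above fixes a dereference-before-error-check: max31790_update_device() can return an ERR_PTR, so the safe shape is

    struct max31790_data *data = max31790_update_device(dev);
    u8 fan_config;

    if (IS_ERR(data))
            return PTR_ERR(data);   /* must precede any field access */

    fan_config = data->fan_config[channel];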
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6d94e2ec5b4f..d252276feadf 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -79,12 +79,12 @@ config I2C_AMD8111
config I2C_HIX5HD2
tristate "Hix5hd2 high-speed I2C driver"
- depends on ARCH_HIX5HD2 || COMPILE_TEST
+ depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST
help
- Say Y here to include support for high-speed I2C controller in the
- Hisilicon based hix5hd2 SoCs.
+ Say Y here to include support for the high-speed I2C controller
+ used in HiSilicon hix5hd2 SoCs.
- This driver can also be built as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called i2c-hix5hd2.
config I2C_I801
@@ -589,10 +589,10 @@ config I2C_IMG
config I2C_IMX
tristate "IMX I2C interface"
- depends on ARCH_MXC || ARCH_LAYERSCAPE
+ depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE
help
Say Y here if you want to use the IIC bus controller on
- the Freescale i.MX/MXC or Layerscape processors.
+ the Freescale i.MX/MXC, Layerscape or ColdFire processors.
This driver can also be built as a module. If so, the module
will be called i2c-imx.
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 1fe93c43215c..11e866d05368 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -95,6 +95,9 @@
#define DW_IC_STATUS_TFE BIT(2)
#define DW_IC_STATUS_MST_ACTIVITY BIT(5)
+#define DW_IC_SDA_HOLD_RX_SHIFT 16
+#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT)
+
#define DW_IC_ERR_TX_ABRT 0x1
#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
@@ -420,12 +423,20 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
/* Configure SDA Hold Time if required */
reg = dw_readl(dev, DW_IC_COMP_VERSION);
if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
- if (dev->sda_hold_time) {
- dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
- } else {
+ if (!dev->sda_hold_time) {
/* Keep previous hold time setting if no one set it */
dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
}
+ /*
+ * Workaround to avoid TX arbitration loss in case the I2C
+ * slave pulls SDA down "too quickly" after the falling edge of
+ * SCL, by enabling a non-zero SDA RX hold. The specification says it
+ * extends the incoming SDA low-to-high transition while SCL is
+ * high, but it appears to also help with the issue above.
+ */
+ if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
+ dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
+ dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
} else {
dev_warn(dev->dev,
"Hardware too old to adjust SDA hold time.\n");
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 9604024e0eb0..49f2084f7bb5 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -368,6 +368,7 @@ static const struct of_device_id dc_i2c_match[] = {
{ .compatible = "cnxt,cx92755-i2c" },
{ },
};
+MODULE_DEVICE_TABLE(of, dc_i2c_match);
static struct platform_driver dc_i2c_driver = {
.probe = dc_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 08847e8b8998..eb3627f35d12 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -146,6 +146,7 @@
#define SMBHSTCFG_HST_EN 1
#define SMBHSTCFG_SMB_SMI_EN 2
#define SMBHSTCFG_I2C_EN 4
+#define SMBHSTCFG_SPD_WD 0x10
/* TCO configuration bits for TCOCTL */
#define TCOCTL_EN 0x0100
@@ -865,9 +866,16 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
block = 1;
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
- /* NB: page 240 of ICH5 datasheet shows that the R/#W
- * bit should be cleared here, even when reading */
- outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));
+ /*
+ * NB: page 240 of ICH5 datasheet shows that the R/#W
+ * bit should be cleared here, even when reading.
+ * However if SPD Write Disable is set (Lynx Point and later),
+ * the read will fail if we don't set the R/#W bit.
+ */
+ outb_p(((addr & 0x7f) << 1) |
+ ((priv->original_hstcfg & SMBHSTCFG_SPD_WD) ?
+ (read_write & 0x01) : 0),
+ SMBHSTADD(priv));
if (read_write == I2C_SMBUS_READ) {
/* NB: page 240 of ICH5 datasheet also shows
* that DATA1 is the cmd field when reading */
@@ -1573,6 +1581,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Disable SMBus interrupt feature if SMBus using SMI# */
priv->features &= ~FEATURE_IRQ;
}
+ if (temp & SMBHSTCFG_SPD_WD)
+ dev_info(&dev->dev, "SPD Write Disable is set\n");
/* Clear special mode bits */
if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
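The i801 change hinges on how SMBHSTADD is encoded: the 7-bit slave address sits in bits 7:1 and the R/#W flag in bit 0, which older chipsets want cleared even for reads, while chipsets with SPD Write Disable set need the real direction bit. Spelled out as a sketch (addr, rw and spd_wd are placeholders):

/* Sketch: SMBus host address register encoding used in the hunk above.
 * addr = 7-bit slave address, rw = 0 write / 1 read, spd_wd = quirk flag. */
u8 hstadd = (addr & 0x7f) << 1;		/* address in bits 7:1 */
if (spd_wd)				/* Lynx Point and later with SPD WD */
	hstadd |= rw & 0x01;		/* pass the real R/#W bit through */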
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 592a8f26a708..47fc1f1acff7 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1009,10 +1009,13 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
rinfo->sda_gpio = of_get_named_gpio(pdev->dev.of_node, "sda-gpios", 0);
rinfo->scl_gpio = of_get_named_gpio(pdev->dev.of_node, "scl-gpios", 0);
- if (!gpio_is_valid(rinfo->sda_gpio) ||
- !gpio_is_valid(rinfo->scl_gpio) ||
- IS_ERR(i2c_imx->pinctrl_pins_default) ||
- IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
+ if (rinfo->sda_gpio == -EPROBE_DEFER ||
+ rinfo->scl_gpio == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (!gpio_is_valid(rinfo->sda_gpio) ||
+ !gpio_is_valid(rinfo->scl_gpio) ||
+ IS_ERR(i2c_imx->pinctrl_pins_default) ||
+ IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
dev_dbg(&pdev->dev, "recovery information incomplete\n");
return 0;
}
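of_get_named_gpio() returns -EPROBE_DEFER when the GPIO controller has not probed yet; lumping that in with "invalid GPIO", as the old code did, permanently disabled bus recovery instead of retrying. The general shape of the fix:

/* Sketch: -EPROBE_DEFER must bubble up so the driver core retries probe;
 * any other invalid value just means the optional feature is absent. */
int gpio = of_get_named_gpio(np, "sda-gpios", 0);
if (gpio == -EPROBE_DEFER)
	return -EPROBE_DEFER;	/* GPIO controller not ready yet */
if (!gpio_is_valid(gpio))
	return 0;		/* no recovery GPIOs; continue without them */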
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index b8ea62105f42..30132c3957cd 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -729,6 +729,7 @@ static const struct of_device_id jz4780_i2c_of_matches[] = {
{ .compatible = "ingenic,jz4780-i2c", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, jz4780_i2c_of_matches);
static int jz4780_i2c_probe(struct platform_device *pdev)
{
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 50702c7bb244..df220666d627 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -694,6 +694,8 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
t_calc->div_low--;
t_calc->div_high--;
+ /* Use a tuning value of 0 so the con register is not updated */
+ t_calc->tuning = 0;
/* Maximum divider supported by hw is 0xffff */
if (t_calc->div_low > 0xffff) {
t_calc->div_low = 0xffff;
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 263685c7a512..05cf192ef1ac 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -105,7 +105,7 @@ struct slimpro_i2c_dev {
struct mbox_chan *mbox_chan;
struct mbox_client mbox_client;
struct completion rd_complete;
- u8 dma_buffer[I2C_SMBUS_BLOCK_MAX];
+ u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
u32 *resp_msg;
};
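An SMBus block transfer carries a length byte ahead of up to I2C_SMBUS_BLOCK_MAX data bytes, so a buffer sized exactly I2C_SMBUS_BLOCK_MAX overflows by one once the count is stored at index 0. The layout the one-byte enlargement accounts for (len and data are placeholders):

/* Sketch: SMBus block buffer layout assumed by the fix above. */
u8 buf[I2C_SMBUS_BLOCK_MAX + 1];	/* 1 count byte + up to 32 data bytes */
buf[0] = len;				/* byte count first */
memcpy(&buf[1], data, len);		/* payload starts at index 1 */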
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 2a972ed7aa0d..e29ff37a43bd 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -426,6 +426,7 @@ static const struct of_device_id xlp9xx_i2c_of_match[] = {
{ .compatible = "netlogic,xlp980-i2c", },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, xlp9xx_i2c_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = {
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 0968f59b6df5..ad17d88d8573 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -358,6 +358,7 @@ static const struct of_device_id xlr_i2c_dt_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, xlr_i2c_dt_ids);
static int xlr_i2c_probe(struct platform_device *pdev)
{
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 98fffa3a09f7..1704fc84d647 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1680,7 +1680,8 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
static void of_i2c_register_devices(struct i2c_adapter *adap)
{
- struct device_node *node;
+ struct device_node *bus, *node;
+ struct i2c_client *client;
/* Only register child devices if the adapter has a node pointer set */
if (!adap->dev.of_node)
@@ -1688,11 +1689,24 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
- for_each_available_child_of_node(adap->dev.of_node, node) {
+ bus = of_get_child_by_name(adap->dev.of_node, "i2c-bus");
+ if (!bus)
+ bus = of_node_get(adap->dev.of_node);
+
+ for_each_available_child_of_node(bus, node) {
if (of_node_test_and_set_flag(node, OF_POPULATED))
continue;
- of_i2c_register_device(adap, node);
+
+ client = of_i2c_register_device(adap, node);
+ if (IS_ERR(client)) {
+ dev_warn(&adap->dev,
+ "Failed to create I2C device for %s\n",
+ node->full_name);
+ of_node_clear_flag(node, OF_POPULATED);
+ }
}
+
+ of_node_put(bus);
}
static int of_dev_node_match(struct device *dev, void *data)
@@ -2293,6 +2307,7 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
if (IS_ERR(client)) {
dev_err(&adap->dev, "failed to create client for '%s'\n",
rd->dn->full_name);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
return notifier_from_errno(PTR_ERR(client));
}
break;
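Besides warning on registration failures, the i2c-core hunk teaches the walk to look for a dedicated "i2c-bus" subnode, which some bindings use to separate the bus children from other controller properties. Both branches hand back a counted node reference, so a single of_node_put() balances either path:

/* Sketch: prefer an "i2c-bus" child node, fall back to the adapter node.
 * Each branch returns a held reference, dropped once by of_node_put(). */
struct device_node *bus = of_get_child_by_name(adap->dev.of_node, "i2c-bus");
if (!bus)
	bus = of_node_get(adap->dev.of_node);
/* ... iterate the children of bus ... */
of_node_put(bus);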
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7edcf3238620..99c051490eff 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -437,6 +437,8 @@ config STX104
config TI_ADC081C
tristate "Texas Instruments ADC081C/ADC101C/ADC121C family"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for Texas Instruments ADC081C,
ADC101C and ADC121C ADC chips.
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index bd321b305a0a..ef761a508630 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -213,13 +213,14 @@ static int atlas_check_ec_calibration(struct atlas_data *data)
struct device *dev = &data->client->dev;
int ret;
unsigned int val;
+ __be16 rval;
- ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2);
+ ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2);
if (ret)
return ret;
- dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100,
- be16_to_cpu(val) % 100);
+ val = be16_to_cpu(rval);
+ dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100);
ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
if (ret)
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 39dd2026ccc9..066161a4bccd 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -123,22 +123,24 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
{
unsigned int storage_bytes = data->chip->read_size;
unsigned int shift = chan->scan_type.shift + (chan->address * 8);
- unsigned int buf;
+ __be16 buf16;
+ __be32 buf32;
int ret;
- ret = spi_read(data->spi, (void *) &buf, storage_bytes);
- if (ret)
- return ret;
-
switch (storage_bytes) {
case 2:
- *val = be16_to_cpu(buf);
+ ret = spi_read(data->spi, (void *)&buf16, storage_bytes);
+ *val = be16_to_cpu(buf16);
break;
case 4:
- *val = be32_to_cpu(buf);
+ ret = spi_read(data->spi, (void *)&buf32, storage_bytes);
+ *val = be32_to_cpu(buf32);
break;
}
+ if (ret)
+ return ret;
+
/* check to be sure this is a valid reading */
if (*val & data->chip->status_bit)
return -EINVAL;
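Both this hunk and the atlas-ph-sensor one above fix the same class of bug: reading a big-endian wire value into a plain unsigned int and then byte-swapping the whole int only works by accident for some sizes. Reading into a correctly sized __be16/__be32 first makes the conversion well defined (spi and storage_bytes stand for the driver's own values):

/* Sketch: size the receive buffer to the wire format before swapping. */
__be16 raw16;
__be32 raw32;
int val, ret;

if (storage_bytes == 2) {
	ret = spi_read(spi, &raw16, 2);
	val = be16_to_cpu(raw16);
} else {
	ret = spi_read(spi, &raw32, 4);
	val = be32_to_cpu(raw32);
}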
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 19a418a1b631..fb3fb89640e5 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -89,4 +89,6 @@ source "drivers/infiniband/sw/rxe/Kconfig"
source "drivers/infiniband/hw/hfi1/Kconfig"
+source "drivers/infiniband/hw/qedr/Kconfig"
+
endif # INFINIBAND
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index c68746ce6624..224ad274ea0b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -94,6 +94,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
unsigned long dma_attrs = 0;
struct scatterlist *sg, *sg_list_start;
int need_release = 0;
+ unsigned int gup_flags = FOLL_WRITE;
if (dmasync)
dma_attrs |= DMA_ATTR_WRITE_BARRIER;
@@ -183,6 +184,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (ret)
goto out;
+ if (!umem->writable)
+ gup_flags |= FOLL_FORCE;
+
need_release = 1;
sg_list_start = umem->sg_head.sgl;
@@ -190,7 +194,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
- 1, !umem->writable, page_list, vma_list);
+ gup_flags, page_list, vma_list);
if (ret < 0)
goto out;
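This is part of the tree-wide get_user_pages() conversion from separate write/force booleans to a single gup_flags argument. The old call passed write = 1 and force = !umem->writable, which maps onto the flags like so:

/* Sketch: the old (write, force) booleans expressed as FOLL_* flags. */
unsigned int gup_flags = FOLL_WRITE;	/* old write = 1 */
if (!umem->writable)
	gup_flags |= FOLL_FORCE;	/* old force = !umem->writable */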
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 75077a018675..1f0fe3217f23 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
u64 off;
int j, k, ret = 0, start_idx, npages = 0;
u64 base_virt_addr;
+ unsigned int flags = 0;
if (access_mask == 0)
return -EINVAL;
@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
goto out_put_task;
}
+ if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ flags |= FOLL_WRITE;
+
start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
k = start_idx;
@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
*/
npages = get_user_pages_remote(owning_process, owning_mm,
user_virt, gup_num_pages,
- access_mask & ODP_WRITE_ALLOWED_BIT,
- 0, local_page_list, NULL);
+ flags, local_page_list, NULL);
up_read(&owning_mm->mmap_sem);
if (npages < 0)
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 21fe401ff178..e7a5ed9f6f3f 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_HNS) += hns/
+obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 875597b0e69c..097365932b09 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -83,8 +83,7 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
struct hns_roce_mtt *hr_mtt,
struct hns_roce_uar *hr_uar,
- struct hns_roce_cq *hr_cq, int vector,
- int collapsed)
+ struct hns_roce_cq *hr_cq, int vector)
{
struct hns_roce_cmd_mailbox *mailbox = NULL;
struct hns_roce_cq_table *cq_table = NULL;
@@ -153,6 +152,9 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
hr_cq->cons_index = 0;
hr_cq->uar = hr_uar;
+ atomic_set(&hr_cq->refcount, 1);
+ init_completion(&hr_cq->free);
+
return 0;
err_radix:
@@ -192,6 +194,11 @@ static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
/* Waiting interrupt process procedure carried out */
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+ /* wait for all interrupt processed */
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
+ wait_for_completion(&hr_cq->free);
+
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, hr_cq->cqn);
spin_unlock_irq(&cq_table->lock);
@@ -300,10 +307,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
hr_cq->ib_cq.cqe = cq_entries - 1;
- mutex_init(&hr_cq->resize_mutex);
spin_lock_init(&hr_cq->lock);
- hr_cq->hr_resize_buf = NULL;
- hr_cq->resize_umem = NULL;
if (context) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
@@ -338,8 +342,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
}
/* Allocate cq index, fill cq_context */
- ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
- uar, hr_cq, vector, 0);
+ ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
+ hr_cq, vector);
if (ret) {
dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
goto err_mtt;
@@ -353,12 +357,15 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
if (context) {
if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
ret = -EFAULT;
- goto err_mtt;
+ goto err_cqc;
}
}
return &hr_cq->ib_cq;
+err_cqc:
+ hns_roce_free_cq(hr_dev, hr_cq);
+
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
if (context)
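The hns_roce_cq lifetime fix pairs an atomic refcount with a completion: the CQ is created holding one reference, event handlers take and drop references while they run, and destroy drops the creation reference then sleeps until the last user completes "free". The bare pattern, on a hypothetical obj:

/* Sketch: block teardown until every outstanding user is done. */
atomic_set(&obj->refcount, 1);		/* creation: owner's reference */
init_completion(&obj->free);

/* teardown: drop the owner's reference, wait for in-flight users */
if (atomic_dec_and_test(&obj->refcount))
	complete(&obj->free);
wait_for_completion(&obj->free);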
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index ea735800eb18..341731553a60 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -62,7 +62,7 @@
#define HNS_ROCE_AEQE_OF_VEC_NUM 1
/* 4G/4K = 1M */
-#define HNS_ROCE_SL_SHIFT 29
+#define HNS_ROCE_SL_SHIFT 28
#define HNS_ROCE_TCLASS_SHIFT 20
#define HNS_ROCE_FLOW_LABLE_MASK 0xfffff
@@ -74,7 +74,9 @@
#define MR_TYPE_DMA 0x03
#define PKEY_ID 0xffff
+#define GUID_LEN 8
#define NODE_DESC_SIZE 64
+#define DB_REG_OFFSET 0x1000
#define SERV_TYPE_RC 0
#define SERV_TYPE_RD 1
@@ -282,20 +284,11 @@ struct hns_roce_cq_buf {
struct hns_roce_mtt hr_mtt;
};
-struct hns_roce_cq_resize {
- struct hns_roce_cq_buf hr_buf;
- int cqe;
-};
-
struct hns_roce_cq {
struct ib_cq ib_cq;
struct hns_roce_cq_buf hr_buf;
- /* pointer to store information after resize*/
- struct hns_roce_cq_resize *hr_resize_buf;
spinlock_t lock;
- struct mutex resize_mutex;
struct ib_umem *umem;
- struct ib_umem *resize_umem;
void (*comp)(struct hns_roce_cq *);
void (*event)(struct hns_roce_cq *, enum hns_roce_event);
@@ -408,6 +401,7 @@ struct hns_roce_qp {
u32 buff_size;
struct mutex mutex;
u8 port;
+ u8 phy_port;
u8 sl;
u8 resp_depth;
u8 state;
@@ -471,7 +465,6 @@ struct hns_roce_caps {
u32 max_rq_desc_sz; /* 64 */
int max_qp_init_rdma;
int max_qp_dest_rdma;
- int sqp_start;
int num_cqs;
int max_cqes;
int reserved_cqs;
@@ -512,6 +505,8 @@ struct hns_roce_hw {
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle, int nent, u32 vector);
+ int (*clear_hem)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj);
int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
@@ -533,7 +528,6 @@ struct hns_roce_dev {
struct hns_roce_uar priv_uar;
const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
spinlock_t sm_lock;
- spinlock_t cq_db_lock;
spinlock_t bt_cmd_lock;
struct hns_roce_ib_iboe iboe;
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
index 98af7fecf2f1..21e21b03cfb5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -66,9 +66,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
{
struct device *dev = &hr_dev->pdev->dev;
- qpn = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -96,13 +93,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
default:
break;
}
-
- hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
}
static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
@@ -111,9 +101,6 @@ static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
{
struct device *dev = &hr_dev->pdev->dev;
- qpn = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
dev_warn(dev, "Local Access Violation Work Queue Error.\n");
switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -141,13 +128,69 @@ static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
default:
break;
}
+}
+
+static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ int phy_port;
+ int qpn;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+ phy_port = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
+ if (qpn <= 1)
+ qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
+ "QP %d, phy_port %d.\n", qpn, phy_port);
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 cqn;
+
+ cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
+ break;
+ default:
+ break;
+ }
- hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ hns_roce_cq_event(hr_dev, cqn, event_type);
}
static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
@@ -185,7 +228,7 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_aeqe *aeqe;
int aeqes_found = 0;
- int qpn = 0;
+ int event_type;
while ((aeqe = next_aeqe_sw(eq))) {
dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
@@ -195,9 +238,10 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
/* Memory barrier */
rmb();
- switch (roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)) {
+ event_type = roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
+ switch (event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
dev_warn(dev, "PATH MIG not supported\n");
break;
@@ -211,23 +255,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
dev_warn(dev, "PATH MIG failed\n");
break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_warn(dev, "qpn = 0x%lx\n",
- roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S));
- hns_roce_qp_event(hr_dev,
- roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
- break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
@@ -235,40 +265,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
dev_warn(dev, "SRQ not support!\n");
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_warn(dev, "CQ 0x%lx access err.\n",
- roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
- hns_roce_cq_event(hr_dev,
- le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- break;
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%lx overflow\n",
- roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
- hns_roce_cq_event(hr_dev,
- le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- break;
case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- dev_warn(dev, "CQ ID invalid.\n");
- hns_roce_cq_event(hr_dev,
- le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
break;
case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
dev_warn(dev, "port change.\n");
@@ -290,11 +289,8 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
break;
default:
- dev_warn(dev, "Unhandled event 0x%lx on EQ %d at index %u\n",
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S),
- eq->eqn, eq->cons_index);
+ dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
+ event_type, eq->eqn, eq->cons_index);
break;
};
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.h b/drivers/infiniband/hw/hns/hns_roce_eq.h
index fe4388191a3c..c6d212d12e03 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.h
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.h
@@ -107,6 +107,10 @@ struct hns_roce_aeqe {
#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M \
(((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M \
+ (((1UL << 3) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S)
+
#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M \
(((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index d53d64362389..250d8f280390 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -36,14 +36,10 @@
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
-#define HW_SYNC_TIMEOUT_MSECS 500
-#define HW_SYNC_SLEEP_TIME_INTERVAL 20
-
#define HNS_ROCE_HEM_ALLOC_SIZE (1 << 17)
#define HNS_ROCE_TABLE_CHUNK_SIZE (1 << 17)
#define DMA_ADDR_T_SHIFT 12
-#define BT_CMD_SYNC_SHIFT 31
#define BT_BA_SHIFT 32
struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
@@ -213,74 +209,6 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
return ret;
}
-static int hns_roce_clear_hem(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long obj)
-{
- struct device *dev = &hr_dev->pdev->dev;
- unsigned long end = 0;
- unsigned long flags;
- void __iomem *bt_cmd;
- uint32_t bt_cmd_val[2];
- u32 bt_cmd_h_val = 0;
- int ret = 0;
-
- switch (table->type) {
- case HEM_TYPE_QPC:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
- break;
- case HEM_TYPE_MTPT:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
- HEM_TYPE_MTPT);
- break;
- case HEM_TYPE_CQC:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
- break;
- case HEM_TYPE_SRQC:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
- HEM_TYPE_SRQC);
- break;
- default:
- return ret;
- }
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
- roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
- roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, 0);
-
- spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
-
- bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
-
- end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
- while (1) {
- if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
- if (!(time_before(jiffies, end))) {
- dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
- spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
- flags);
- return -EBUSY;
- }
- } else {
- break;
- }
- msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
- }
-
- bt_cmd_val[0] = 0;
- bt_cmd_val[1] = bt_cmd_h_val;
- hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
- spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
-
- return ret;
-}
-
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
@@ -333,7 +261,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
if (--table->hem[i]->refcount == 0) {
/* Clear HEM base address */
- if (hns_roce_clear_hem(hr_dev, table, obj))
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj))
dev_warn(dev, "Clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -456,7 +384,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
for (i = 0; i < table->num_hem; ++i)
if (table->hem[i]) {
- if (hns_roce_clear_hem(hr_dev, table,
+ if (hr_dev->hw->clear_hem(hr_dev, table,
i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
dev_err(dev, "Clear HEM base address failed.\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index ad6617588fba..435748858252 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -34,6 +34,10 @@
#ifndef _HNS_ROCE_HEM_H
#define _HNS_ROCE_HEM_H
+#define HW_SYNC_TIMEOUT_MSECS 500
+#define HW_SYNC_SLEEP_TIME_INTERVAL 20
+#define BT_CMD_SYNC_SHIFT 31
+
enum {
/* MAP HEM(Hardware Entry Memory) */
HEM_TYPE_QPC = 0,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 399f5dedaf2d..71232e5fabf6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -73,8 +73,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
u32 ind = 0;
int ret = 0;
- spin_lock_irqsave(&qp->sq.lock, flags);
+ if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
+ ibqp->qp_type != IB_QPT_RC)) {
+ dev_err(dev, "un-supported QP type\n");
+ *bad_wr = NULL;
+ return -EOPNOTSUPP;
+ }
+ spin_lock_irqsave(&qp->sq.lock, flags);
ind = qp->sq_next_wqe;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
@@ -162,7 +168,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
roce_set_field(ud_sq_wqe->u32_36,
UD_SEND_WQE_U32_36_SGID_INDEX_M,
UD_SEND_WQE_U32_36_SGID_INDEX_S,
- hns_get_gid_index(hr_dev, qp->port,
+ hns_get_gid_index(hr_dev, qp->phy_port,
ah->av.gid_index));
roce_set_field(ud_sq_wqe->u32_40,
@@ -205,8 +211,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
(wr->send_flags & IB_SEND_FENCE ?
(cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);
- wqe = (struct hns_roce_wqe_ctrl_seg *)wqe +
- sizeof(struct hns_roce_wqe_ctrl_seg);
+ wqe += sizeof(struct hns_roce_wqe_ctrl_seg);
switch (wr->opcode) {
case IB_WR_RDMA_READ:
@@ -235,8 +240,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
}
ctrl->flag |= cpu_to_le32(ps_opcode);
- wqe = (struct hns_roce_wqe_raddr_seg *)wqe +
- sizeof(struct hns_roce_wqe_raddr_seg);
+ wqe += sizeof(struct hns_roce_wqe_raddr_seg);
dseg = wqe;
if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
@@ -253,8 +257,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
memcpy(wqe, ((void *) (uintptr_t)
wr->sg_list[i].addr),
wr->sg_list[i].length);
- wqe = (struct hns_roce_wqe_raddr_seg *)
- wqe + wr->sg_list[i].length;
+ wqe += wr->sg_list[i].length;
}
ctrl->flag |= HNS_ROCE_WQE_INLINE;
} else {
@@ -266,9 +269,6 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
HNS_ROCE_WQE_SGE_NUM_BIT);
}
ind++;
- } else {
- dev_dbg(dev, "unSupported QP type\n");
- break;
}
}
@@ -285,7 +285,7 @@ out:
SQ_DOORBELL_U32_4_SQ_HEAD_S,
(qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
- SQ_DOORBELL_U32_4_PORT_S, qp->port);
+ SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
@@ -365,14 +365,14 @@ out:
/* SW update GSI rq header */
reg_val = roce_read(to_hr_dev(ibqp->device),
ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->port);
+ QP1C_CFGN_OFFSET * hr_qp->phy_port);
roce_set_field(reg_val,
ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
hr_qp->rq.head);
roce_write(to_hr_dev(ibqp->device),
ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
+ QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
} else {
rq_db.u32_4 = 0;
rq_db.u32_8 = 0;
@@ -789,6 +789,66 @@ static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
}
}
+static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ int ret;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+ priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
+ HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
+ GFP_KERNEL);
+ if (!priv->bt_table.qpc_buf.buf)
+ return -ENOMEM;
+
+ priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
+ HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
+ GFP_KERNEL);
+ if (!priv->bt_table.mtpt_buf.buf) {
+ ret = -ENOMEM;
+ goto err_failed_alloc_mtpt_buf;
+ }
+
+ priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
+ HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
+ GFP_KERNEL);
+ if (!priv->bt_table.cqc_buf.buf) {
+ ret = -ENOMEM;
+ goto err_failed_alloc_cqc_buf;
+ }
+
+ return 0;
+
+err_failed_alloc_cqc_buf:
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+err_failed_alloc_mtpt_buf:
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+
+ return ret;
+}
+
+static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
+
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+}
+
/**
* hns_roce_v1_reset - reset RoCE
* @hr_dev: RoCE device struct pointer
@@ -879,7 +939,6 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
- caps->sqp_start = 0;
caps->reserved_lkey = 0;
caps->reserved_pds = 0;
caps->reserved_mrws = 1;
@@ -944,8 +1003,18 @@ int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
+ ret = hns_roce_bt_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "bt init failed!\n");
+ goto error_failed_bt_init;
+ }
+
return 0;
+error_failed_bt_init:
+ hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
+ hns_roce_raq_free(hr_dev);
+
error_failed_raq_init:
hns_roce_db_free(hr_dev);
return ret;
@@ -953,6 +1022,7 @@ error_failed_raq_init:
void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
+ hns_roce_bt_free(hr_dev);
hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
hns_roce_raq_free(hr_dev);
hns_roce_db_free(hr_dev);
@@ -1192,9 +1262,7 @@ static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
return get_sw_cqe(hr_cq, hr_cq->cons_index);
}
-void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index,
- spinlock_t *doorbell_lock)
-
+void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
u32 doorbell[2];
@@ -1254,8 +1322,7 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
*/
wmb();
- hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
- &to_hr_dev(hr_cq->ib_cq.device)->cq_db_lock);
+ hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
}
}
@@ -1485,7 +1552,8 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
/* SQ corresponds to CQE */
sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
CQE_BYTE_4_WQE_INDEX_M,
- CQE_BYTE_4_WQE_INDEX_S));
+ CQE_BYTE_4_WQE_INDEX_S)&
+ ((*cur_qp)->sq.wqe_cnt-1));
switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) {
case HNS_ROCE_WQE_OPCODE_SEND:
wc->opcode = IB_WC_SEND;
@@ -1591,10 +1659,8 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
break;
}
- if (npolled) {
- hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
- &to_hr_dev(ibcq->device)->cq_db_lock);
- }
+ if (npolled)
+ hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
spin_unlock_irqrestore(&hr_cq->lock, flags);
@@ -1604,6 +1670,74 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return ret;
}
+int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ unsigned long end = 0, flags = 0;
+ uint32_t bt_cmd_val[2] = {0};
+ void __iomem *bt_cmd;
+ u64 bt_ba = 0;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
+ bt_ba = priv->bt_table.qpc_buf.map >> 12;
+ break;
+ case HEM_TYPE_MTPT:
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
+ bt_ba = priv->bt_table.mtpt_buf.map >> 12;
+ break;
+ case HEM_TYPE_CQC:
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
+ bt_ba = priv->bt_table.cqc_buf.map >> 12;
+ break;
+ case HEM_TYPE_SRQC:
+ dev_dbg(dev, "HEM_TYPE_SRQC not support.\n");
+ return -EINVAL;
+ default:
+ return 0;
+ }
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
+ roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
+ roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
+
+ spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+ end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
+ while (1) {
+ if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+ if (!(time_before(jiffies, end))) {
+ dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+ spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
+ flags);
+ return -EBUSY;
+ }
+ } else {
+ break;
+ }
+ msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
+ }
+
+ bt_cmd_val[0] = (uint32_t)bt_ba;
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
+ hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
+
+ spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
+
+ return 0;
+}
+
static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt,
enum hns_roce_qp_state cur_state,
@@ -1733,13 +1867,10 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
- QP1C_BYTES_16_PORT_NUM_S, hr_qp->port);
+ QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
roce_set_bit(context->qp1c_bytes_16,
QP1C_BYTES_16_SIGNALING_TYPE_S,
hr_qp->sq_signal_bits);
- roce_set_bit(context->qp1c_bytes_16,
- QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S,
- hr_qp->sq_signal_bits);
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
1);
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
@@ -1784,7 +1915,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
/* Copy context to QP1C register */
addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG +
- hr_qp->port * sizeof(*context));
+ hr_qp->phy_port * sizeof(*context));
writel(context->qp1c_bytes_4, addr);
writel(context->sq_rq_bt_l, addr + 1);
@@ -1795,15 +1926,16 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
writel(context->qp1c_bytes_28, addr + 6);
writel(context->qp1c_bytes_32, addr + 7);
writel(context->cur_sq_wqe_ba_l, addr + 8);
+ writel(context->qp1c_bytes_40, addr + 9);
}
/* Modify QP1C status */
reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
- hr_qp->port * sizeof(*context));
+ hr_qp->phy_port * sizeof(*context));
roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
- hr_qp->port * sizeof(*context), reg_val);
+ hr_qp->phy_port * sizeof(*context), reg_val);
hr_qp->state = new_state;
if (new_state == IB_QPS_RESET) {
@@ -1836,12 +1968,10 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_qp_context *context;
- struct hns_roce_rq_db rq_db;
dma_addr_t dma_handle_2 = 0;
dma_addr_t dma_handle = 0;
uint32_t doorbell[2] = {0};
int rq_pa_start = 0;
- u32 reg_val = 0;
u64 *mtts_2 = NULL;
int ret = -EINVAL;
u64 *mtts = NULL;
@@ -2119,7 +2249,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qpc_bytes_68,
QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
- QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, 0);
+ QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
+ hr_qp->rq.head);
roce_set_field(context->qpc_bytes_68,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
@@ -2186,7 +2317,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
- hr_qp->port);
+ hr_qp->phy_port);
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_SL_M,
QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
@@ -2257,20 +2388,17 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_bit(context->qpc_bytes_140,
QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
- roce_set_field(context->qpc_bytes_144,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
- attr->qp_state);
-
roce_set_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
roce_set_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, 0);
+ QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
+ attr->retry_cnt);
roce_set_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, 0);
+ QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
+ attr->rnr_retry);
roce_set_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_LSN_M,
QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
@@ -2281,10 +2409,19 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
attr->retry_cnt);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
- attr->timeout);
+ if (attr->timeout < 0x12) {
+ dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n",
+ attr->timeout);
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+ 0x12);
+ } else {
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+ attr->timeout);
+ }
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
@@ -2292,7 +2429,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
- hr_qp->port);
+ hr_qp->phy_port);
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_SL_M,
QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
@@ -2357,21 +2494,15 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
0);
- } else if ((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
+ } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
- roce_set_field(context->qpc_bytes_144,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
- attr->qp_state);
-
- } else {
- dev_err(dev, "not support this modify\n");
+ (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
+ dev_err(dev, "not support this status migration\n");
goto out;
}
@@ -2397,43 +2528,32 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
/* Memory barrier */
wmb();
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
- /* SW update GSI rq header */
- reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->port);
- roce_set_field(reg_val,
- ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
- ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
- hr_qp->rq.head);
- roce_write(hr_dev, ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
- } else {
- rq_db.u32_4 = 0;
- rq_db.u32_8 = 0;
-
- roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
- RQ_DOORBELL_U32_4_RQ_HEAD_S,
- hr_qp->rq.head);
- roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
- RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
- roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
- RQ_DOORBELL_U32_8_CMD_S, 1);
- roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
- 1);
- doorbell[0] = rq_db.u32_4;
- doorbell[1] = rq_db.u32_8;
-
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+ roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
+ RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
+ roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
+ RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
+ roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
+ RQ_DOORBELL_U32_8_CMD_S, 1);
+ roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
+
+ if (ibqp->uobject) {
+ hr_qp->rq.db_reg_l = hr_dev->reg_base +
+ ROCEE_DB_OTHERS_L_0_REG +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
}
+
+ hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
}
hr_qp->state = new_state;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
hr_qp->resp_depth = attr->max_dest_rd_atomic;
- if (attr_mask & IB_QP_PORT)
- hr_qp->port = (attr->port_num - 1);
+ if (attr_mask & IB_QP_PORT) {
+ hr_qp->port = attr->port_num - 1;
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ }
if (new_state == IB_QPS_RESET && !ibqp->uobject) {
hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
@@ -2789,6 +2909,7 @@ struct hns_roce_hw hns_roce_hw_v1 = {
.set_mtu = hns_roce_v1_set_mtu,
.write_mtpt = hns_roce_v1_write_mtpt,
.write_cqc = hns_roce_v1_write_cqc,
+ .clear_hem = hns_roce_v1_clear_hem,
.modify_qp = hns_roce_v1_modify_qp,
.query_qp = hns_roce_v1_query_qp,
.destroy_qp = hns_roce_v1_destroy_qp,
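The clear_hem implementation above, like the function it replaces, polls the BT_CMD sync bit against a jiffies deadline: compute the deadline once, retest the bit, and bail out with -EBUSY when time_before() fails. Reduced to its skeleton (reg is a placeholder for the mapped register address):

/* Sketch: bounded wait for a hardware ready bit. */
unsigned long end = jiffies + msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS);

while (readl(reg) >> BT_CMD_SYNC_SHIFT) {	/* still busy? */
	if (!time_before(jiffies, end))
		return -EBUSY;			/* deadline expired */
	msleep(HW_SYNC_SLEEP_TIME_INTERVAL);	/* back off and retry */
}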
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 316b592b1636..539b0a3b92b0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -102,6 +102,8 @@
#define HNS_ROCE_V1_EXT_ODB_ALFUL \
(HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17)
+
#define HNS_ROCE_ODB_POLL_MODE 0
#define HNS_ROCE_SDB_NORMAL_MODE 0
@@ -971,9 +973,16 @@ struct hns_roce_db_table {
struct hns_roce_ext_db *ext_db;
};
+struct hns_roce_bt_table {
+ struct hns_roce_buf_list qpc_buf;
+ struct hns_roce_buf_list mtpt_buf;
+ struct hns_roce_buf_list cqc_buf;
+};
+
struct hns_roce_v1_priv {
struct hns_roce_db_table db_table;
struct hns_roce_raq_table raq_table;
+ struct hns_roce_bt_table bt_table;
};
int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index f64f0dde9a88..764e35a54457 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -355,8 +355,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_qp = hr_dev->caps.num_qps;
props->max_qp_wr = hr_dev->caps.max_wqes;
props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
- IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_LOCAL_DMA_LKEY;
+ IB_DEVICE_RC_RNR_NAK_GEN;
props->max_sge = hr_dev->caps.max_sq_sg;
props->max_sge_rd = 1;
props->max_cq = hr_dev->caps.num_cqs;
@@ -372,6 +371,25 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
return 0;
}
+static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
+ u8 port_num)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct net_device *ndev;
+
+ if (port_num < 1 || port_num > hr_dev->caps.num_ports)
+ return NULL;
+
+ rcu_read_lock();
+
+ ndev = hr_dev->iboe.netdevs[port_num - 1];
+ if (ndev)
+ dev_hold(ndev);
+
+ rcu_read_unlock();
+ return ndev;
+}
+
static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
struct ib_port_attr *props)
{
@@ -584,6 +602,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
struct device *dev = &hr_dev->pdev->dev;
iboe = &hr_dev->iboe;
+ spin_lock_init(&iboe->lock);
ib_dev = &hr_dev->ib_dev;
strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);
@@ -618,6 +637,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->query_port = hns_roce_query_port;
ib_dev->modify_port = hns_roce_modify_port;
ib_dev->get_link_layer = hns_roce_get_link_layer;
+ ib_dev->get_netdev = hns_roce_get_netdev;
ib_dev->query_gid = hns_roce_query_gid;
ib_dev->query_pkey = hns_roce_query_pkey;
ib_dev->alloc_ucontext = hns_roce_alloc_ucontext;
@@ -667,8 +687,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
goto error_failed_setup_mtu_gids;
}
- spin_lock_init(&iboe->lock);
-
iboe->nb.notifier_call = hns_roce_netdev_event;
ret = register_netdevice_notifier(&iboe->nb);
if (ret) {
@@ -777,6 +795,15 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
if (IS_ERR(hr_dev->reg_base))
return PTR_ERR(hr_dev->reg_base);
+ /* read the node_guid of IB device from the DT or ACPI */
+ ret = device_property_read_u8_array(dev, "node-guid",
+ (u8 *)&hr_dev->ib_dev.node_guid,
+ GUID_LEN);
+ if (ret) {
+ dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
+ return ret;
+ }
+
/* get the RoCE associated ethernet ports or netdevices */
for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
if (dev_of_node(dev)) {
@@ -923,7 +950,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
struct device *dev = &hr_dev->pdev->dev;
spin_lock_init(&hr_dev->sm_lock);
- spin_lock_init(&hr_dev->cq_db_lock);
spin_lock_init(&hr_dev->bt_cmd_lock);
ret = hns_roce_init_uar_table(hr_dev);
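device_property_read_u8_array() resolves against either a device-tree property or an ACPI _DSD entry, which is why one call covers both firmware paths for the 8-byte node-guid. In isolation:

/* Sketch: firmware-agnostic read of the "node-guid" property
 * (GUID_LEN is 8, per the hns_roce_device.h hunk above). */
u8 guid[GUID_LEN];
int ret = device_property_read_u8_array(dev, "node-guid", guid, GUID_LEN);
if (ret)
	return ret;	/* property absent or shorter than GUID_LEN */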
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 59f5e2be046b..fb87883ead34 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -564,11 +564,14 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
dev_err(dev, "Just support 4K page size but is 0x%x now!\n",
mr->umem->page_size);
+ ret = -EINVAL;
+ goto err_umem;
}
if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
length);
+ ret = -EINVAL;
goto err_umem;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 16271b5bd170..05db7d59812a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -35,19 +35,7 @@
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
{
- struct device *dev = &hr_dev->pdev->dev;
- unsigned long pd_number;
- int ret = 0;
-
- ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number);
- if (ret == -1) {
- dev_err(dev, "alloc pdn from pdbitmap failed\n");
- return -ENOMEM;
- }
-
- *pdn = pd_number;
-
- return 0;
+ return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
}
static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
@@ -117,9 +105,15 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
if (ret == -1)
return -ENOMEM;
- uar->index = (uar->index - 1) % hr_dev->caps.phy_num_uars + 1;
+ if (uar->index > 0)
+ uar->index = (uar->index - 1) %
+ (hr_dev->caps.phy_num_uars - 1) + 1;
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
+ return -EINVAL;
+ }
uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 645c18d809a5..e86dd8d06777 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -32,14 +32,14 @@
*/
#include <linux/platform_device.h>
+#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"
-#define DB_REG_OFFSET 0x1000
-#define SQP_NUM 12
+#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
@@ -113,16 +113,8 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
int align, unsigned long *base)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- int ret = 0;
- unsigned long qpn;
-
- ret = hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, &qpn);
- if (ret == -1)
- return -ENOMEM;
-
- *base = qpn;
- return 0;
+ return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
@@ -255,7 +247,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- if (base_qpn < (hr_dev->caps.sqp_start + 2 * hr_dev->caps.num_ports))
+ if (base_qpn < SQP_NUM)
return;
hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
@@ -345,12 +337,10 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap,
- enum ib_qp_type type,
struct hns_roce_qp *hr_qp)
{
struct device *dev = &hr_dev->pdev->dev;
u32 max_cnt;
- (void)type;
if (cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
@@ -476,7 +466,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
/* Set SQ size */
ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
- init_attr->qp_type, hr_qp);
+ hr_qp);
if (ret) {
dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
goto err_out;
@@ -617,21 +607,19 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
return ERR_PTR(-ENOMEM);
hr_qp = &hr_sqp->hr_qp;
+ hr_qp->port = init_attr->port_num - 1;
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
+ hr_dev->iboe.phy_port[hr_qp->port];
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
- hr_dev->caps.sqp_start +
- hr_dev->caps.num_ports +
- init_attr->port_num - 1, hr_qp);
+ hr_qp->ibqp.qp_num, hr_qp);
if (ret) {
dev_err(dev, "Create GSI QP failed!\n");
kfree(hr_sqp);
return ERR_PTR(ret);
}
- hr_qp->port = (init_attr->port_num - 1);
- hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start +
- hr_dev->caps.num_ports +
- init_attr->port_num - 1;
break;
}
default:{
@@ -670,6 +658,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct device *dev = &hr_dev->pdev->dev;
int ret = -EINVAL;
int p;
+ enum ib_mtu active_mtu;
mutex_lock(&hr_qp->mutex);
@@ -700,6 +689,19 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
}
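+ /* Validate the requested path MTU: it must lie in the range the
+ * hardware supports (IB_MTU_256..IB_MTU_2048) and must not exceed
+ * the active MTU of the underlying netdev.
+ */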
+ if (attr_mask & IB_QP_PATH_MTU) {
+ p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+ active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
+
+ if (attr->path_mtu > IB_MTU_2048 ||
+ attr->path_mtu < IB_MTU_256 ||
+ attr->path_mtu > active_mtu) {
+ dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
+ attr->path_mtu);
+ goto out;
+ }
+ }
+
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
@@ -782,29 +784,11 @@ static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
- struct ib_qp *ibqp = &hr_qp->ibqp;
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
- if ((n < 0) || (n > hr_qp->rq.wqe_cnt)) {
- dev_err(&hr_dev->pdev->dev, "rq wqe index:%d,rq wqe cnt:%d\r\n",
- n, hr_qp->rq.wqe_cnt);
- return NULL;
- }
-
return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
- struct ib_qp *ibqp = &hr_qp->ibqp;
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
- if ((n < 0) || (n > hr_qp->sq.wqe_cnt)) {
- dev_err(&hr_dev->pdev->dev, "sq wqe index:%d,sq wqe cnt:%d\r\n",
- n, hr_qp->sq.wqe_cnt);
- return NULL;
- }
-
return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
@@ -837,8 +821,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
/* Each port includes two SQPs; six ports give 12 in total */
ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
- hr_dev->caps.num_qps - 1,
- hr_dev->caps.sqp_start + SQP_NUM,
+ hr_dev->caps.num_qps - 1, SQP_NUM,
reserved_from_top);
if (ret) {
dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 22174774dbb8..63036c731626 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1019,7 +1019,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
- resp.cache_line_size = L1_CACHE_BYTES;
+ resp.cache_line_size = cache_line_size();
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 41f4c2afbcdd..7ce97daf26c6 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -52,7 +52,6 @@ enum {
enum {
MLX5_IB_SQ_STRIDE = 6,
- MLX5_IB_CACHE_LINE_SIZE = 64,
};
static const u32 mlx5_ib_opcode[] = {
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 6c00d04b8b28..c6fe89d79248 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
+ ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
new file mode 100644
index 000000000000..6c9f3923e838
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/Kconfig
@@ -0,0 +1,8 @@
+config INFINIBAND_QEDR
+ tristate "QLogic RoCE driver"
+ depends on 64BIT && QEDE
+ select QED_LL2
+ select QED_RDMA
+ ---help---
+ This driver provides low-level InfiniBand over Ethernet
+ support for QLogic QED host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/qedr/Makefile b/drivers/infiniband/hw/qedr/Makefile
new file mode 100644
index 000000000000..ba7067c77f2f
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o
+
+qedr-y := main.o verbs.o qedr_cm.o
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
new file mode 100644
index 000000000000..7b74d09a8217
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -0,0 +1,914 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/netdevice.h>
+#include <linux/iommu.h>
+#include <net/addrconf.h>
+#include <linux/qed/qede_roce.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+
+MODULE_DESCRIPTION("QLogic 40G/100G RoCE Driver");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(QEDR_MODULE_VERSION);
+
+#define QEDR_WQ_MULTIPLIER_DFT (3)
+
+void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
+ enum ib_event_type type)
+{
+ struct ib_event ibev;
+
+ ibev.device = &dev->ibdev;
+ ibev.element.port_num = port_num;
+ ibev.event = type;
+
+ ib_dispatch_event(&ibev);
+}
+
+static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
+ u8 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str,
+ size_t str_len)
+{
+ struct qedr_dev *qedr = get_qedr_dev(ibdev);
+ u32 fw_ver = (u32)qedr->attr.fw_ver;
+
+ snprintf(str, str_len, "%d.%d.%d.%d",
+ (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
+ (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
+}
+
+static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
+{
+ struct qedr_dev *qdev;
+
+ qdev = get_qedr_dev(dev);
+ dev_hold(qdev->ndev);
+
+ /* The HW vendor's device driver must guarantee
+ * that this function returns NULL before the net device reaches
+ * NETDEV_UNREGISTER_FINAL state.
+ */
+ return qdev->ndev;
+}
+
+static int qedr_register_device(struct qedr_dev *dev)
+{
+ strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);
+
+ dev->ibdev.node_guid = dev->attr.node_guid;
+ memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
+ dev->ibdev.owner = THIS_MODULE;
+ dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;
+
+ dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
+ QEDR_UVERBS(QUERY_DEVICE) |
+ QEDR_UVERBS(QUERY_PORT) |
+ QEDR_UVERBS(ALLOC_PD) |
+ QEDR_UVERBS(DEALLOC_PD) |
+ QEDR_UVERBS(CREATE_COMP_CHANNEL) |
+ QEDR_UVERBS(CREATE_CQ) |
+ QEDR_UVERBS(RESIZE_CQ) |
+ QEDR_UVERBS(DESTROY_CQ) |
+ QEDR_UVERBS(REQ_NOTIFY_CQ) |
+ QEDR_UVERBS(CREATE_QP) |
+ QEDR_UVERBS(MODIFY_QP) |
+ QEDR_UVERBS(QUERY_QP) |
+ QEDR_UVERBS(DESTROY_QP) |
+ QEDR_UVERBS(REG_MR) |
+ QEDR_UVERBS(DEREG_MR) |
+ QEDR_UVERBS(POLL_CQ) |
+ QEDR_UVERBS(POST_SEND) |
+ QEDR_UVERBS(POST_RECV);
+
+ dev->ibdev.phys_port_cnt = 1;
+ dev->ibdev.num_comp_vectors = dev->num_cnq;
+ dev->ibdev.node_type = RDMA_NODE_IB_CA;
+
+ dev->ibdev.query_device = qedr_query_device;
+ dev->ibdev.query_port = qedr_query_port;
+ dev->ibdev.modify_port = qedr_modify_port;
+
+ dev->ibdev.query_gid = qedr_query_gid;
+ dev->ibdev.add_gid = qedr_add_gid;
+ dev->ibdev.del_gid = qedr_del_gid;
+
+ dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
+ dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
+ dev->ibdev.mmap = qedr_mmap;
+
+ dev->ibdev.alloc_pd = qedr_alloc_pd;
+ dev->ibdev.dealloc_pd = qedr_dealloc_pd;
+
+ dev->ibdev.create_cq = qedr_create_cq;
+ dev->ibdev.destroy_cq = qedr_destroy_cq;
+ dev->ibdev.resize_cq = qedr_resize_cq;
+ dev->ibdev.req_notify_cq = qedr_arm_cq;
+
+ dev->ibdev.create_qp = qedr_create_qp;
+ dev->ibdev.modify_qp = qedr_modify_qp;
+ dev->ibdev.query_qp = qedr_query_qp;
+ dev->ibdev.destroy_qp = qedr_destroy_qp;
+
+ dev->ibdev.query_pkey = qedr_query_pkey;
+
+ dev->ibdev.create_ah = qedr_create_ah;
+ dev->ibdev.destroy_ah = qedr_destroy_ah;
+
+ dev->ibdev.get_dma_mr = qedr_get_dma_mr;
+ dev->ibdev.dereg_mr = qedr_dereg_mr;
+ dev->ibdev.reg_user_mr = qedr_reg_user_mr;
+ dev->ibdev.alloc_mr = qedr_alloc_mr;
+ dev->ibdev.map_mr_sg = qedr_map_mr_sg;
+
+ dev->ibdev.poll_cq = qedr_poll_cq;
+ dev->ibdev.post_send = qedr_post_send;
+ dev->ibdev.post_recv = qedr_post_recv;
+
+ dev->ibdev.process_mad = qedr_process_mad;
+ dev->ibdev.get_port_immutable = qedr_port_immutable;
+ dev->ibdev.get_netdev = qedr_get_netdev;
+
+ dev->ibdev.dma_device = &dev->pdev->dev;
+
+ dev->ibdev.get_link_layer = qedr_link_layer;
+ dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;
+
+ return ib_register_device(&dev->ibdev, NULL);
+}
+
+/* This function allocates fast-path status block memory */
+static int qedr_alloc_mem_sb(struct qedr_dev *dev,
+ struct qed_sb_info *sb_info, u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int rc;
+
+ sb_virt = dma_alloc_coherent(&dev->pdev->dev,
+ sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
+ if (!sb_virt)
+ return -ENOMEM;
+
+ rc = dev->ops->common->sb_init(dev->cdev, sb_info,
+ sb_virt, sb_phys, sb_id,
+ QED_SB_TYPE_CNQ);
+ if (rc) {
+ pr_err("Status block initialization failed\n");
+ dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
+ sb_virt, sb_phys);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qedr_free_mem_sb(struct qedr_dev *dev,
+ struct qed_sb_info *sb_info, int sb_id)
+{
+ if (sb_info->sb_virt) {
+ dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
+ dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt, sb_info->sb_phys);
+ }
+}
+
+static void qedr_free_resources(struct qedr_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->num_cnq; i++) {
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+ dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+ }
+
+ kfree(dev->cnq_array);
+ kfree(dev->sb_array);
+ kfree(dev->sgid_tbl);
+}
+
+static int qedr_alloc_resources(struct qedr_dev *dev)
+{
+ struct qedr_cnq *cnq;
+ __le16 *cons_pi;
+ u16 n_entries;
+ int i, rc;
+
+ dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
+ QEDR_MAX_SGID, GFP_KERNEL);
+ if (!dev->sgid_tbl)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->sgid_lock);
+
+ /* Allocate Status blocks for CNQ */
+ dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
+ GFP_KERNEL);
+ if (!dev->sb_array) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ dev->cnq_array = kcalloc(dev->num_cnq,
+ sizeof(*dev->cnq_array), GFP_KERNEL);
+ if (!dev->cnq_array) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);
+
+ /* Allocate CNQ PBLs */
+ n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
+ for (i = 0; i < dev->num_cnq; i++) {
+ cnq = &dev->cnq_array[i];
+
+ rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
+ dev->sb_start + i);
+ if (rc)
+ goto err3;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ n_entries,
+ sizeof(struct regpair *),
+ &cnq->pbl);
+ if (rc)
+ goto err4;
+
+ cnq->dev = dev;
+ cnq->sb = &dev->sb_array[i];
+ cons_pi = dev->sb_array[i].sb_virt->pi_array;
+ cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
+ cnq->index = i;
+ sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
+ i, qed_chain_get_cons_idx(&cnq->pbl));
+ }
+
+ return 0;
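+
+/* Error unwind: err4 releases only the status block of the iteration
+ * that failed; err3 then walks back over all fully initialized CNQs.
+ */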
+err4:
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+err3:
+ for (--i; i >= 0; i--) {
+ dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+ }
+ kfree(dev->cnq_array);
+err2:
+ kfree(dev->sb_array);
+err1:
+ kfree(dev->sgid_tbl);
+ return rc;
+}
+
+/* QEDR sysfs interface */
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qedr_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+}
+
+static ssize_t show_hca_type(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
+
+static struct device_attribute *qedr_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_hca_type
+};
+
+static void qedr_remove_sysfiles(struct qedr_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
+ device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
+}
+
+static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
+{
+ struct pci_dev *bridge;
+ u32 val;
+
+ dev->atomic_cap = IB_ATOMIC_NONE;
+
+ bridge = pdev->bus->self;
+ if (!bridge)
+ return;
+
+ /* Check whether we are connected directly or via a switch */
+ while (bridge && bridge->bus->parent) {
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n",
+ bridge->bus->number, bridge->bus->primary);
+ /* Need to check Atomic Op Routing Supported all the way to
+ * root complex.
+ */
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
+ if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) {
+ pcie_capability_clear_word(pdev,
+ PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ return;
+ }
+ bridge = bridge->bus->parent->self;
+ }
+ bridge = pdev->bus->self;
+
+ /* Routing is supported all the way up; enable atomics if the
+ * upstream bridge supports 64-bit AtomicOp completion.
+ */
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
+ if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) {
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ dev->atomic_cap = IB_ATOMIC_GLOB;
+ } else {
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ }
+}
+
+static const struct qed_rdma_ops *qed_ops;
+
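+/* Reassemble a 64-bit value from the hi/lo 32-bit halves that firmware
+ * reports in a regpair.
+ */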
+#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
+
+static irqreturn_t qedr_irq_handler(int irq, void *handle)
+{
+ u16 hw_comp_cons, sw_comp_cons;
+ struct qedr_cnq *cnq = handle;
+ struct regpair *cq_handle;
+ struct qedr_cq *cq;
+
+ qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);
+
+ qed_sb_update_sb_idx(cnq->sb);
+
+ hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+ /* Align protocol-index and chain reads */
+ rmb();
+
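+ /* Each consumed CNQ element carries the opaque handle of a CQ whose
+ * completion handler should run; walk the ring until the software
+ * consumer catches up with the hardware producer.
+ */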
+ while (sw_comp_cons != hw_comp_cons) {
+ cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
+ cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
+ cq_handle->lo);
+
+ if (cq == NULL) {
+ DP_ERR(cnq->dev,
+ "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
+ cq_handle->hi, cq_handle->lo, sw_comp_cons,
+ hw_comp_cons);
+
+ break;
+ }
+
+ if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
+ DP_ERR(cnq->dev,
+ "Problem with cq signature, cq_handle->hi=%d ch_handle->lo=%d cq=%p\n",
+ cq_handle->hi, cq_handle->lo, cq);
+ break;
+ }
+
+ cq->arm_flags = 0;
+
+ if (cq->ibcq.comp_handler)
+ (*cq->ibcq.comp_handler)
+ (&cq->ibcq, cq->ibcq.cq_context);
+
+ sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+ cnq->n_comp++;
+ }
+
+ qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
+ sw_comp_cons);
+
+ qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);
+
+ return IRQ_HANDLED;
+}
+
+static void qedr_sync_free_irqs(struct qedr_dev *dev)
+{
+ u32 vector;
+ int i;
+
+ for (i = 0; i < dev->int_info.used_cnt; i++) {
+ if (dev->int_info.msix_cnt) {
+ vector = dev->int_info.msix[i * dev->num_hwfns].vector;
+ synchronize_irq(vector);
+ free_irq(vector, &dev->cnq_array[i]);
+ }
+ }
+
+ dev->int_info.used_cnt = 0;
+}
+
+static int qedr_req_msix_irqs(struct qedr_dev *dev)
+{
+ int i, rc = 0;
+
+ if (dev->num_cnq > dev->int_info.msix_cnt) {
+ DP_ERR(dev,
+ "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
+ dev->num_cnq, dev->int_info.msix_cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev->num_cnq; i++) {
+ rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
+ qedr_irq_handler, 0, dev->cnq_array[i].name,
+ &dev->cnq_array[i]);
+ if (rc) {
+ DP_ERR(dev, "Request cnq %d irq failed\n", i);
+ qedr_sync_free_irqs(dev);
+ } else {
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
+ dev->cnq_array[i].name, i,
+ &dev->cnq_array[i]);
+ dev->int_info.used_cnt++;
+ }
+ }
+
+ return rc;
+}
+
+static int qedr_setup_irqs(struct qedr_dev *dev)
+{
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");
+
+ /* Learn Interrupt configuration */
+ rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
+ if (rc < 0)
+ return rc;
+
+ rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
+ if (rc) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
+ return rc;
+ }
+
+ if (dev->int_info.msix_cnt) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
+ dev->int_info.msix_cnt);
+ rc = qedr_req_msix_irqs(dev);
+ if (rc)
+ return rc;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");
+
+ return 0;
+}
+
+static int qedr_set_device_attr(struct qedr_dev *dev)
+{
+ struct qed_rdma_device *qed_attr;
+ struct qedr_device_attr *attr;
+ u32 page_size;
+
+ /* Part 1 - query core capabilities */
+ qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
+
+ /* Part 2 - check capabilities */
+ page_size = ~qed_attr->page_size_caps + 1;
+ if (page_size > PAGE_SIZE) {
+ DP_ERR(dev,
+ "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
+ PAGE_SIZE, page_size);
+ return -ENODEV;
+ }
+
+ /* Part 3 - copy and update capabilities */
+ attr = &dev->attr;
+ attr->vendor_id = qed_attr->vendor_id;
+ attr->vendor_part_id = qed_attr->vendor_part_id;
+ attr->hw_ver = qed_attr->hw_ver;
+ attr->fw_ver = qed_attr->fw_ver;
+ attr->node_guid = qed_attr->node_guid;
+ attr->sys_image_guid = qed_attr->sys_image_guid;
+ attr->max_cnq = qed_attr->max_cnq;
+ attr->max_sge = qed_attr->max_sge;
+ attr->max_inline = qed_attr->max_inline;
+ attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
+ attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
+ attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
+ attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
+ attr->max_dev_resp_rd_atomic_resc =
+ qed_attr->max_dev_resp_rd_atomic_resc;
+ attr->max_cq = qed_attr->max_cq;
+ attr->max_qp = qed_attr->max_qp;
+ attr->max_mr = qed_attr->max_mr;
+ attr->max_mr_size = qed_attr->max_mr_size;
+ attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
+ attr->max_mw = qed_attr->max_mw;
+ attr->max_fmr = qed_attr->max_fmr;
+ attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
+ attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
+ attr->max_pd = qed_attr->max_pd;
+ attr->max_ah = qed_attr->max_ah;
+ attr->max_pkey = qed_attr->max_pkey;
+ attr->max_srq = qed_attr->max_srq;
+ attr->max_srq_wr = qed_attr->max_srq_wr;
+ attr->dev_caps = qed_attr->dev_caps;
+ attr->page_size_caps = qed_attr->page_size_caps;
+ attr->dev_ack_delay = qed_attr->dev_ack_delay;
+ attr->reserved_lkey = qed_attr->reserved_lkey;
+ attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
+ attr->max_stats_queues = qed_attr->max_stats_queues;
+
+ return 0;
+}
+
+void qedr_unaffiliated_event(void *context,
+ u8 event_code)
+{
+ pr_err("unaffiliated event not implemented yet\n");
+}
+
+void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
+{
+#define EVENT_TYPE_NOT_DEFINED 0
+#define EVENT_TYPE_CQ 1
+#define EVENT_TYPE_QP 2
+ struct qedr_dev *dev = (struct qedr_dev *)context;
+ union event_ring_data *data = fw_handle;
+ u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
+ data->roce_handle.lo;
+ u8 event_type = EVENT_TYPE_NOT_DEFINED;
+ struct ib_event event;
+ struct ib_cq *ibcq;
+ struct ib_qp *ibqp;
+ struct qedr_cq *cq;
+ struct qedr_qp *qp;
+
+ switch (e_code) {
+ case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
+ event.event = IB_EVENT_CQ_ERR;
+ event_type = EVENT_TYPE_CQ;
+ break;
+ case ROCE_ASYNC_EVENT_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
+ event.event = IB_EVENT_QP_FATAL;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ default:
+ DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
+ roce_handle64);
+ }
+
+ switch (event_type) {
+ case EVENT_TYPE_CQ:
+ cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
+ if (cq) {
+ ibcq = &cq->ibcq;
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.element.cq = ibcq;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
+ } else {
+ WARN(1,
+ "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
+ roce_handle64);
+ }
+ DP_ERR(dev, "CQ event %d on hanlde %p\n", e_code, cq);
+ break;
+ case EVENT_TYPE_QP:
+ qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
+ if (qp) {
+ ibqp = &qp->ibqp;
+ if (ibqp->event_handler) {
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+ } else {
+ WARN(1,
+ "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
+ roce_handle64);
+ }
+ DP_ERR(dev, "QP event %d on hanlde %p\n", e_code, qp);
+ break;
+ default:
+ break;
+ }
+}
+
+static int qedr_init_hw(struct qedr_dev *dev)
+{
+ struct qed_rdma_add_user_out_params out_params;
+ struct qed_rdma_start_in_params *in_params;
+ struct qed_rdma_cnq_params *cur_pbl;
+ struct qed_rdma_events events;
+ dma_addr_t p_phys_table;
+ u32 page_cnt;
+ int rc = 0;
+ int i;
+
+ in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
+ if (!in_params) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ in_params->desired_cnq = dev->num_cnq;
+ for (i = 0; i < dev->num_cnq; i++) {
+ cur_pbl = &in_params->cnq_pbl_list[i];
+
+ page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
+ cur_pbl->num_pbl_pages = page_cnt;
+
+ p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
+ cur_pbl->pbl_ptr = (u64)p_phys_table;
+ }
+
+ events.affiliated_event = qedr_affiliated_event;
+ events.unaffiliated_event = qedr_unaffiliated_event;
+ events.context = dev;
+
+ in_params->events = &events;
+ in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
+ in_params->max_mtu = dev->ndev->mtu;
+ ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);
+
+ rc = dev->ops->rdma_init(dev->cdev, in_params);
+ if (rc)
+ goto out;
+
+ rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
+ if (rc)
+ goto out;
+
+ dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
+ dev->db_phys_addr = out_params.dpi_phys_addr;
+ dev->db_size = out_params.dpi_size;
+ dev->dpi = out_params.dpi;
+
+ rc = qedr_set_device_attr(dev);
+out:
+ kfree(in_params);
+ if (rc)
+ DP_ERR(dev, "Init HW Failed rc = %d\n", rc);
+
+ return rc;
+}
+
+void qedr_stop_hw(struct qedr_dev *dev)
+{
+ dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
+ dev->ops->rdma_stop(dev->rdma_ctx);
+}
+
+static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
+ struct net_device *ndev)
+{
+ struct qed_dev_rdma_info dev_info;
+ struct qedr_dev *dev;
+ int rc = 0, i;
+
+ dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
+ if (!dev) {
+ pr_err("Unable to allocate ib device\n");
+ return NULL;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");
+
+ dev->pdev = pdev;
+ dev->ndev = ndev;
+ dev->cdev = cdev;
+
+ qed_ops = qed_get_rdma_ops();
+ if (!qed_ops) {
+ DP_ERR(dev, "Failed to get qed roce operations\n");
+ goto init_err;
+ }
+
+ dev->ops = qed_ops;
+ rc = qed_ops->fill_dev_info(cdev, &dev_info);
+ if (rc)
+ goto init_err;
+
+ dev->num_hwfns = dev_info.common.num_hwfns;
+ dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
+
+ dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
+ if (!dev->num_cnq) {
+ DP_ERR(dev, "not enough CNQ resources.\n");
+ goto init_err;
+ }
+
+ dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;
+
+ qedr_pci_set_atomic(dev, pdev);
+
+ rc = qedr_alloc_resources(dev);
+ if (rc)
+ goto init_err;
+
+ rc = qedr_init_hw(dev);
+ if (rc)
+ goto alloc_err;
+
+ rc = qedr_setup_irqs(dev);
+ if (rc)
+ goto irq_err;
+
+ rc = qedr_register_device(dev);
+ if (rc) {
+ DP_ERR(dev, "Unable to allocate register device\n");
+ goto reg_err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
+ if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
+ goto sysfs_err;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
+ return dev;
+
+sysfs_err:
+ ib_unregister_device(&dev->ibdev);
+reg_err:
+ qedr_sync_free_irqs(dev);
+irq_err:
+ qedr_stop_hw(dev);
+alloc_err:
+ qedr_free_resources(dev);
+init_err:
+ ib_dealloc_device(&dev->ibdev);
+ DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
+
+ return NULL;
+}
+
+static void qedr_remove(struct qedr_dev *dev)
+{
+ /* First unregister with stack to stop all the active traffic
+ * of the registered clients.
+ */
+ qedr_remove_sysfiles(dev);
+ ib_unregister_device(&dev->ibdev);
+
+ qedr_stop_hw(dev);
+ qedr_sync_free_irqs(dev);
+ qedr_free_resources(dev);
+ ib_dealloc_device(&dev->ibdev);
+}
+
+static int qedr_close(struct qedr_dev *dev)
+{
+ qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+
+ return 0;
+}
+
+static void qedr_shutdown(struct qedr_dev *dev)
+{
+ qedr_close(dev);
+ qedr_remove(dev);
+}
+
+static void qedr_mac_address_change(struct qedr_dev *dev)
+{
+ union ib_gid *sgid = &dev->sgid_tbl[0];
+ u8 guid[8], mac_addr[6];
+ int rc;
+
+ /* Update SGID */
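+ /* Build an EUI-64 style GUID from the MAC address: flip the
+ * locally-administered bit of the first octet and insert 0xff, 0xfe
+ * between the OUI and the device-specific bytes.
+ */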
+ ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
+ guid[0] = mac_addr[0] ^ 2;
+ guid[1] = mac_addr[1];
+ guid[2] = mac_addr[2];
+ guid[3] = 0xff;
+ guid[4] = 0xfe;
+ guid[5] = mac_addr[3];
+ guid[6] = mac_addr[4];
+ guid[7] = mac_addr[5];
+ sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ memcpy(&sgid->raw[8], guid, sizeof(guid));
+
+ /* Update LL2 */
+ rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev,
+ dev->gsi_ll2_mac_address,
+ dev->ndev->dev_addr);
+
+ ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+ qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+
+ if (rc)
+ DP_ERR(dev, "Error updating mac filter\n");
+}
+
+/* Event handling via the NIC driver ensures that all NIC-specific
+ * initialization is done before the RoCE driver notifies the stack
+ * of the event.
+ */
+static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
+{
+ switch (event) {
+ case QEDE_UP:
+ qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ break;
+ case QEDE_DOWN:
+ qedr_close(dev);
+ break;
+ case QEDE_CLOSE:
+ qedr_shutdown(dev);
+ break;
+ case QEDE_CHANGE_ADDR:
+ qedr_mac_address_change(dev);
+ break;
+ default:
+ pr_err("Event not supported\n");
+ }
+}
+
+static struct qedr_driver qedr_drv = {
+ .name = "qedr_driver",
+ .add = qedr_add,
+ .remove = qedr_remove,
+ .notify = qedr_notify,
+};
+
+static int __init qedr_init_module(void)
+{
+ return qede_roce_register_driver(&qedr_drv);
+}
+
+static void __exit qedr_exit_module(void)
+{
+ qede_roce_unregister_driver(&qedr_drv);
+}
+
+module_init(qedr_init_module);
+module_exit(qedr_exit_module);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
new file mode 100644
index 000000000000..620badd7d4fb
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -0,0 +1,495 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_H__
+#define __QEDR_H__
+
+#include <linux/pci.h>
+#include <rdma/ib_addr.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_roce_if.h>
+#include <linux/qed/qede_roce.h>
+#include "qedr_hsi.h"
+
+#define QEDR_MODULE_VERSION "8.10.10.0"
+#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
+#define DP_NAME(dev) ((dev)->ibdev.name)
+
+#define DP_DEBUG(dev, module, fmt, ...) \
+ pr_debug("(%s) " module ": " fmt, \
+ DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__)
+
+#define QEDR_MSG_INIT "INIT"
+#define QEDR_MSG_MISC "MISC"
+#define QEDR_MSG_CQ " CQ"
+#define QEDR_MSG_MR " MR"
+#define QEDR_MSG_RQ " RQ"
+#define QEDR_MSG_SQ " SQ"
+#define QEDR_MSG_QP " QP"
+#define QEDR_MSG_GSI " GSI"
+
+#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
+
+struct qedr_dev;
+
+struct qedr_cnq {
+ struct qedr_dev *dev;
+ struct qed_chain pbl;
+ struct qed_sb_info *sb;
+ char name[32];
+ u64 n_comp;
+ __le16 *hw_cons_ptr;
+ u8 index;
+};
+
+#define QEDR_MAX_SGID 128
+
+struct qedr_device_attr {
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ u64 fw_ver;
+ u64 node_guid;
+ u64 sys_image_guid;
+ u8 max_cnq;
+ u8 max_sge;
+ u16 max_inline;
+ u32 max_sqe;
+ u32 max_rqe;
+ u8 max_qp_resp_rd_atomic_resc;
+ u8 max_qp_req_rd_atomic_resc;
+ u64 max_dev_resp_rd_atomic_resc;
+ u32 max_cq;
+ u32 max_qp;
+ u32 max_mr;
+ u64 max_mr_size;
+ u32 max_cqe;
+ u32 max_mw;
+ u32 max_fmr;
+ u32 max_mr_mw_fmr_pbl;
+ u64 max_mr_mw_fmr_size;
+ u32 max_pd;
+ u32 max_ah;
+ u8 max_pkey;
+ u32 max_srq;
+ u32 max_srq_wr;
+ u8 max_srq_sge;
+ u8 max_stats_queues;
+ u32 dev_caps;
+
+ u64 page_size_caps;
+ u8 dev_ack_delay;
+ u32 reserved_lkey;
+ u32 bad_pkey_counter;
+ struct qed_rdma_events events;
+};
+
+struct qedr_dev {
+ struct ib_device ibdev;
+ struct qed_dev *cdev;
+ struct pci_dev *pdev;
+ struct net_device *ndev;
+
+ enum ib_atomic_cap atomic_cap;
+
+ void *rdma_ctx;
+ struct qedr_device_attr attr;
+
+ const struct qed_rdma_ops *ops;
+ struct qed_int_info int_info;
+
+ struct qed_sb_info *sb_array;
+ struct qedr_cnq *cnq_array;
+ int num_cnq;
+ int sb_start;
+
+ void __iomem *db_addr;
+ u64 db_phys_addr;
+ u32 db_size;
+ u16 dpi;
+
+ union ib_gid *sgid_tbl;
+
+ /* Lock for sgid table */
+ spinlock_t sgid_lock;
+
+ u64 guid;
+
+ u32 dp_module;
+ u8 dp_level;
+ u8 num_hwfns;
+ uint wq_multiplier;
+ u8 gsi_ll2_mac_address[ETH_ALEN];
+ int gsi_qp_created;
+ struct qedr_cq *gsi_sqcq;
+ struct qedr_cq *gsi_rqcq;
+ struct qedr_qp *gsi_qp;
+};
+
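+/* SQ sizing: the WQE budget follows from the PBL entry count, the ring
+ * page size, the SQE element size, and the maximum number of elements a
+ * single WQE may span.
+ */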
+#define QEDR_MAX_SQ_PBL (0x8000)
+#define QEDR_MAX_SQ_PBL_ENTRIES (0x10000 / sizeof(void *))
+#define QEDR_SQE_ELEMENT_SIZE (sizeof(struct rdma_sq_sge))
+#define QEDR_MAX_SQE_ELEMENTS_PER_SQE (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
+ QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
+ QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE ((QEDR_MAX_SQ_PBL_ENTRIES) *\
+ (RDMA_RING_PAGE_SIZE) / \
+ (QEDR_SQE_ELEMENT_SIZE) /\
+ (QEDR_MAX_SQE_ELEMENTS_PER_SQE))
+/* RQ */
+#define QEDR_MAX_RQ_PBL (0x2000)
+#define QEDR_MAX_RQ_PBL_ENTRIES (0x10000 / sizeof(void *))
+#define QEDR_RQE_ELEMENT_SIZE (sizeof(struct rdma_rq_sge))
+#define QEDR_MAX_RQE_ELEMENTS_PER_RQE (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
+ QEDR_RQE_ELEMENT_SIZE)
+#define QEDR_MAX_RQE ((QEDR_MAX_RQ_PBL_ENTRIES) *\
+ (RDMA_RING_PAGE_SIZE) / \
+ (QEDR_RQE_ELEMENT_SIZE) /\
+ (QEDR_MAX_RQE_ELEMENTS_PER_RQE))
+
+#define QEDR_CQE_SIZE (sizeof(union rdma_cqe))
+#define QEDR_MAX_CQE_PBL_SIZE (512 * 1024)
+#define QEDR_MAX_CQE_PBL_ENTRIES (((QEDR_MAX_CQE_PBL_SIZE) / \
+ sizeof(u64)) - 1)
+#define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \
+ (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE))
+
+#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
+
+#define QEDR_MAX_PORT (1)
+
+#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+
+#define QEDR_ROCE_PKEY_MAX 1
+#define QEDR_ROCE_PKEY_TABLE_LEN 1
+#define QEDR_ROCE_PKEY_DEFAULT 0xffff
+
+struct qedr_pbl {
+ struct list_head list_entry;
+ void *va;
+ dma_addr_t pa;
+};
+
+struct qedr_ucontext {
+ struct ib_ucontext ibucontext;
+ struct qedr_dev *dev;
+ struct qedr_pd *pd;
+ u64 dpi_addr;
+ u64 dpi_phys_addr;
+ u32 dpi_size;
+ u16 dpi;
+
+ struct list_head mm_head;
+
+ /* Lock to protect mm list */
+ struct mutex mm_list_lock;
+};
+
+union db_prod64 {
+ struct rdma_pwm_val32_data data;
+ u64 raw;
+};
+
+enum qedr_cq_type {
+ QEDR_CQ_TYPE_GSI,
+ QEDR_CQ_TYPE_KERNEL,
+ QEDR_CQ_TYPE_USER,
+};
+
+struct qedr_pbl_info {
+ u32 num_pbls;
+ u32 num_pbes;
+ u32 pbl_size;
+ u32 pbe_size;
+ bool two_layered;
+};
+
+struct qedr_userq {
+ struct ib_umem *umem;
+ struct qedr_pbl_info pbl_info;
+ struct qedr_pbl *pbl_tbl;
+ u64 buf_addr;
+ size_t buf_len;
+};
+
+struct qedr_cq {
+ struct ib_cq ibcq;
+
+ enum qedr_cq_type cq_type;
+ u32 sig;
+
+ u16 icid;
+
+ /* Lock to protect completion handler */
+ spinlock_t comp_handler_lock;
+
+ /* Lock to protect multiple CQs */
+ spinlock_t cq_lock;
+ u8 arm_flags;
+ struct qed_chain pbl;
+
+ void __iomem *db_addr;
+ union db_prod64 db;
+
+ u8 pbl_toggle;
+ union rdma_cqe *latest_cqe;
+ union rdma_cqe *toggle_cqe;
+
+ u32 cq_cons;
+
+ struct qedr_userq q;
+};
+
+struct qedr_pd {
+ struct ib_pd ibpd;
+ u32 pd_id;
+ struct qedr_ucontext *uctx;
+};
+
+struct qedr_mm {
+ struct {
+ u64 phy_addr;
+ unsigned long len;
+ } key;
+ struct list_head entry;
+};
+
+union db_prod32 {
+ struct rdma_pwm_val16_data data;
+ u32 raw;
+};
+
+struct qedr_qp_hwq_info {
+ /* WQE Elements */
+ struct qed_chain pbl;
+ u64 p_phys_addr_tbl;
+ u32 max_sges;
+
+ /* WQE */
+ u16 prod;
+ u16 cons;
+ u16 wqe_cons;
+ u16 gsi_cons;
+ u16 max_wr;
+
+ /* DB */
+ void __iomem *db;
+ union db_prod32 db_data;
+};
+
+#define QEDR_INC_SW_IDX(p_info, index) \
+ do { \
+ p_info->index = (p_info->index + 1) & \
+ qed_chain_get_capacity(p_info->pbl); \
+ } while (0)
+
+enum qedr_qp_err_bitmap {
+ QEDR_QP_ERR_SQ_FULL = 1,
+ QEDR_QP_ERR_RQ_FULL = 2,
+ QEDR_QP_ERR_BAD_SR = 4,
+ QEDR_QP_ERR_BAD_RR = 8,
+ QEDR_QP_ERR_SQ_PBL_FULL = 16,
+ QEDR_QP_ERR_RQ_PBL_FULL = 32,
+};
+
+struct qedr_qp {
+ struct ib_qp ibqp; /* must be first */
+ struct qedr_dev *dev;
+
+ struct qedr_qp_hwq_info sq;
+ struct qedr_qp_hwq_info rq;
+
+ u32 max_inline_data;
+
+ /* Lock for QP's */
+ spinlock_t q_lock;
+ struct qedr_cq *sq_cq;
+ struct qedr_cq *rq_cq;
+ struct qedr_srq *srq;
+ enum qed_roce_qp_state state;
+ u32 id;
+ struct qedr_pd *pd;
+ enum ib_qp_type qp_type;
+ struct qed_rdma_qp *qed_qp;
+ u32 qp_id;
+ u16 icid;
+ u16 mtu;
+ int sgid_idx;
+ u32 rq_psn;
+ u32 sq_psn;
+ u32 qkey;
+ u32 dest_qp_num;
+
+ /* Relevant to qps created from kernel space only (ULPs) */
+ u8 prev_wqe_size;
+ u16 wqe_cons;
+ u32 err_bitmap;
+ bool signaled;
+
+ /* SQ shadow */
+ struct {
+ u64 wr_id;
+ enum ib_wc_opcode opcode;
+ u32 bytes_len;
+ u8 wqe_size;
+ bool signaled;
+ dma_addr_t icrc_mapping;
+ u32 *icrc;
+ struct qedr_mr *mr;
+ } *wqe_wr_id;
+
+ /* RQ shadow */
+ struct {
+ u64 wr_id;
+ struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
+ u8 wqe_size;
+
+ u8 smac[ETH_ALEN];
+ u16 vlan_id;
+ int rc;
+ } *rqe_wr_id;
+
+ /* Relevant to qps created from user space only (applications) */
+ struct qedr_userq usq;
+ struct qedr_userq urq;
+};
+
+struct qedr_ah {
+ struct ib_ah ibah;
+ struct ib_ah_attr attr;
+};
+
+enum qedr_mr_type {
+ QEDR_MR_USER,
+ QEDR_MR_KERNEL,
+ QEDR_MR_DMA,
+ QEDR_MR_FRMR,
+};
+
+struct mr_info {
+ struct qedr_pbl *pbl_table;
+ struct qedr_pbl_info pbl_info;
+ struct list_head free_pbl_list;
+ struct list_head inuse_pbl_list;
+ u32 completed;
+ u32 completed_handled;
+};
+
+struct qedr_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+
+ struct qed_rdma_register_tid_in_params hw_mr;
+ enum qedr_mr_type type;
+
+ struct qedr_dev *dev;
+ struct mr_info info;
+
+ u64 *pages;
+ u32 npages;
+};
+
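+/* OR @flag into @value at the bit offset given by the field's _SHIFT. */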
+#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))
+
+#define QEDR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
+ RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
+#define QEDR_RESP_RDMA (RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
+ RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
+#define QEDR_RESP_RDMA_IMM (QEDR_RESP_IMM | QEDR_RESP_RDMA)
+
+static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info)
+{
+ info->cons = (info->cons + 1) % info->max_wr;
+ info->wqe_cons++;
+}
+
+static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info)
+{
+ info->prod = (info->prod + 1) % info->max_wr;
+}
+
+static inline int qedr_get_dmac(struct qedr_dev *dev,
+ struct ib_ah_attr *ah_attr, u8 *mac_addr)
+{
+ union ib_gid zero_sgid = { { 0 } };
+ struct in6_addr in6;
+
+ if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
+ DP_ERR(dev, "Local port GID not supported\n");
+ eth_zero_addr(mac_addr);
+ return -EINVAL;
+ }
+
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+ ether_addr_copy(mac_addr, ah_attr->dmac);
+
+ return 0;
+}
+
+static inline
+struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct qedr_ucontext, ibucontext);
+}
+
+static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct qedr_dev, ibdev);
+}
+
+static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct qedr_pd, ibpd);
+}
+
+static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct qedr_cq, ibcq);
+}
+
+static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct qedr_qp, ibqp);
+}
+
+static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct qedr_ah, ibah);
+}
+
+static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct qedr_mr, ibmr);
+}
+#endif
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
new file mode 100644
index 000000000000..63890ebb72bd
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -0,0 +1,622 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include "qedr_hsi.h"
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qedr.h"
+#include "qedr_hsi.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_hsi.h"
+#include "qedr_cm.h"
+
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
+{
+ info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
+}
+
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ dev->gsi_qp_created = 1;
+ dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
+ dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
+ dev->gsi_qp = qp;
+}
+
+void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
+{
+ struct qedr_dev *dev = (struct qedr_dev *)_qdev;
+ struct qedr_cq *cq = dev->gsi_sqcq;
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+
+ DP_DEBUG(dev, QEDR_MSG_GSI,
+ "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
+ dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
+ cq->ibcq.comp_handler ? "Yes" : "No");
+
+ dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
+ pkt->header.baddr);
+ kfree(pkt);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+ qedr_inc_sw_gsi_cons(&qp->sq);
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (cq->ibcq.comp_handler) {
+ spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+ spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+ }
+}
+
+void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
+ struct qed_roce_ll2_rx_params *params)
+{
+ struct qedr_dev *dev = (struct qedr_dev *)_dev;
+ struct qedr_cq *cq = dev->gsi_rqcq;
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc;
+ qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id;
+ qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;
+ ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac);
+
+ qedr_inc_sw_gsi_cons(&qp->rq);
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (cq->ibcq.comp_handler) {
+ spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+ spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+ }
+}
+
+static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ struct qed_rdma_destroy_cq_in_params iparams;
+ struct qed_rdma_destroy_cq_out_params oparams;
+ struct qedr_cq *cq;
+
+ cq = get_qedr_cq(attrs->send_cq);
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+
+ cq = get_qedr_cq(attrs->recv_cq);
+ /* if a dedicated recv_cq was used, delete it too */
+ if (iparams.icid != cq->icid) {
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
+}
+
+static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_recv_sge is larger the max %d>%d\n",
+ attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_recv_wr is too large %d>%d\n",
+ attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_send_wr is too large %d>%d\n",
+ attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp)
+{
+ struct qed_roce_ll2_params ll2_params;
+ int rc;
+
+ rc = qedr_check_gsi_qp_attrs(dev, attrs);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* configure and start LL2 */
+ memset(&ll2_params, 0, sizeof(ll2_params));
+ ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
+ ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
+ ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
+ ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
+ ll2_params.cb_cookie = (void *)dev;
+ ll2_params.mtu = dev->ndev->mtu;
+ ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
+ rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
+ if (rc) {
+ DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
+ return ERR_PTR(rc);
+ }
+
+ /* create QP */
+ qp->ibqp.qp_num = 1;
+ qp->rq.max_wr = attrs->cap.max_recv_wr;
+ qp->sq.max_wr = attrs->cap.max_send_wr;
+
+ qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->rqe_wr_id)
+ goto err;
+ qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->wqe_wr_id)
+ goto err;
+
+ qedr_store_gsi_qp_cq(dev, qp, attrs);
+ ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+ /* the GSI CQ is handled by the driver so remove it from the FW */
+ qedr_destroy_gsi_cq(dev, attrs);
+ dev->gsi_sqcq->cq_type = QEDR_CQ_TYPE_GSI;
+ dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
+
+ DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
+
+ return &qp->ibqp;
+
+err:
+ kfree(qp->rqe_wr_id);
+
+ rc = dev->ops->roce_ll2_stop(dev->cdev);
+ if (rc)
+ DP_ERR(dev, "create gsi qp: failed destroy on create\n");
+
+ return ERR_PTR(-ENOMEM);
+}
+
+int qedr_destroy_gsi_qp(struct qedr_dev *dev)
+{
+ int rc;
+
+ rc = dev->ops->roce_ll2_stop(dev->cdev);
+ if (rc)
+ DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
+ else
+ DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");
+
+ return rc;
+}
+
+#define QEDR_MAX_UD_HEADER_SIZE (100)
+#define QEDR_GSI_QPN (1)
+static inline int qedr_gsi_build_header(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_send_wr *swr,
+ struct ib_ud_header *udh,
+ int *roce_mode)
+{
+ bool has_vlan = false, has_grh_ipv6 = true;
+ struct ib_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
+ struct ib_global_route *grh = &ah_attr->grh;
+ union ib_gid sgid;
+ int send_size = 0;
+ u16 vlan_id = 0;
+ u16 ether_type;
+ struct ib_gid_attr sgid_attr;
+ int rc;
+ int ip_ver = 0;
+
+ bool has_udp = false;
+ int i;
+
+ send_size = 0;
+ for (i = 0; i < swr->num_sge; ++i)
+ send_size += swr->sg_list[i].length;
+
+ rc = ib_get_cached_gid(qp->ibqp.device, ah_attr->port_num,
+ grh->sgid_index, &sgid, &sgid_attr);
+ if (rc) {
+ DP_ERR(dev,
+ "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
+ ah_attr->port_num, grh->sgid_index);
+ return rc;
+ }
+
+ vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+ if (vlan_id < VLAN_CFI_MASK)
+ has_vlan = true;
+ if (sgid_attr.ndev)
+ dev_put(sgid_attr.ndev);
+
+ if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
+ DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
+ ah_attr->grh.sgid_index);
+ return -ENOENT;
+ }
+
+ has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
+ if (!has_udp) {
+ /* RoCE v1 */
+ ether_type = ETH_P_ROCE;
+ *roce_mode = ROCE_V1;
+ } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
+ /* RoCE v2 IPv4 */
+ ip_ver = 4;
+ ether_type = ETH_P_IP;
+ has_grh_ipv6 = false;
+ *roce_mode = ROCE_V2_IPV4;
+ } else {
+ /* RoCE v2 IPv6 */
+ ip_ver = 6;
+ ether_type = ETH_P_IPV6;
+ *roce_mode = ROCE_V2_IPV6;
+ }
+
+ rc = ib_ud_header_init(send_size, false, true, has_vlan,
+ has_grh_ipv6, ip_ver, has_udp, 0, udh);
+ if (rc) {
+ DP_ERR(dev, "gsi post send: failed to init header\n");
+ return rc;
+ }
+
+ /* ENET + VLAN headers */
+ ether_addr_copy(udh->eth.dmac_h, ah_attr->dmac);
+ ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
+ if (has_vlan) {
+ udh->eth.type = htons(ETH_P_8021Q);
+ udh->vlan.tag = htons(vlan_id);
+ udh->vlan.type = htons(ether_type);
+ } else {
+ udh->eth.type = htons(ether_type);
+ }
+
+ /* BTH */
+ udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
+ udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
+ udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
+ udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
+ udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+
+ /* DETH */
+ udh->deth.qkey = htonl(0x80010000);
+ udh->deth.source_qpn = htonl(QEDR_GSI_QPN);
+
+ if (has_grh_ipv6) {
+ /* GRH / IPv6 header */
+ udh->grh.traffic_class = grh->traffic_class;
+ udh->grh.flow_label = grh->flow_label;
+ udh->grh.hop_limit = grh->hop_limit;
+ udh->grh.destination_gid = grh->dgid;
+ memcpy(&udh->grh.source_gid.raw, &sgid.raw,
+ sizeof(udh->grh.source_gid.raw));
+ } else {
+ /* IPv4 header */
+ u32 ipv4_addr;
+
+ udh->ip4.protocol = IPPROTO_UDP;
+ udh->ip4.tos = htonl(ah_attr->grh.flow_label);
+ udh->ip4.frag_off = htons(IP_DF);
+ udh->ip4.ttl = ah_attr->grh.hop_limit;
+
+ ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
+ udh->ip4.saddr = ipv4_addr;
+ ipv4_addr = qedr_get_ipv4_from_gid(ah_attr->grh.dgid.raw);
+ udh->ip4.daddr = ipv4_addr;
+ /* note: checksum is calculated by the device */
+ }
+
+ /* UDP */
+ if (has_udp) {
+ udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
+ udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
+ udh->udp.csum = 0;
+ /* UDP length field is left untouched, hence zero */
+ }
+ return 0;
+}
+
+static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_send_wr *swr,
+ struct qed_roce_ll2_packet **p_packet)
+{
+ u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
+ struct qed_roce_ll2_packet *packet;
+ struct pci_dev *pdev = dev->pdev;
+ int roce_mode, header_size;
+ struct ib_ud_header udh;
+ int i, rc;
+
+ *p_packet = NULL;
+
+ rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
+ if (rc)
+ return rc;
+
+ header_size = ib_ud_header_pack(&udh, &ud_header_buffer);
+
+ packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+ if (!packet)
+ return -ENOMEM;
+
+ packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
+ &packet->header.baddr,
+ GFP_ATOMIC);
+ if (!packet->header.vaddr) {
+ kfree(packet);
+ return -ENOMEM;
+ }
+
+ if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
+ packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+ else
+ packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+
+ packet->roce_mode = roce_mode;
+ memcpy(packet->header.vaddr, ud_header_buffer, header_size);
+ packet->header.len = header_size;
+ packet->n_seg = swr->num_sge;
+ for (i = 0; i < packet->n_seg; i++) {
+ packet->payload[i].baddr = swr->sg_list[i].addr;
+ packet->payload[i].len = swr->sg_list[i].length;
+ }
+
+ *p_packet = packet;
+
+ return 0;
+}
+
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qed_roce_ll2_packet *pkt = NULL;
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qed_roce_ll2_tx_params params;
+ struct qedr_dev *dev = qp->dev;
+ unsigned long flags;
+ int rc;
+
+ if (qp->state != QED_ROCE_QP_STATE_RTS) {
+ *bad_wr = wr;
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTS\n",
+ qp->state);
+ return -EINVAL;
+ }
+
+ if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
+ DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
+ wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (wr->opcode != IB_WR_SEND) {
+ DP_ERR(dev,
+ "gsi post send: failed due to unsupported opcode %d\n",
+ wr->opcode);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ memset(&params, 0, sizeof(params));
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
+ if (rc) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ goto err;
+ }
+
+ rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params);
+ if (!rc) {
+ qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+ qedr_inc_sw_prod(&qp->sq);
+ DP_DEBUG(qp->dev, QEDR_MSG_GSI,
+ "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
+ wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
+ } else {
+ if (rc == QED_ROCE_TX_HEAD_FAILURE) {
+ /* TX failed while posting header - release resources */
+ dma_free_coherent(&dev->pdev->dev, pkt->header.len,
+ pkt->header.vaddr, pkt->header.baddr);
+ kfree(pkt);
+ } else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
+ /* Nothing to do here: TX failed while posting a fragment,
+ * so the resources will be released in the TX callback.
+ */
+ }
+
+ DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
+ rc = -EAGAIN;
+ *bad_wr = wr;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (wr->next) {
+ DP_ERR(dev,
+ "gsi post send: failed second WR. Only one WR may be passed at a time\n");
+ *bad_wr = wr->next;
+ rc = -EINVAL;
+ }
+
+ return rc;
+
+err:
+ *bad_wr = wr;
+ return rc;
+}
+
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qed_roce_ll2_buffer buf;
+ unsigned long flags;
+ int status = 0;
+ int rc;
+
+ if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
+ (qp->state != QED_ROCE_QP_STATE_RTS)) {
+ *bad_wr = wr;
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
+ qp->state);
+ return -EINVAL;
+ }
+
+ memset(&buf, 0, sizeof(buf));
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ while (wr) {
+ if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
+ wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
+ goto err;
+ }
+
+ buf.baddr = wr->sg_list[0].addr;
+ buf.len = wr->sg_list[0].length;
+
+ rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1);
+ if (rc) {
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer (rc=%d)\n",
+ rc);
+ goto err;
+ }
+
+ memset(&qp->rqe_wr_id[qp->rq.prod], 0,
+ sizeof(qp->rqe_wr_id[qp->rq.prod]));
+ qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
+ qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+
+ qedr_inc_sw_prod(&qp->rq);
+
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return status;
+err:
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ return -ENOMEM;
+}
+
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+ int i = 0;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+
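+ /* Drain shadow receive completions first, then send completions, up
+ * to the caller's num_entries budget.
+ */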
+ while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
+ memset(&wc[i], 0, sizeof(*wc));
+
+ wc[i].qp = &qp->ibqp;
+ wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+ wc[i].opcode = IB_WC_RECV;
+ wc[i].pkey_index = 0;
+ wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
+ IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
+ /* 0 - currently only one recv sg is supported */
+ wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
+ wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
+ ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
+ wc[i].wc_flags |= IB_WC_WITH_SMAC;
+ if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+ wc[i].wc_flags |= IB_WC_WITH_VLAN;
+ wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+ }
+
+ qedr_inc_sw_cons(&qp->rq);
+ i++;
+ }
+
+ while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
+ memset(&wc[i], 0, sizeof(*wc));
+
+ wc[i].qp = &qp->ibqp;
+ wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+ wc[i].opcode = IB_WC_SEND;
+ wc[i].status = IB_WC_SUCCESS;
+
+ qedr_inc_sw_cons(&qp->sq);
+ i++;
+ }
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ DP_DEBUG(dev, QEDR_MSG_GSI,
+		 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%d, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
+ num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
+ qp->sq.gsi_cons, qp->ibqp.qp_num);
+
+ return i;
+}
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_cm.h
new file mode 100644
index 000000000000..9ba6e15cd93f
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_cm.h
@@ -0,0 +1,61 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef LINUX_QEDR_CM_H_
+#define LINUX_QEDR_CM_H_
+
+#define QEDR_GSI_MAX_RECV_WR (4096)
+#define QEDR_GSI_MAX_SEND_WR (4096)
+
+#define QEDR_GSI_MAX_RECV_SGE (1) /* LL2 FW limitation */
+
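+/* RoCE v1 Ethertype and the default RoCE v2 UDP source port */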
+#define ETH_P_ROCE (0x8915)
+#define QEDR_ROCE_V2_UDP_SPORT (0000)
+
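+/* An IPv4-mapped GID (::ffff:a.b.c.d) carries the IPv4 address in its last
+ * four bytes (12..15).
+ */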
+static inline u32 qedr_get_ipv4_from_gid(u8 *gid)
+{
+ return *(u32 *)(void *)&gid[12];
+}
+
+/* RDMA CM */
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp);
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_qp_init_attr *attrs);
+int qedr_destroy_gsi_qp(struct qedr_dev *dev);
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info);
+#endif
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi.h b/drivers/infiniband/hw/qedr/qedr_hsi.h
new file mode 100644
index 000000000000..66d27521373f
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_hsi.h
@@ -0,0 +1,56 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QED_HSI_ROCE__
+#define __QED_HSI_ROCE__
+
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/roce_common.h>
+#include "qedr_hsi_rdma.h"
+
+/* Affiliated asynchronous events / errors enumeration */
+enum roce_async_events_type {
+ ROCE_ASYNC_EVENT_NONE = 0,
+ ROCE_ASYNC_EVENT_COMM_EST = 1,
+ ROCE_ASYNC_EVENT_SQ_DRAINED,
+ ROCE_ASYNC_EVENT_SRQ_LIMIT,
+ ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
+ ROCE_ASYNC_EVENT_CQ_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
+ ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
+ ROCE_ASYNC_EVENT_SRQ_EMPTY,
+ MAX_ROCE_ASYNC_EVENTS_TYPE
+};
+
+#endif /* __QED_HSI_ROCE__ */
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
new file mode 100644
index 000000000000..5c98d2055cad
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -0,0 +1,748 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QED_HSI_RDMA__
+#define __QED_HSI_RDMA__
+
+#include <linux/qed/rdma_common.h>
+
+/* rdma completion notification queue element */
+struct rdma_cnqe {
+ struct regpair cq_handle;
+};
+
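+/* Sub-byte fields in the structures below are packed into their containing
+ * byte as (value & MASK) << SHIFT, following the qed SET_FIELD/GET_FIELD
+ * convention.
+ */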
+struct rdma_cqe_responder {
+ struct regpair srq_wr_id;
+ struct regpair qp_handle;
+ __le32 imm_data_or_inv_r_Key;
+ __le32 length;
+ __le32 imm_data_hi;
+ __le16 rq_cons;
+ u8 flags;
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_RESPONDER_TYPE_MASK 0x3
+#define RDMA_CQE_RESPONDER_TYPE_SHIFT 1
+#define RDMA_CQE_RESPONDER_INV_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT 3
+#define RDMA_CQE_RESPONDER_IMM_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT 4
+#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT 5
+#define RDMA_CQE_RESPONDER_RESERVED2_MASK 0x3
+#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT 6
+ u8 status;
+};
+
+struct rdma_cqe_requester {
+ __le16 sq_cons;
+ __le16 reserved0;
+ __le32 reserved1;
+ struct regpair qp_handle;
+ struct regpair reserved2;
+ __le32 reserved3;
+ __le16 reserved4;
+ u8 flags;
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_REQUESTER_TYPE_MASK 0x3
+#define RDMA_CQE_REQUESTER_TYPE_SHIFT 1
+#define RDMA_CQE_REQUESTER_RESERVED5_MASK 0x1F
+#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT 3
+ u8 status;
+};
+
+struct rdma_cqe_common {
+ struct regpair reserved0;
+ struct regpair qp_handle;
+ __le16 reserved1[7];
+ u8 flags;
+#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_COMMON_TYPE_MASK 0x3
+#define RDMA_CQE_COMMON_TYPE_SHIFT 1
+#define RDMA_CQE_COMMON_RESERVED2_MASK 0x1F
+#define RDMA_CQE_COMMON_RESERVED2_SHIFT 3
+ u8 status;
+};
+
+/* rdma completion queue element */
+union rdma_cqe {
+ struct rdma_cqe_responder resp;
+ struct rdma_cqe_requester req;
+ struct rdma_cqe_common cmn;
+};
+
+/* CQE requester status enumeration */
+enum rdma_cqe_requester_status_enum {
+ RDMA_CQE_REQ_STS_OK,
+ RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
+ RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
+ RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
+ RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
+ MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
+};
+
+/* CQE responder status enumeration */
+enum rdma_cqe_responder_status_enum {
+ RDMA_CQE_RESP_STS_OK,
+ RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
+ RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
+ RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
+ RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
+ MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
+};
+
+/* CQE type enumeration */
+enum rdma_cqe_type {
+ RDMA_CQE_TYPE_REQUESTER,
+ RDMA_CQE_TYPE_RESPONDER_RQ,
+ RDMA_CQE_TYPE_RESPONDER_SRQ,
+ RDMA_CQE_TYPE_INVALID,
+ MAX_RDMA_CQE_TYPE
+};
+
+struct rdma_sq_sge {
+ __le32 length;
+ struct regpair addr;
+ __le32 l_key;
+};
+
+struct rdma_rq_sge {
+ struct regpair addr;
+ __le32 length;
+ __le32 flags;
+#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF
+#define RDMA_RQ_SGE_L_KEY_SHIFT 0
+#define RDMA_RQ_SGE_NUM_SGES_MASK 0x7
+#define RDMA_RQ_SGE_NUM_SGES_SHIFT 26
+#define RDMA_RQ_SGE_RESERVED0_MASK 0x7
+#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
+};
+
+struct rdma_srq_sge {
+ struct regpair addr;
+ __le32 length;
+ __le32 l_key;
+};
+
+/* Rdma doorbell data for SQ and RQ */
+struct rdma_pwm_val16_data {
+ __le16 icid;
+ __le16 value;
+};
+
+union rdma_pwm_val16_data_union {
+ struct rdma_pwm_val16_data as_struct;
+ __le32 as_dword;
+};
+
+/* Rdma doorbell data for CQ */
+struct rdma_pwm_val32_data {
+ __le16 icid;
+ u8 agg_flags;
+ u8 params;
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3
+ __le32 value;
+};
+
+/* DIF Block size options */
+enum rdma_dif_block_size {
+ RDMA_DIF_BLOCK_512 = 0,
+ RDMA_DIF_BLOCK_4096 = 1,
+ MAX_RDMA_DIF_BLOCK_SIZE
+};
+
+/* DIF CRC initial value */
+enum rdma_dif_crc_seed {
+ RDMA_DIF_CRC_SEED_0000 = 0,
+ RDMA_DIF_CRC_SEED_FFFF = 1,
+ MAX_RDMA_DIF_CRC_SEED
+};
+
+/* RDMA DIF Error Result Structure */
+struct rdma_dif_error_result {
+ __le32 error_intervals;
+ __le32 dif_error_1st_interval;
+ u8 flags;
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT 0
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK 0xF
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT 3
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT 7
+ u8 reserved1[55];
+};
+
+/* DIF IO direction */
+enum rdma_dif_io_direction_flg {
+ RDMA_DIF_DIR_RX = 0,
+ RDMA_DIF_DIR_TX = 1,
+ MAX_RDMA_DIF_IO_DIRECTION_FLG
+};
+
+/* RDMA DIF Runt Result Structure */
+struct rdma_dif_runt_result {
+ __le16 guard_tag;
+ __le16 reserved[3];
+};
+
+/* Memory window type enumeration */
+enum rdma_mw_type {
+ RDMA_MW_TYPE_1,
+ RDMA_MW_TYPE_2A,
+ MAX_RDMA_MW_TYPE
+};
+
+struct rdma_sq_atomic_wqe {
+ __le32 reserved1;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ struct regpair remote_va;
+ __le32 r_key;
+ __le32 reserved2;
+ struct regpair cmp_data;
+ struct regpair swap_data;
+};
+
+/* First element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_1st {
+ __le32 reserved1;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_2nd {
+ struct regpair remote_va;
+ __le32 r_key;
+ __le32 reserved2;
+};
+
+/* Third element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_3rd {
+ struct regpair cmp_data;
+ struct regpair swap_data;
+};
+
+struct rdma_sq_bind_wqe {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7
+#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0
+#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F
+#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2
+ u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_BIND_WQE_RESERVED2_MASK 0x7
+#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ __le32 parent_l_key;
+ __le32 reserved4;
+};
+
+/* First element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_1st {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_2nd {
+ u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0
+#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2
+ u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK 0x7
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ __le32 parent_l_key;
+ __le32 reserved4;
+};
+
+/* Structure with only the SQ WQE common
+ * fields. Size is of one SQ element (16B)
+ */
+struct rdma_sq_common_wqe {
+ __le32 reserved1[3];
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK 0x7
+#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+struct rdma_sq_fmr_wqe {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT 5
+#define RDMA_SQ_FMR_WQE_BIND_EN_MASK 0x1
+#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT 6
+#define RDMA_SQ_FMR_WQE_RESERVED1_MASK 0x1
+#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT 7
+ u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_FMR_WQE_RESERVED2_MASK 0x7
+#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ struct regpair pbl_addr;
+ __le32 dif_base_ref_tag;
+ __le16 dif_app_tag;
+ __le16 dif_app_tag_mask;
+ __le16 dif_runt_crc_value;
+ __le16 dif_flags;
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7
+ __le32 Reserved5;
+};
+
+/* First element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_1st {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK 0x3
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_2nd {
+ u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT 5
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT 6
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT 7
+ u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK 0x7
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ struct regpair pbl_addr;
+};
+
+/* Third element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_3rd {
+ __le32 dif_base_ref_tag;
+ __le16 dif_app_tag;
+ __le16 dif_app_tag_mask;
+ __le16 dif_runt_crc_value;
+ __le16 dif_flags;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7
+ __le32 Reserved5;
+};
+
+struct rdma_sq_local_inv_wqe {
+ struct regpair reserved;
+ __le32 inv_l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+struct rdma_sq_rdma_wqe {
+ __le32 imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ struct regpair remote_va;
+ __le32 r_key;
+ u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F
+#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3
+ u8 reserved2[3];
+};
+
+/* First element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_1st {
+ __le32 imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x3
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_2nd {
+ struct regpair remote_va;
+ __le32 r_key;
+ u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK 0x1F
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT 3
+ u8 reserved2[3];
+};
+
+/* SQ WQE req type enumeration */
+enum rdma_sq_req_type {
+ RDMA_SQ_REQ_TYPE_SEND,
+ RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
+ RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
+ RDMA_SQ_REQ_TYPE_RDMA_WR,
+ RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
+ RDMA_SQ_REQ_TYPE_RDMA_RD,
+ RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
+ RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
+ RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
+ RDMA_SQ_REQ_TYPE_FAST_MR,
+ RDMA_SQ_REQ_TYPE_BIND,
+ RDMA_SQ_REQ_TYPE_INVALID,
+ MAX_RDMA_SQ_REQ_TYPE
+};
+
+struct rdma_sq_send_wqe {
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_SEND_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_SEND_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ __le32 reserved1[4];
+};
+
+struct rdma_sq_send_wqe_1st {
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
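+/* Second element (16 bytes) of send wqe */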
+struct rdma_sq_send_wqe_2st {
+ __le32 reserved1[4];
+};
+
+#endif /* __QED_HSI_RDMA__ */
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
new file mode 100644
index 000000000000..a61514296767
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -0,0 +1,3547 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+#include <linux/iommu.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include "qedr_hsi.h"
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_cm.h"
+
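+/* Scale a PWM doorbell offset to a byte offset within the doorbell BAR */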
+#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+
+int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+	if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
+ return -EINVAL;
+
+ *pkey = QEDR_ROCE_PKEY_DEFAULT;
+ return 0;
+}
+
+int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *sgid)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ int rc = 0;
+
+ if (!rdma_cap_roce_gid_table(ibdev, port))
+ return -ENODEV;
+
+ rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
+ if (rc == -EAGAIN) {
+ memcpy(sgid, &zgid, sizeof(*sgid));
+ return 0;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
+ sgid->global.interface_id, sgid->global.subnet_prefix);
+
+ return rc;
+}
+
+int qedr_add_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr, void **context)
+{
+ if (!rdma_cap_roce_gid_table(device, port_num))
+ return -EINVAL;
+
+ if (port_num > QEDR_MAX_PORT)
+ return -EINVAL;
+
+ if (!context)
+ return -EINVAL;
+
+ return 0;
+}
+
+int qedr_del_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, void **context)
+{
+ if (!rdma_cap_roce_gid_table(device, port_num))
+ return -EINVAL;
+
+ if (port_num > QEDR_MAX_PORT)
+ return -EINVAL;
+
+ if (!context)
+ return -EINVAL;
+
+ return 0;
+}
+
+int qedr_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qedr_device_attr *qattr = &dev->attr;
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev,
+ "qedr_query_device called with invalid params rdma_ctx=%p\n",
+ dev->rdma_ctx);
+ return -EINVAL;
+ }
+
+ memset(attr, 0, sizeof(*attr));
+
+ attr->fw_ver = qattr->fw_ver;
+ attr->sys_image_guid = qattr->sys_image_guid;
+ attr->max_mr_size = qattr->max_mr_size;
+ attr->page_size_cap = qattr->page_size_caps;
+ attr->vendor_id = qattr->vendor_id;
+ attr->vendor_part_id = qattr->vendor_part_id;
+ attr->hw_ver = qattr->hw_ver;
+ attr->max_qp = qattr->max_qp;
+ attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
+ attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
+ IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
+
+ attr->max_sge = qattr->max_sge;
+ attr->max_sge_rd = qattr->max_sge;
+ attr->max_cq = qattr->max_cq;
+ attr->max_cqe = qattr->max_cqe;
+ attr->max_mr = qattr->max_mr;
+ attr->max_mw = qattr->max_mw;
+ attr->max_pd = qattr->max_pd;
+ attr->atomic_cap = dev->atomic_cap;
+ attr->max_fmr = qattr->max_fmr;
+ attr->max_map_per_fmr = 16;
+ attr->max_qp_init_rd_atom =
+ 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
+ attr->max_qp_rd_atom =
+ min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
+ attr->max_qp_init_rd_atom);
+
+ attr->max_srq = qattr->max_srq;
+ attr->max_srq_sge = qattr->max_srq_sge;
+ attr->max_srq_wr = qattr->max_srq_wr;
+
+ attr->local_ca_ack_delay = qattr->dev_ack_delay;
+ attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
+ attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
+ attr->max_ah = qattr->max_ah;
+
+ return 0;
+}
+
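+/* IB speed encodings; the values match enum ib_port_speed */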
+#define QEDR_SPEED_SDR (1)
+#define QEDR_SPEED_DDR (2)
+#define QEDR_SPEED_QDR (4)
+#define QEDR_SPEED_FDR10 (8)
+#define QEDR_SPEED_FDR (16)
+#define QEDR_SPEED_EDR (32)
+
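+/* Map an Ethernet link speed (in Mbps) to the closest IB speed/width pair */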
+static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
+ u8 *ib_width)
+{
+ switch (speed) {
+ case 1000:
+ *ib_speed = QEDR_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+ case 10000:
+ *ib_speed = QEDR_SPEED_QDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 20000:
+ *ib_speed = QEDR_SPEED_DDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 25000:
+ *ib_speed = QEDR_SPEED_EDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 40000:
+ *ib_speed = QEDR_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 50000:
+ *ib_speed = QEDR_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 100000:
+ *ib_speed = QEDR_SPEED_EDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ default:
+ /* Unsupported */
+ *ib_speed = QEDR_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ }
+}
+
+int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
+{
+ struct qedr_dev *dev;
+ struct qed_rdma_port *rdma_port;
+
+ dev = get_qedr_dev(ibdev);
+ if (port > 1) {
+ DP_ERR(dev, "invalid_port=0x%x\n", port);
+ return -EINVAL;
+ }
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev, "rdma_ctx is NULL\n");
+ return -EINVAL;
+ }
+
+ rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
+ memset(attr, 0, sizeof(*attr));
+
+	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
+		attr->state = IB_PORT_ACTIVE;
+		attr->phys_state = 5;	/* LinkUp */
+	} else {
+		attr->state = IB_PORT_DOWN;
+		attr->phys_state = 3;	/* Disabled */
+	}
+ attr->max_mtu = IB_MTU_4096;
+ attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
+ attr->lid = 0;
+ attr->lmc = 0;
+ attr->sm_lid = 0;
+ attr->sm_sl = 0;
+ attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
+ attr->gid_tbl_len = QEDR_MAX_SGID;
+ attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+ attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
+ attr->qkey_viol_cntr = 0;
+ get_link_speed_and_width(rdma_port->link_speed,
+ &attr->active_speed, &attr->active_width);
+ attr->max_msg_sz = rdma_port->max_msg_size;
+ attr->max_vl_num = 4;
+
+ return 0;
+}
+
+int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
+ struct ib_port_modify *props)
+{
+ struct qedr_dev *dev;
+
+ dev = get_qedr_dev(ibdev);
+ if (port > 1) {
+ DP_ERR(dev, "invalid_port=0x%x\n", port);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
+ unsigned long len)
+{
+ struct qedr_mm *mm;
+
+ mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+ if (!mm)
+ return -ENOMEM;
+
+ mm->key.phy_addr = phy_addr;
+ /* This function might be called with a length which is not a multiple
+ * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
+ * forces this granularity by increasing the requested size if needed.
+ * When qedr_mmap is called, it will search the list with the updated
+ * length as a key. To prevent search failures, the length is rounded up
+ * in advance to PAGE_SIZE.
+ */
+ mm->key.len = roundup(len, PAGE_SIZE);
+ INIT_LIST_HEAD(&mm->entry);
+
+ mutex_lock(&uctx->mm_list_lock);
+ list_add(&mm->entry, &uctx->mm_head);
+ mutex_unlock(&uctx->mm_list_lock);
+
+ DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+ "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
+ (unsigned long long)mm->key.phy_addr,
+ (unsigned long)mm->key.len, uctx);
+
+ return 0;
+}
+
+static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
+ unsigned long len)
+{
+ bool found = false;
+ struct qedr_mm *mm;
+
+ mutex_lock(&uctx->mm_list_lock);
+ list_for_each_entry(mm, &uctx->mm_head, entry) {
+ if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+ continue;
+
+ found = true;
+ break;
+ }
+ mutex_unlock(&uctx->mm_list_lock);
+	/* mm is not a valid entry here when nothing matched, so log the key */
+	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+		 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
+		 phy_addr, len, uctx, found);
+
+ return found;
+}
+
+struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ int rc;
+ struct qedr_ucontext *ctx;
+ struct qedr_alloc_ucontext_resp uresp;
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qed_rdma_add_user_out_params oparams;
+
+ if (!udata)
+ return ERR_PTR(-EFAULT);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
+ if (rc) {
+ DP_ERR(dev,
+			"failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size, or closing unneeded RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
+ rc);
+ goto err;
+ }
+
+ ctx->dpi = oparams.dpi;
+ ctx->dpi_addr = oparams.dpi_addr;
+ ctx->dpi_phys_addr = oparams.dpi_phys_addr;
+ ctx->dpi_size = oparams.dpi_size;
+ INIT_LIST_HEAD(&ctx->mm_head);
+ mutex_init(&ctx->mm_list_lock);
+
+ memset(&uresp, 0, sizeof(uresp));
+
+ uresp.db_pa = ctx->dpi_phys_addr;
+ uresp.db_size = ctx->dpi_size;
+ uresp.max_send_wr = dev->attr.max_sqe;
+ uresp.max_recv_wr = dev->attr.max_rqe;
+ uresp.max_srq_wr = dev->attr.max_srq_wr;
+ uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+ uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
+ uresp.max_cqes = QEDR_MAX_CQES;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ goto err;
+
+ ctx->dev = dev;
+
+ rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
+ if (rc)
+ goto err;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
+ &ctx->ibucontext);
+ return &ctx->ibucontext;
+
+err:
+ kfree(ctx);
+ return ERR_PTR(rc);
+}
+
+int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
+ struct qedr_mm *mm, *tmp;
+ int status = 0;
+
+ DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
+ uctx);
+ uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
+
+ list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
+ DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+ "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
+ mm->key.phy_addr, mm->key.len, uctx);
+ list_del(&mm->entry);
+ kfree(mm);
+ }
+
+ kfree(uctx);
+ return status;
+}
+
+int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
+ struct qedr_dev *dev = get_qedr_dev(context->device);
+ unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
+ u64 unmapped_db = dev->db_phys_addr;
+ unsigned long len = (vma->vm_end - vma->vm_start);
+ int rc = 0;
+ bool found;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
+ vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
+ if (vma->vm_start & (PAGE_SIZE - 1)) {
+ DP_ERR(dev, "Vma_start not page aligned = %ld\n",
+ vma->vm_start);
+ return -EINVAL;
+ }
+
+ found = qedr_search_mmap(ucontext, vm_page, len);
+ if (!found) {
+ DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
+ vma->vm_pgoff);
+ return -EINVAL;
+ }
+
+ if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
+ dev->db_size))) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
+ if (vma->vm_flags & VM_READ) {
+ DP_ERR(dev, "Trying to map doorbell bar for read\n");
+ return -EPERM;
+ }
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ PAGE_SIZE, vma->vm_page_prot);
+ } else {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
+ rc = remap_pfn_range(vma, vma->vm_start,
+ vma->vm_pgoff, len, vma->vm_page_prot);
+ }
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
+ return rc;
+}
+
+struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qedr_ucontext *uctx = NULL;
+ struct qedr_alloc_pd_uresp uresp;
+ struct qedr_pd *pd;
+ u16 pd_id;
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
+ (udata && context) ? "User Lib" : "Kernel");
+
+ if (!dev->rdma_ctx) {
+		DP_ERR(dev, "invalid RDMA context\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+
+ uresp.pd_id = pd_id;
+ pd->pd_id = pd_id;
+
+ if (udata && context) {
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
+ uctx = get_qedr_ucontext(context);
+ uctx->pd = pd;
+ pd->uctx = uctx;
+ }
+
+ return &pd->ibpd;
+}
+
+int qedr_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+
+	if (!pd) {
+		pr_err("Invalid PD received in dealloc_pd\n");
+		return -EINVAL;
+	}
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
+ dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
+
+ kfree(pd);
+
+ return 0;
+}
+
+static void qedr_free_pbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int i;
+
+ for (i = 0; i < pbl_info->num_pbls; i++) {
+ if (!pbl[i].va)
+ continue;
+ dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+ pbl[i].va, pbl[i].pa);
+ }
+
+ kfree(pbl);
+}
+
+#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
+#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
+
+#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
+#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
+#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
+
+static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info,
+ gfp_t flags)
+{
+ struct pci_dev *pdev = dev->pdev;
+ struct qedr_pbl *pbl_table;
+ dma_addr_t *pbl_main_tbl;
+ dma_addr_t pa;
+ void *va;
+ int i;
+
+ pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
+ if (!pbl_table)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < pbl_info->num_pbls; i++) {
+ va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
+ &pa, flags);
+ if (!va)
+ goto err;
+
+ memset(va, 0, pbl_info->pbl_size);
+ pbl_table[i].va = va;
+ pbl_table[i].pa = pa;
+ }
+
+ /* Two-Layer PBLs, if we have more than one pbl we need to initialize
+ * the first one with physical pointers to all of the rest
+ */
+ pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
+ for (i = 0; i < pbl_info->num_pbls - 1; i++)
+ pbl_main_tbl[i] = pbl_table[i + 1].pa;
+
+ return pbl_table;
+
+err:
+ for (i--; i >= 0; i--)
+ dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+ pbl_table[i].va, pbl_table[i].pa);
+
+ qedr_free_pbl(dev, pbl_info, pbl_table);
+
+ return ERR_PTR(-ENOMEM);
+}
+
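+/* Choose a one- or two-layer PBL layout: a single PBL page when all PBEs
+ * fit in one page, otherwise a layer-0 page pointing to up to
+ * MAX_PBES_ON_PAGE pages of PBEs.
+ */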
+static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info,
+ u32 num_pbes, int two_layer_capable)
+{
+ u32 pbl_capacity;
+ u32 pbl_size;
+ u32 num_pbls;
+
+ if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
+ if (num_pbes > MAX_PBES_TWO_LAYER) {
+ DP_ERR(dev, "prepare pbl table: too many pages %d\n",
+ num_pbes);
+ return -EINVAL;
+ }
+
+ /* calculate required pbl page size */
+ pbl_size = MIN_FW_PBL_PAGE_SIZE;
+ pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
+ NUM_PBES_ON_PAGE(pbl_size);
+
+ while (pbl_capacity < num_pbes) {
+ pbl_size *= 2;
+ pbl_capacity = pbl_size / sizeof(u64);
+ pbl_capacity = pbl_capacity * pbl_capacity;
+ }
+
+ num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
+		num_pbls++;	/* one more for layer 0, which points to the PBL pages */
+ pbl_info->two_layered = true;
+ } else {
+ /* One layered PBL */
+ num_pbls = 1;
+ pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
+ roundup_pow_of_two((num_pbes * sizeof(u64))));
+ pbl_info->two_layered = false;
+ }
+
+ pbl_info->num_pbls = num_pbls;
+ pbl_info->pbl_size = pbl_size;
+ pbl_info->num_pbes = num_pbes;
+
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
+ pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
+
+ return 0;
+}
+
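+/* Walk the umem scatterlist and write one little-endian PBE (64-bit page
+ * address) per page into the PBL pages, moving to the next PBL page as
+ * each one fills up.
+ */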
+static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
+ struct qedr_pbl *pbl,
+ struct qedr_pbl_info *pbl_info)
+{
+ int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+ struct qedr_pbl *pbl_tbl;
+ struct scatterlist *sg;
+ struct regpair *pbe;
+ int entry;
+ u32 addr;
+
+ if (!pbl_info->num_pbes)
+ return;
+
+ /* If we have a two layered pbl, the first pbl points to the rest
+ * of the pbls and the first entry lays on the second pbl in the table
+ */
+ if (pbl_info->two_layered)
+ pbl_tbl = &pbl[1];
+ else
+ pbl_tbl = pbl;
+
+ pbe = (struct regpair *)pbl_tbl->va;
+ if (!pbe) {
+ DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
+ return;
+ }
+
+ pbe_cnt = 0;
+
+ shift = ilog2(umem->page_size);
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ pages = sg_dma_len(sg) >> shift;
+ for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
+ /* store the page address in pbe */
+ pbe->lo = cpu_to_le32(sg_dma_address(sg) +
+ umem->page_size * pg_cnt);
+ addr = upper_32_bits(sg_dma_address(sg) +
+ umem->page_size * pg_cnt);
+ pbe->hi = cpu_to_le32(addr);
+ pbe_cnt++;
+ total_num_pbes++;
+ pbe++;
+
+ if (total_num_pbes == pbl_info->num_pbes)
+ return;
+
+ /* If the given pbl is full storing the pbes,
+ * move to next pbl.
+ */
+ if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct regpair *)pbl_tbl->va;
+ pbe_cnt = 0;
+ }
+ }
+ }
+}
+
+static int qedr_copy_cq_uresp(struct qedr_dev *dev,
+ struct qedr_cq *cq, struct ib_udata *udata)
+{
+ struct qedr_create_cq_uresp uresp;
+ int rc;
+
+ memset(&uresp, 0, sizeof(uresp));
+
+ uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+ uresp.icid = cq->icid;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
+
+ return rc;
+}
+
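+/* Advance the CQ consumer; flip the expected toggle bit each time
+ * consumption wraps past the last CQE so stale entries are not mistaken
+ * for new ones.
+ */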
+static void consume_cqe(struct qedr_cq *cq)
+{
+ if (cq->latest_cqe == cq->toggle_cqe)
+ cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+
+ cq->latest_cqe = qed_chain_consume(&cq->pbl);
+}
+
+static inline int qedr_align_cq_entries(int entries)
+{
+ u64 size, aligned_size;
+
+ /* We allocate an extra entry that we don't report to the FW. */
+ size = (entries + 1) * QEDR_CQE_SIZE;
+ aligned_size = ALIGN(size, PAGE_SIZE);
+
+ return aligned_size / QEDR_CQE_SIZE;
+}
+
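+/* Pin a user-space queue buffer with ib_umem_get() and build the PBL that
+ * describes its pages to the device.
+ */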
+static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
+ struct qedr_dev *dev,
+ struct qedr_userq *q,
+ u64 buf_addr, size_t buf_len,
+ int access, int dmasync)
+{
+ int page_cnt;
+ int rc;
+
+ q->buf_addr = buf_addr;
+ q->buf_len = buf_len;
+ q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
+ if (IS_ERR(q->umem)) {
+ DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
+ PTR_ERR(q->umem));
+ return PTR_ERR(q->umem);
+ }
+
+ page_cnt = ib_umem_page_count(q->umem);
+ rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+ if (rc)
+ goto err0;
+
+ q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
+	if (IS_ERR(q->pbl_tbl)) {
+		rc = PTR_ERR(q->pbl_tbl);
+		goto err0;
+	}
+
+ qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+
+ return 0;
+
+err0:
+ ib_umem_release(q->umem);
+
+ return rc;
+}
+
+static inline void qedr_init_cq_params(struct qedr_cq *cq,
+ struct qedr_ucontext *ctx,
+ struct qedr_dev *dev, int vector,
+ int chain_entries, int page_cnt,
+ u64 pbl_ptr,
+ struct qed_rdma_create_cq_in_params
+ *params)
+{
+ memset(params, 0, sizeof(*params));
+ params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
+ params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
+ params->cnq_id = vector;
+ params->cq_size = chain_entries - 1;
+ params->dpi = (ctx) ? ctx->dpi : dev->dpi;
+ params->pbl_num_pages = page_cnt;
+ params->pbl_ptr = pbl_ptr;
+ params->pbl_two_level = 0;
+}
+
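+/* Ring the CQ doorbell: publish the new consumer index together with the
+ * aggregation command/arming flags in a single 64-bit write.
+ */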
+static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
+{
+ /* Flush data before signalling doorbell */
+ wmb();
+ cq->db.data.agg_flags = flags;
+ cq->db.data.value = cpu_to_le32(cons);
+ writeq(cq->db.raw, cq->db_addr);
+
+ /* Make sure write would stick */
+ mmiowb();
+}
+
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ unsigned long sflags;
+
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ return 0;
+
+ spin_lock_irqsave(&cq->cq_lock, sflags);
+
+ cq->arm_flags = 0;
+
+ if (flags & IB_CQ_SOLICITED)
+ cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
+
+ if (flags & IB_CQ_NEXT_COMP)
+ cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
+
+ doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+ spin_unlock_irqrestore(&cq->cq_lock, sflags);
+
+ return 0;
+}
+
+struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *ib_ctx, struct ib_udata *udata)
+{
+ struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
+ struct qed_rdma_destroy_cq_out_params destroy_oparams;
+ struct qed_rdma_destroy_cq_in_params destroy_iparams;
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qed_rdma_create_cq_in_params params;
+ struct qedr_create_cq_ureq ureq;
+ int vector = attr->comp_vector;
+ int entries = attr->cqe;
+ struct qedr_cq *cq;
+ int chain_entries;
+ int page_cnt;
+ u64 pbl_ptr;
+ u16 icid;
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "create_cq: called from %s. entries=%d, vector=%d\n",
+ udata ? "User Lib" : "Kernel", entries, vector);
+
+ if (entries > QEDR_MAX_CQES) {
+ DP_ERR(dev,
+ "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
+ entries, QEDR_MAX_CQES);
+ return ERR_PTR(-EINVAL);
+ }
+
+ chain_entries = qedr_align_cq_entries(entries);
+ chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ return ERR_PTR(-ENOMEM);
+
+ if (udata) {
+ memset(&ureq, 0, sizeof(ureq));
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ DP_ERR(dev,
+ "create cq: problem copying data from user space\n");
+ goto err0;
+ }
+
+ if (!ureq.len) {
+ DP_ERR(dev,
+ "create cq: cannot create a cq with 0 entries\n");
+ goto err0;
+ }
+
+ cq->cq_type = QEDR_CQ_TYPE_USER;
+
+ rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
+ ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
+ if (rc)
+ goto err0;
+
+ pbl_ptr = cq->q.pbl_tbl->pa;
+ page_cnt = cq->q.pbl_info.num_pbes;
+ } else {
+ cq->cq_type = QEDR_CQ_TYPE_KERNEL;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ chain_entries,
+ sizeof(union rdma_cqe),
+ &cq->pbl);
+ if (rc)
+ goto err1;
+
+ page_cnt = qed_chain_get_page_cnt(&cq->pbl);
+ pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
+ }
+
+ qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
+ pbl_ptr, &params);
+
+ rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
+ if (rc)
+ goto err2;
+
+ cq->icid = icid;
+ cq->sig = QEDR_CQ_MAGIC_NUMBER;
+ spin_lock_init(&cq->cq_lock);
+
+ if (ib_ctx) {
+ rc = qedr_copy_cq_uresp(dev, cq, udata);
+ if (rc)
+ goto err3;
+ } else {
+ /* Generate doorbell address. */
+ cq->db_addr = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+ cq->db.data.icid = cq->icid;
+ cq->db.data.params = DB_AGG_CMD_SET <<
+ RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
+
+		/* point to the very last element; once we pass it the toggle bit flips */
+ cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
+ cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+ cq->latest_cqe = NULL;
+ consume_cqe(cq);
+ cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
+ cq->icid, cq, params.cq_size);
+
+ return &cq->ibcq;
+
+err3:
+ destroy_iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
+ &destroy_oparams);
+err2:
+ if (udata)
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+ else
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+err1:
+ if (udata)
+ ib_umem_release(cq->q.umem);
+err0:
+ kfree(cq);
+ return ERR_PTR(-EINVAL);
+}
+
+int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+ DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
+
+ return 0;
+}
+
+int qedr_destroy_cq(struct ib_cq *ibcq)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qed_rdma_destroy_cq_out_params oparams;
+ struct qed_rdma_destroy_cq_in_params iparams;
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+ DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);
+
+ /* GSI CQs are handled by the driver, so they don't exist in the FW */
+ if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
+
+ if (ibcq->uobject && ibcq->uobject->context) {
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+ ib_umem_release(cq->q.umem);
+ }
+
+ kfree(cq);
+
+ return 0;
+}
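+
+/* Illustrative sketch, not part of this patch: how an in-kernel consumer
+ * reaches qedr_create_cq()/qedr_destroy_cq() through the ib_core verbs
+ * layer. 'ibdev' is an assumed struct ib_device published by this driver;
+ * NULL handlers give a CQ that is polled rather than event driven.
+ */
+static struct ib_cq *example_create_cq(struct ib_device *ibdev)
+{
+ struct ib_cq_init_attr attr = {
+ .cqe = 256, /* clamped to QEDR_MAX_CQES by qedr_create_cq() */
+ .comp_vector = 0,
+ };
+
+ return ib_create_cq(ibdev, NULL, NULL, NULL, &attr);
+}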
+
+static inline int get_gid_info_from_table(struct ib_qp *ibqp,
+ struct ib_qp_attr *attr,
+ int attr_mask,
+ struct qed_rdma_modify_qp_in_params
+ *qp_params)
+{
+ enum rdma_network_type nw_type;
+ struct ib_gid_attr gid_attr;
+ union ib_gid gid;
+ u32 ipv4_addr;
+ int rc = 0;
+ int i;
+
+ rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
+ attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
+ if (rc)
+ return rc;
+
+ if (!memcmp(&gid, &zgid, sizeof(gid)))
+ return -ENOENT;
+
+ if (gid_attr.ndev) {
+ qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
+
+ dev_put(gid_attr.ndev);
+ nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
+ switch (nw_type) {
+ case RDMA_NETWORK_IPV6:
+ memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+ sizeof(qp_params->sgid));
+ memcpy(&qp_params->dgid.bytes[0],
+ &attr->ah_attr.grh.dgid,
+ sizeof(qp_params->dgid));
+ qp_params->roce_mode = ROCE_V2_IPV6;
+ SET_FIELD(qp_params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+ break;
+ case RDMA_NETWORK_IB:
+ memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+ sizeof(qp_params->sgid));
+ memcpy(&qp_params->dgid.bytes[0],
+ &attr->ah_attr.grh.dgid,
+ sizeof(qp_params->dgid));
+ qp_params->roce_mode = ROCE_V1;
+ break;
+ case RDMA_NETWORK_IPV4:
+ memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
+ memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
+ ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
+ qp_params->sgid.ipv4_addr = ipv4_addr;
+ ipv4_addr =
+ qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
+ qp_params->dgid.ipv4_addr = ipv4_addr;
+ SET_FIELD(qp_params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+ qp_params->roce_mode = ROCE_V2_IPV4;
+ break;
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
+ qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
+ }
+
+ if (qp_params->vlan_id >= VLAN_CFI_MASK)
+ qp_params->vlan_id = 0;
+
+ return 0;
+}
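+
+/* Illustrative sketch, not part of this patch: a RoCE v2 IPv4 GID is an
+ * IPv4-mapped IPv6 address (::ffff:a.b.c.d), so the IPv4 address occupies
+ * the last four GID bytes. This is presumably what qedr_get_ipv4_from_gid()
+ * extracts; 'example_ipv4_from_gid' is a hypothetical name.
+ */
+static u32 example_ipv4_from_gid(const u8 *gid)
+{
+ u32 ipv4_addr;
+
+ memcpy(&ipv4_addr, &gid[12], sizeof(ipv4_addr));
+
+ return ipv4_addr; /* network byte order; ntohl()'d by the caller above */
+}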
+
+static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
+ ib_umem_release(qp->usq.umem);
+}
+
+static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
+ ib_umem_release(qp->urq.umem);
+}
+
+static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
+ kfree(qp->wqe_wr_id);
+}
+
+static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
+ kfree(qp->rqe_wr_id);
+}
+
+static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ struct qedr_device_attr *qattr = &dev->attr;
+
+ /* QP0... attrs->qp_type == IB_QPT_GSI */
+ if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: unsupported qp type=0x%x requested\n",
+ attrs->qp_type);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_wr > qattr->max_sqe) {
+ DP_ERR(dev,
+ "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
+ attrs->cap.max_send_wr, qattr->max_sqe);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_inline_data > qattr->max_inline) {
+ DP_ERR(dev,
+ "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
+ attrs->cap.max_inline_data, qattr->max_inline);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_sge > qattr->max_sge) {
+ DP_ERR(dev,
+ "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
+ attrs->cap.max_send_sge, qattr->max_sge);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_recv_sge > qattr->max_sge) {
+ DP_ERR(dev,
+ "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
+ attrs->cap.max_recv_sge, qattr->max_sge);
+ return -EINVAL;
+ }
+
+ /* Unprivileged user space cannot create special QP */
+ if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+ DP_ERR(dev,
+ "create qp: userspace can't create special QPs of type=0x%x\n",
+ attrs->qp_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
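+
+/* Illustrative sketch, not part of this patch: an ib_qp_init_attr that
+ * passes the checks above. 'pd' and 'cq' are assumed to exist and the
+ * capabilities are chosen to stay below the dev->attr limits.
+ */
+static struct ib_qp *example_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
+{
+ struct ib_qp_init_attr attrs = {
+ .qp_type = IB_QPT_RC, /* only RC and GSI are accepted */
+ .sq_sig_type = IB_SIGNAL_REQ_WR,
+ .send_cq = cq,
+ .recv_cq = cq,
+ .cap = {
+ .max_send_wr = 128, /* <= qattr->max_sqe */
+ .max_recv_wr = 128,
+ .max_send_sge = 1, /* <= qattr->max_sge */
+ .max_recv_sge = 1,
+ .max_inline_data = 0, /* <= qattr->max_inline */
+ },
+ };
+
+ return ib_create_qp(pd, &attrs);
+}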
+
+static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
+{
+ uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ uresp->rq_icid = qp->icid;
+}
+
+static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
+{
+ uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ uresp->sq_icid = qp->icid + 1;
+}
+
+static int qedr_copy_qp_uresp(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_udata *udata)
+{
+ struct qedr_create_qp_uresp uresp;
+ int rc;
+
+ memset(&uresp, 0, sizeof(uresp));
+ qedr_copy_sq_uresp(&uresp, qp);
+ qedr_copy_rq_uresp(&uresp, qp);
+
+ uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
+ uresp.qp_id = qp->qp_id;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev,
+ "create qp: failed a copy to user space with qp icid=0x%x.\n",
+ qp->icid);
+
+ return rc;
+}
+
+static void qedr_set_qp_init_params(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qedr_pd *pd,
+ struct ib_qp_init_attr *attrs)
+{
+ qp->pd = pd;
+
+ spin_lock_init(&qp->q_lock);
+
+ qp->qp_type = attrs->qp_type;
+ qp->max_inline_data = attrs->cap.max_inline_data;
+ qp->sq.max_sges = attrs->cap.max_send_sge;
+ qp->state = QED_ROCE_QP_STATE_RESET;
+ qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+ qp->sq_cq = get_qedr_cq(attrs->send_cq);
+ qp->rq_cq = get_qedr_cq(attrs->recv_cq);
+ qp->dev = dev;
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
+ pd->pd_id, qp->qp_type, qp->max_inline_data,
+ qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
+ qp->sq.max_sges, qp->sq_cq->icid);
+ qp->rq.max_sges = attrs->cap.max_recv_sge;
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
+ qp->rq.max_sges, qp->rq_cq->icid);
+}
+
+static inline void
+qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
+ struct qedr_create_qp_ureq *ureq)
+{
+ /* QP handle to be written in CQE */
+ params->qp_handle_lo = ureq->qp_handle_lo;
+ params->qp_handle_hi = ureq->qp_handle_hi;
+}
+
+static inline void
+qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qp->sq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ qp->sq.db_data.data.icid = qp->icid + 1;
+}
+
+static inline void
+qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qp->rq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ qp->rq.db_data.data.icid = qp->icid;
+}
+
+static inline int
+qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
+{
+ /* Allocate driver internal RQ array */
+ qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->rqe_wr_id)
+ return -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);
+
+ return 0;
+}
+
+static inline int
+qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ u32 temp_max_wr;
+
+ /* Allocate driver internal SQ array */
+ temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
+ temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);
+
+ /* temp_max_wr <= attr->max_sqe, which fits in a u16, so the cast is safe */
+ qp->sq.max_wr = (u16)temp_max_wr;
+ qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->wqe_wr_id)
+ return -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);
+
+ /* QP handle to be written in CQE */
+ params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
+ params->qp_handle_hi = upper_32_bits((uintptr_t)qp);
+
+ return 0;
+}
+
+static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ u32 n_sq_elems, n_sq_entries;
+ int rc;
+
+ /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
+ * the ring. The ring should allow at least a single WR, even if the
+ * user requested none, due to allocation issues.
+ */
+ n_sq_entries = attrs->cap.max_send_wr;
+ n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
+ n_sq_entries = max_t(u32, n_sq_entries, 1);
+ n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ n_sq_elems,
+ QEDR_SQE_ELEMENT_SIZE,
+ &qp->sq.pbl);
+ if (rc) {
+ DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
+ return rc;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_SQ,
+ "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
+ qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
+ n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
+ return 0;
+}
+
+static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ u32 n_rq_elems, n_rq_entries;
+ int rc;
+
+ /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
+ * the ring. The ring should allow at least a single WR, even if the
+ * user requested none, due to allocation issues.
+ */
+ n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
+ n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ n_rq_elems,
+ QEDR_RQE_ELEMENT_SIZE,
+ &qp->rq.pbl);
+
+ if (rc) {
+ DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
+ return -ENOMEM;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_RQ,
+ "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
+ qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
+ n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
+
+ /* n_rq_entries fits in a u16, so the cast is safe */
+ qp->rq.max_wr = (u16)n_rq_entries;
+
+ return 0;
+}
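+
+/* Worked example of the sizing above: max_recv_wr = 128 gives
+ * n_rq_entries = 128 and n_rq_elems = 128 * QEDR_MAX_RQE_ELEMENTS_PER_RQE
+ * chain elements of QEDR_RQE_ELEMENT_SIZE bytes each, while
+ * max_recv_wr = 0 still allocates a one-entry ring.
+ */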
+
+static inline void
+qedr_init_qp_in_params_sq(struct qedr_dev *dev,
+ struct qedr_pd *pd,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ /* QP handle to be written in an async event */
+ params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
+ params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
+
+ params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
+ params->fmr_and_reserved_lkey = !udata;
+ params->pd = pd->pd_id;
+ params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
+ params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
+ params->max_sq_sges = 0;
+ params->stats_queue = 0;
+
+ if (udata) {
+ params->sq_num_pages = qp->usq.pbl_info.num_pbes;
+ params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
+ } else {
+ params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
+ params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
+ }
+}
+
+static inline void
+qedr_init_qp_in_params_rq(struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+ params->srq_id = 0;
+ params->use_srq = false;
+
+ if (udata) {
+ params->rq_num_pages = qp->urq.pbl_info.num_pbes;
+ params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+ } else {
+ params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
+ params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
+ }
+}
+
+static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
+ qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
+ qp->urq.buf_len);
+}
+
+static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
+ struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qedr_create_qp_ureq *ureq)
+{
+ int rc;
+
+ /* SQ - read access only (0), dma sync not required (0) */
+ rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
+ ureq->sq_len, 0, 0);
+ if (rc)
+ return rc;
+
+ /* RQ - read access only (0), dma sync not required (0) */
+ rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
+ ureq->rq_len, 0, 0);
+
+ if (rc)
+ qedr_cleanup_user_sq(dev, qp);
+ return rc;
+}
+
+static inline int
+qedr_init_kernel_qp(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ int rc;
+
+ rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
+ if (rc) {
+ DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
+ return rc;
+ }
+
+ rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
+ if (rc) {
+ dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
+ DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
+ return rc;
+ }
+
+ rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
+ if (rc) {
+ qedr_cleanup_kernel_sq(dev, qp);
+ DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
+ return rc;
+ }
+
+ rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
+ if (rc) {
+ DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
+ qedr_cleanup_kernel_sq(dev, qp);
+ dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
+ return rc;
+ }
+
+ return rc;
+}
+
+struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qed_rdma_create_qp_out_params out_params;
+ struct qed_rdma_create_qp_in_params in_params;
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct ib_ucontext *ib_ctx = NULL;
+ struct qedr_ucontext *ctx = NULL;
+ struct qedr_create_qp_ureq ureq;
+ struct qedr_qp *qp;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
+ udata ? "user library" : "kernel", pd);
+
+ rc = qedr_check_qp_attrs(ibpd, dev, attrs);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* SRQs are not supported; reject before allocating the QP */
+ if (attrs->srq)
+ return ERR_PTR(-EINVAL);
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
+ get_qedr_cq(attrs->send_cq),
+ get_qedr_cq(attrs->send_cq)->icid,
+ get_qedr_cq(attrs->recv_cq),
+ get_qedr_cq(attrs->recv_cq)->icid);
+
+ qedr_set_qp_init_params(dev, qp, pd, attrs);
+
+ if (attrs->qp_type == IB_QPT_GSI) {
+ if (udata) {
+ DP_ERR(dev,
+ "create qp: unexpected udata when creating GSI QP\n");
+ goto err0;
+ }
+ return qedr_create_gsi_qp(dev, attrs, qp);
+ }
+
+ memset(&in_params, 0, sizeof(in_params));
+
+ if (udata) {
+ if (!(ibpd->uobject && ibpd->uobject->context))
+ goto err0;
+
+ ib_ctx = ibpd->uobject->context;
+ ctx = get_qedr_ucontext(ib_ctx);
+
+ memset(&ureq, 0, sizeof(ureq));
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ DP_ERR(dev,
+ "create qp: problem copying data from user space\n");
+ goto err0;
+ }
+
+ rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
+ if (rc)
+ goto err0;
+
+ qedr_init_qp_user_params(&in_params, &ureq);
+ } else {
+ rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
+ if (rc)
+ goto err0;
+ }
+
+ qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
+ qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);
+
+ qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+ &in_params, &out_params);
+
+ if (!qp->qed_qp)
+ goto err1;
+
+ qp->qp_id = out_params.qp_id;
+ qp->icid = out_params.icid;
+ qp->ibqp.qp_num = qp->qp_id;
+
+ if (udata) {
+ rc = qedr_copy_qp_uresp(dev, qp, udata);
+ if (rc)
+ goto err2;
+
+ qedr_qp_user_print(dev, qp);
+ } else {
+ qedr_init_qp_kernel_doorbell_sq(dev, qp);
+ qedr_init_qp_kernel_doorbell_rq(dev, qp);
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
+ udata ? "user" : "kernel", qp);
+
+ return &qp->ibqp;
+
+err2:
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+ if (rc)
+ DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
+err1:
+ if (udata) {
+ qedr_cleanup_user_sq(dev, qp);
+ qedr_cleanup_user_rq(dev, qp);
+ } else {
+ qedr_cleanup_kernel_sq(dev, qp);
+ qedr_cleanup_kernel_rq(dev, qp);
+ }
+
+err0:
+ kfree(qp);
+
+ return ERR_PTR(-EFAULT);
+}
+
+enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+{
+ switch (qp_state) {
+ case QED_ROCE_QP_STATE_RESET:
+ return IB_QPS_RESET;
+ case QED_ROCE_QP_STATE_INIT:
+ return IB_QPS_INIT;
+ case QED_ROCE_QP_STATE_RTR:
+ return IB_QPS_RTR;
+ case QED_ROCE_QP_STATE_RTS:
+ return IB_QPS_RTS;
+ case QED_ROCE_QP_STATE_SQD:
+ return IB_QPS_SQD;
+ case QED_ROCE_QP_STATE_ERR:
+ return IB_QPS_ERR;
+ case QED_ROCE_QP_STATE_SQE:
+ return IB_QPS_SQE;
+ }
+ return IB_QPS_ERR;
+}
+
+enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+{
+ switch (qp_state) {
+ case IB_QPS_RESET:
+ return QED_ROCE_QP_STATE_RESET;
+ case IB_QPS_INIT:
+ return QED_ROCE_QP_STATE_INIT;
+ case IB_QPS_RTR:
+ return QED_ROCE_QP_STATE_RTR;
+ case IB_QPS_RTS:
+ return QED_ROCE_QP_STATE_RTS;
+ case IB_QPS_SQD:
+ return QED_ROCE_QP_STATE_SQD;
+ case IB_QPS_ERR:
+ return QED_ROCE_QP_STATE_ERR;
+ default:
+ return QED_ROCE_QP_STATE_ERR;
+ }
+}
+
+static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
+{
+ qed_chain_reset(&qph->pbl);
+ qph->prod = 0;
+ qph->cons = 0;
+ qph->wqe_cons = 0;
+ qph->db_data.data.value = cpu_to_le16(0);
+}
+
+static int qedr_update_qp_state(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ enum qed_roce_qp_state new_state)
+{
+ int status = 0;
+
+ if (new_state == qp->state)
+ return 0;
+
+ switch (qp->state) {
+ case QED_ROCE_QP_STATE_RESET:
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_INIT:
+ qp->prev_wqe_size = 0;
+ qedr_reset_qp_hwq_info(&qp->sq);
+ qedr_reset_qp_hwq_info(&qp->rq);
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_INIT:
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTR:
+ /* Update doorbell (in case post_recv was
+ * done before move to RTR)
+ */
+ wmb();
+ writel(qp->rq.db_data.raw, qp->rq.db);
+ /* Make sure write takes effect */
+ mmiowb();
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_RTR:
+ /* RTR->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTS:
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_RTS:
+ /* RTS->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_SQD:
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_SQD:
+ /* SQD->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTS:
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ /* ERR->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RESET:
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+
+ return status;
+}
+
+int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qed_rdma_modify_qp_in_params qp_params = { 0 };
+ struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
+ enum ib_qp_state old_qp_state, new_qp_state;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
+ attr->qp_state);
+
+ old_qp_state = qedr_get_ibqp_state(qp->state);
+ if (attr_mask & IB_QP_STATE)
+ new_qp_state = attr->qp_state;
+ else
+ new_qp_state = old_qp_state;
+
+ if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state, ibqp->qp_type,
+ attr_mask, IB_LINK_LAYER_ETHERNET)) {
+ DP_ERR(dev,
+ "modify qp: invalid attribute mask=0x%x specified for\n"
+ "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
+ attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
+ new_qp_state);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ /* Translate the masks... */
+ if (attr_mask & IB_QP_STATE) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
+ qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
+ }
+
+ if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+ qp_params.sqd_async = true;
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
+ if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
+ qp_params.incoming_rdma_read_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_READ;
+ qp_params.incoming_rdma_write_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE;
+ qp_params.incoming_atomic_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_ATOMIC;
+ }
+
+ if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+ if (attr_mask & IB_QP_PATH_MTU) {
+ if (attr->path_mtu < IB_MTU_256 ||
+ attr->path_mtu > IB_MTU_4096) {
+ pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
+ rc = -EINVAL;
+ goto err;
+ }
+ qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
+ ib_mtu_enum_to_int(iboe_get_mtu
+ (dev->ndev->mtu)));
+ }
+
+ if (!qp->mtu) {
+ qp->mtu =
+ ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+ pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
+
+ qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
+ qp_params.flow_label = attr->ah_attr.grh.flow_label;
+ qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
+
+ qp->sgid_idx = attr->ah_attr.grh.sgid_index;
+
+ rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
+ if (rc) {
+ DP_ERR(dev,
+ "modify qp: problems with GID index %d (rc=%d)\n",
+ attr->ah_attr.grh.sgid_index, rc);
+ return rc;
+ }
+
+ rc = qedr_get_dmac(dev, &attr->ah_attr,
+ qp_params.remote_mac_addr);
+ if (rc)
+ return rc;
+
+ qp_params.use_local_mac = true;
+ ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
+ qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
+ qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
+ DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
+ qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
+ qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
+ DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
+ qp_params.remote_mac_addr);
+
+ qp_params.mtu = qp->mtu;
+ qp_params.lb_indication = false;
+ }
+
+ if (!qp_params.mtu) {
+ /* Stay with current MTU */
+ if (qp->mtu)
+ qp_params.mtu = qp->mtu;
+ else
+ qp_params.mtu =
+ ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+ }
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
+
+ qp_params.ack_timeout = attr->timeout;
+ if (attr->timeout) {
+ u32 temp;
+
+ temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
+ /* FW requires [msec] */
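+ /* e.g. attr->timeout = 14 gives 4096 * 2^14 = 67108864 ns,
+ * so qp_params.ack_timeout = 67 msec
+ */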
+ qp_params.ack_timeout = temp;
+ } else {
+ /* Infinite */
+ qp_params.ack_timeout = 0;
+ }
+ }
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
+ qp_params.retry_cnt = attr->retry_cnt;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
+ qp_params.rnr_retry_cnt = attr->rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
+ qp_params.rq_psn = attr->rq_psn;
+ qp->rq_psn = attr->rq_psn;
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
+ rc = -EINVAL;
+ DP_ERR(dev,
+ "unsupported max_rd_atomic=%d, supported=%d\n",
+ attr->max_rd_atomic,
+ dev->attr.max_qp_req_rd_atomic_resc);
+ goto err;
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
+ qp_params.max_rd_atomic_req = attr->max_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
+ qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
+ }
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
+ qp_params.sq_psn = attr->sq_psn;
+ qp->sq_psn = attr->sq_psn;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (attr->max_dest_rd_atomic >
+ dev->attr.max_qp_resp_rd_atomic_resc) {
+ DP_ERR(dev,
+ "unsupported max_dest_rd_atomic=%d, supported=%d\n",
+ attr->max_dest_rd_atomic,
+ dev->attr.max_qp_resp_rd_atomic_resc);
+
+ rc = -EINVAL;
+ goto err;
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
+ qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_DEST_QPN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
+
+ qp_params.dest_qp = attr->dest_qp_num;
+ qp->dest_qp_num = attr->dest_qp_num;
+ }
+
+ if (qp->qp_type != IB_QPT_GSI)
+ rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
+ qp->qed_qp, &qp_params);
+
+ if (attr_mask & IB_QP_STATE) {
+ if ((qp->qp_type != IB_QPT_GSI) && (!udata))
+ qedr_update_qp_state(dev, qp, qp_params.new_state);
+ qp->state = qp_params.new_state;
+ }
+
+err:
+ return rc;
+}
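+
+/* Illustrative sketch, not part of this patch: driving an RC QP to RTR
+ * through qedr_modify_qp() with the mask bits ib_modify_qp_is_ok()
+ * expects for INIT->RTR. 'qp', 'dest_qpn', 'psn', 'sgid_idx' and 'dgid'
+ * are assumptions supplied by connection management.
+ */
+static int example_qp_to_rtr(struct ib_qp *qp, u32 dest_qpn, u32 psn,
+ u8 sgid_idx, union ib_gid *dgid)
+{
+ struct ib_qp_attr attr = {
+ .qp_state = IB_QPS_RTR,
+ .path_mtu = IB_MTU_1024,
+ .dest_qp_num = dest_qpn,
+ .rq_psn = psn,
+ .max_dest_rd_atomic = 1,
+ .min_rnr_timer = 12,
+ };
+
+ attr.ah_attr.ah_flags = IB_AH_GRH;
+ attr.ah_attr.port_num = 1;
+ attr.ah_attr.grh.sgid_index = sgid_idx;
+ attr.ah_attr.grh.dgid = *dgid;
+ attr.ah_attr.grh.hop_limit = 1;
+
+ return ib_modify_qp(qp, &attr,
+ IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
+ IB_QP_DEST_QPN | IB_QP_RQ_PSN |
+ IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
+}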
+
+static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
+{
+ int ib_qp_acc_flags = 0;
+
+ if (params->incoming_rdma_write_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (params->incoming_rdma_read_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
+ if (params->incoming_atomic_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
+ return ib_qp_acc_flags;
+}
+
+int qedr_query_qp(struct ib_qp *ibqp,
+ struct ib_qp_attr *qp_attr,
+ int attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct qed_rdma_query_qp_out_params params;
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ int rc = 0;
+
+ memset(&params, 0, sizeof(params));
+
+ rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
+ if (rc)
+ goto err;
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+ qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
+ qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+ qp_attr->path_mig_state = IB_MIG_MIGRATED;
+ qp_attr->rq_psn = params.rq_psn;
+ qp_attr->sq_psn = params.sq_psn;
+ qp_attr->dest_qp_num = params.dest_qp;
+
+ qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
+
+ qp_attr->cap.max_send_wr = qp->sq.max_wr;
+ qp_attr->cap.max_recv_wr = qp->rq.max_wr;
+ qp_attr->cap.max_send_sge = qp->sq.max_sges;
+ qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+ qp_init_attr->cap = qp_attr->cap;
+
+ memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
+ sizeof(qp_attr->ah_attr.grh.dgid.raw));
+
+ qp_attr->ah_attr.grh.flow_label = params.flow_label;
+ qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
+ qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
+ qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
+
+ qp_attr->ah_attr.ah_flags = IB_AH_GRH;
+ qp_attr->ah_attr.port_num = 1;
+ qp_attr->ah_attr.sl = 0;
+ qp_attr->timeout = params.timeout;
+ qp_attr->rnr_retry = params.rnr_retry;
+ qp_attr->retry_cnt = params.retry_cnt;
+ qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
+ qp_attr->pkey_index = params.pkey_index;
+ qp_attr->port_num = 1;
+ qp_attr->ah_attr.src_path_bits = 0;
+ qp_attr->ah_attr.static_rate = 0;
+ qp_attr->alt_pkey_index = 0;
+ qp_attr->alt_port_num = 0;
+ qp_attr->alt_timeout = 0;
+ memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
+
+ qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
+ qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
+ qp_attr->max_rd_atomic = params.max_rd_atomic;
+ qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
+ qp_attr->cap.max_inline_data);
+
+err:
+ return rc;
+}
+
+int qedr_destroy_qp(struct ib_qp *ibqp)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ struct ib_qp_attr attr;
+ int attr_mask = 0;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
+ qp, qp->qp_type);
+
+ if (qp->state != (QED_ROCE_QP_STATE_RESET | QED_ROCE_QP_STATE_ERR |
+ QED_ROCE_QP_STATE_INIT)) {
+ attr.qp_state = IB_QPS_ERR;
+ attr_mask |= IB_QP_STATE;
+
+ /* Change the QP state to ERROR */
+ qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+ }
+
+ if (qp->qp_type != IB_QPT_GSI) {
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+ if (rc)
+ return rc;
+ } else {
+ qedr_destroy_gsi_qp(dev);
+ }
+
+ if (ibqp->uobject && ibqp->uobject->context) {
+ qedr_cleanup_user_sq(dev, qp);
+ qedr_cleanup_user_rq(dev, qp);
+ } else {
+ qedr_cleanup_kernel_sq(dev, qp);
+ qedr_cleanup_kernel_rq(dev, qp);
+ }
+
+ kfree(qp);
+
+ return rc;
+}
+
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+{
+ struct qedr_ah *ah;
+
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ ah->attr = *attr;
+
+ return &ah->ibah;
+}
+
+int qedr_destroy_ah(struct ib_ah *ibah)
+{
+ struct qedr_ah *ah = get_qedr_ah(ibah);
+
+ kfree(ah);
+ return 0;
+}
+
+static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
+{
+ struct qedr_pbl *pbl, *tmp;
+
+ if (info->pbl_table)
+ list_add_tail(&info->pbl_table->list_entry,
+ &info->free_pbl_list);
+
+ if (!list_empty(&info->inuse_pbl_list))
+ list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
+
+ list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
+ list_del(&pbl->list_entry);
+ qedr_free_pbl(dev, &info->pbl_info, pbl);
+ }
+}
+
+static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
+ size_t page_list_len, bool two_layered)
+{
+ struct qedr_pbl *tmp;
+ int rc;
+
+ INIT_LIST_HEAD(&info->free_pbl_list);
+ INIT_LIST_HEAD(&info->inuse_pbl_list);
+
+ rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
+ page_list_len, two_layered);
+ if (rc)
+ goto done;
+
+ info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+ if (!info->pbl_table) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
+ &info->pbl_table->pa);
+
+ /* In the usual case we use 2 PBLs, so we add one to the free
+ * list and allocate another one
+ */
+ tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+ if (!tmp) {
+ DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
+ goto done;
+ }
+
+ list_add_tail(&tmp->list_entry, &info->free_pbl_list);
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
+
+done:
+ if (rc)
+ free_mr_info(dev, info);
+
+ return rc;
+}
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 usr_addr, int acc, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_mr *mr;
+ struct qedr_pd *pd;
+ int rc = -ENOMEM;
+
+ pd = get_qedr_pd(ibpd);
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
+ pd->pd_id, start, len, usr_addr, acc);
+
+ if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(rc);
+
+ mr->type = QEDR_MR_USER;
+
+ mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+ if (IS_ERR(mr->umem)) {
+ rc = -EFAULT;
+ goto err0;
+ }
+
+ rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
+ if (rc)
+ goto err1;
+
+ qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
+ &mr->info.pbl_info);
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ goto err1;
+ }
+
+ /* Index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+ mr->hw_mr.key = 0;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hw_mr.mw_bind = false;
+ mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
+ mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+ mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+ mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
+ mr->hw_mr.fbo = ib_umem_offset(mr->umem);
+ mr->hw_mr.length = len;
+ mr->hw_mr.vaddr = usr_addr;
+ mr->hw_mr.zbva = false;
+ mr->hw_mr.phy_mr = false;
+ mr->hw_mr.dma_mr = false;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err2;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+ mr->hw_mr.remote_atomic)
+ mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
+ mr->ibmr.lkey);
+ return &mr->ibmr;
+
+err2:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+err0:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
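+
+/* Example of the key layout used above: the HW itid occupies the bits
+ * above the low byte (itid << 8) and the low byte is the key, so a
+ * hypothetical itid of 0x1a2b with key 0 yields lkey = rkey = 0x1a2b00.
+ */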
+
+int qedr_dereg_mr(struct ib_mr *ib_mr)
+{
+ struct qedr_mr *mr = get_qedr_mr(ib_mr);
+ struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
+ int rc = 0;
+
+ rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
+ if (rc)
+ return rc;
+
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+
+ if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+
+ /* it could be user registered memory. */
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ kfree(mr);
+
+ return rc;
+}
+
+struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+{
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_mr *mr;
+ int rc = -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
+ max_page_list_len);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(rc);
+
+ mr->dev = dev;
+ mr->type = QEDR_MR_FRMR;
+
+ rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
+ if (rc)
+ goto err0;
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ goto err0;
+ }
+
+ /* Index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
+ mr->hw_mr.key = 0;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = 0;
+ mr->hw_mr.remote_read = 0;
+ mr->hw_mr.remote_write = 0;
+ mr->hw_mr.remote_atomic = 0;
+ mr->hw_mr.mw_bind = false;
+ mr->hw_mr.pbl_ptr = 0;
+ mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+ mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+ mr->hw_mr.fbo = 0;
+ mr->hw_mr.length = 0;
+ mr->hw_mr.vaddr = 0;
+ mr->hw_mr.zbva = false;
+ mr->hw_mr.phy_mr = true;
+ mr->hw_mr.dma_mr = false;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err1;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ mr->ibmr.rkey = mr->ibmr.lkey;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
+ return mr;
+
+err1:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err0:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
+ enum ib_mr_type mr_type, u32 max_num_sg)
+{
+ struct qedr_mr *mr;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = __qedr_alloc_mr(ibpd, max_num_sg);
+ if (IS_ERR(mr))
+ return ERR_CAST(mr);
+
+ return &mr->ibmr;
+}
+
+static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct qedr_mr *mr = get_qedr_mr(ibmr);
+ struct qedr_pbl *pbl_table;
+ struct regpair *pbe;
+ u32 pbes_in_page;
+
+ if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
+ DP_ERR(mr->dev, "qedr_set_page failes when %d\n", mr->npages);
+ return -ENOMEM;
+ }
+
+ DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
+ mr->npages, addr);
+
+ pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
+ pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
+ pbe = (struct regpair *)pbl_table->va;
+ pbe += mr->npages % pbes_in_page;
+ pbe->lo = cpu_to_le32((u32)addr);
+ pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
+
+ mr->npages++;
+
+ return 0;
+}
+
+static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
+{
+ int work = info->completed - info->completed_handled - 1;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
+ while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
+ struct qedr_pbl *pbl;
+
+ /* Free all the page lists that can be freed (all the ones
+ * that were invalidated), under the assumption that if an FMR
+ * completed successfully, any invalidate operation posted
+ * before it has completed as well.
+ */
+ pbl = list_first_entry(&info->inuse_pbl_list,
+ struct qedr_pbl, list_entry);
+ list_del(&pbl->list_entry);
+ list_add_tail(&pbl->list_entry, &info->free_pbl_list);
+ info->completed_handled++;
+ }
+}
+
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct qedr_mr *mr = get_qedr_mr(ibmr);
+
+ mr->npages = 0;
+
+ handle_completed_mrs(mr->dev, &mr->info);
+ return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
+}
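+
+/* Illustrative sketch, not part of this patch: the usual fast-registration
+ * flow that lands in qedr_map_mr_sg()/qedr_set_page(). 'qp', 'mr', 'sgl'
+ * and 'nents' are assumptions and error unwinding is trimmed.
+ */
+static int example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
+ struct scatterlist *sgl, int nents)
+{
+ struct ib_send_wr *bad_wr;
+ struct ib_reg_wr wr = {};
+ int n;
+
+ n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
+ if (n <= 0)
+ return n < 0 ? n : -EINVAL;
+
+ wr.wr.opcode = IB_WR_REG_MR;
+ wr.wr.send_flags = IB_SEND_SIGNALED;
+ wr.mr = mr;
+ wr.key = mr->rkey;
+ wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
+
+ return ib_post_send(qp, &wr.wr, &bad_wr);
+}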
+
+struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_mr *mr;
+ int rc;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->type = QEDR_MR_DMA;
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ goto err1;
+ }
+
+ /* index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hw_mr.dma_mr = true;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err2;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+ mr->hw_mr.remote_atomic)
+ mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
+ return &mr->ibmr;
+
+err2:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
+{
+ return (((wq->prod + 1) % wq->max_wr) == wq->cons);
+}
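+
+/* Worked example: with max_wr = 128, prod = 126 and cons = 127 gives
+ * (126 + 1) % 128 == cons, i.e. full; one slot always stays unused so
+ * that a full ring can be told apart from an empty one.
+ */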
+
+static int sge_data_len(struct ib_sge *sg_list, int num_sge)
+{
+ int i, len = 0;
+
+ for (i = 0; i < num_sge; i++)
+ len += sg_list[i].length;
+
+ return len;
+}
+
+static void swap_wqe_data64(u64 *p)
+{
+ int i;
+
+ for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
+ *p = cpu_to_be64(cpu_to_le64(*p));
+}
+
+static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
+ struct qedr_qp *qp, u8 *wqe_size,
+ struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr, u8 *bits,
+ u8 bit)
+{
+ u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
+ char *seg_prt, *wqe;
+ int i, seg_siz;
+
+ if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
+ DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
+ *bad_wr = wr;
+ return 0;
+ }
+
+ if (!data_size)
+ return data_size;
+
+ *bits |= bit;
+
+ seg_prt = NULL;
+ wqe = NULL;
+ seg_siz = 0;
+
+ /* Copy data inline */
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 len = wr->sg_list[i].length;
+ void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
+
+ while (len > 0) {
+ u32 cur;
+
+ /* New segment required */
+ if (!seg_siz) {
+ wqe = (char *)qed_chain_produce(&qp->sq.pbl);
+ seg_prt = wqe;
+ seg_siz = sizeof(struct rdma_sq_common_wqe);
+ (*wqe_size)++;
+ }
+
+ /* Calculate currently allowed length */
+ cur = min_t(u32, len, seg_siz);
+ memcpy(seg_prt, src, cur);
+
+ /* Update segment variables */
+ seg_prt += cur;
+ seg_siz -= cur;
+
+ /* Update sge variables */
+ src += cur;
+ len -= cur;
+
+ /* Swap fully-completed segments */
+ if (!seg_siz)
+ swap_wqe_data64((u64 *)wqe);
+ }
+ }
+
+ /* swap the last, partially-completed segment */
+ if (seg_siz)
+ swap_wqe_data64((u64 *)wqe);
+
+ return data_size;
+}
+
+#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
+ do { \
+ DMA_REGPAIR_LE(sge->addr, vaddr); \
+ (sge)->length = cpu_to_le32(vlength); \
+ (sge)->flags = cpu_to_le32(vflags); \
+ } while (0)
+
+#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
+ do { \
+ DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
+ (hdr)->num_sges = num_sge; \
+ } while (0)
+
+#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
+ do { \
+ DMA_REGPAIR_LE(sge->addr, vaddr); \
+ (sge)->length = cpu_to_le32(vlength); \
+ (sge)->l_key = cpu_to_le32(vlkey); \
+ } while (0)
+
+static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
+ struct ib_send_wr *wr)
+{
+ u32 data_size = 0;
+ int i;
+
+ for (i = 0; i < wr->num_sge; i++) {
+ struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
+
+ DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
+ sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
+ sge->length = cpu_to_le32(wr->sg_list[i].length);
+ data_size += wr->sg_list[i].length;
+ }
+
+ if (wqe_size)
+ *wqe_size += wr->num_sge;
+
+ return data_size;
+}
+
+static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct rdma_sq_rdma_wqe_1st *rwqe,
+ struct rdma_sq_rdma_wqe_2nd *rwqe2,
+ struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
+ DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
+
+ if (wr->send_flags & IB_SEND_INLINE) {
+ u8 flags = 0;
+
+ SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
+ return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
+ bad_wr, &rwqe->flags, flags);
+ }
+
+ return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
+}
+
+static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct rdma_sq_send_wqe_1st *swqe,
+ struct rdma_sq_send_wqe_2st *swqe2,
+ struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ memset(swqe2, 0, sizeof(*swqe2));
+ if (wr->send_flags & IB_SEND_INLINE) {
+ u8 flags = 0;
+
+ SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
+ return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
+ bad_wr, &swqe->flags, flags);
+ }
+
+ return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
+}
+
+static int qedr_prepare_reg(struct qedr_qp *qp,
+ struct rdma_sq_fmr_wqe_1st *fwqe1,
+ struct ib_reg_wr *wr)
+{
+ struct qedr_mr *mr = get_qedr_mr(wr->mr);
+ struct rdma_sq_fmr_wqe_2nd *fwqe2;
+
+ fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
+ fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
+ fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
+ fwqe1->l_key = wr->key;
+
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
+ !!(wr->access & IB_ACCESS_REMOTE_READ));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
+ !!(wr->access & IB_ACCESS_REMOTE_WRITE));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
+ !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
+ !!(wr->access & IB_ACCESS_LOCAL_WRITE));
+ fwqe2->fmr_ctrl = 0;
+
+ SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
+ ilog2(mr->ibmr.page_size) - 12);
+
+ fwqe2->length_hi = 0;
+ fwqe2->length_lo = mr->ibmr.length;
+ fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
+ fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
+
+ qp->wqe_wr_id[qp->sq.prod].mr = mr;
+
+ return 0;
+}
+
+enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return IB_WC_RDMA_WRITE;
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_INV:
+ return IB_WC_SEND;
+ case IB_WR_RDMA_READ:
+ return IB_WC_RDMA_READ;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ return IB_WC_COMP_SWAP;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ return IB_WC_FETCH_ADD;
+ case IB_WR_REG_MR:
+ return IB_WC_REG_MR;
+ case IB_WR_LOCAL_INV:
+ return IB_WC_LOCAL_INV;
+ default:
+ return IB_WC_SEND;
+ }
+}
+
+static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+{
+ int wq_is_full, err_wr, pbl_is_full;
+ struct qedr_dev *dev = qp->dev;
+
+ /* prevent SQ overflow and/or processing of a bad WR */
+ err_wr = wr->num_sge > qp->sq.max_sges;
+ wq_is_full = qedr_wq_is_full(&qp->sq);
+ pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
+ QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ if (wq_is_full || err_wr || pbl_is_full) {
+ if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
+ DP_ERR(dev,
+ "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
+ }
+
+ if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
+ DP_ERR(dev,
+ "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
+ }
+
+ if (pbl_is_full &&
+ !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
+ DP_ERR(dev,
+ "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
+ }
+ return false;
+ }
+ return true;
+}
+
+static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct rdma_sq_atomic_wqe_1st *awqe1;
+ struct rdma_sq_atomic_wqe_2nd *awqe2;
+ struct rdma_sq_atomic_wqe_3rd *awqe3;
+ struct rdma_sq_send_wqe_2st *swqe2;
+ struct rdma_sq_local_inv_wqe *iwqe;
+ struct rdma_sq_rdma_wqe_2nd *rwqe2;
+ struct rdma_sq_send_wqe_1st *swqe;
+ struct rdma_sq_rdma_wqe_1st *rwqe;
+ struct rdma_sq_fmr_wqe_1st *fwqe1;
+ struct rdma_sq_common_wqe *wqe;
+ u32 length;
+ int rc = 0;
+ bool comp;
+
+ if (!qedr_can_post_send(qp, wr)) {
+ *bad_wr = wr;
+ return -ENOMEM;
+ }
+
+ wqe = qed_chain_produce(&qp->sq.pbl);
+ qp->wqe_wr_id[qp->sq.prod].signaled =
+ !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
+
+ wqe->flags = 0;
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
+ !!(wr->send_flags & IB_SEND_SOLICITED));
+ comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
+ !!(wr->send_flags & IB_SEND_FENCE));
+ wqe->prev_wqe_size = qp->prev_wqe_size;
+
+ qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
+
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+ swqe->wqe_size = 2;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+
+ swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+ case IB_WR_SEND:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+
+ swqe->wqe_size = 2;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+ swqe->wqe_size = 2;
+ swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+ case IB_WR_RDMA_WRITE:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ DP_ERR(dev,
+ "RDMA READ WITH INVALIDATE not supported\n");
+ *bad_wr = wr;
+ rc = -EINVAL;
+ break;
+
+ case IB_WR_RDMA_READ:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
+ awqe1->wqe_size = 4;
+
+ awqe2 = qed_chain_produce(&qp->sq.pbl);
+ DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
+ awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
+
+ awqe3 = qed_chain_produce(&qp->sq.pbl);
+
+ if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
+ DMA_REGPAIR_LE(awqe3->swap_data,
+ atomic_wr(wr)->compare_add);
+ } else {
+ wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
+ DMA_REGPAIR_LE(awqe3->swap_data,
+ atomic_wr(wr)->swap);
+ DMA_REGPAIR_LE(awqe3->cmp_data,
+ atomic_wr(wr)->compare_add);
+ }
+
+ qedr_prepare_sq_sges(qp, NULL, wr);
+
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
+ qp->prev_wqe_size = awqe1->wqe_size;
+ break;
+
+ case IB_WR_LOCAL_INV:
+ iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
+ iwqe->wqe_size = 1;
+
+ iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
+ iwqe->inv_l_key = wr->ex.invalidate_rkey;
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
+ qp->prev_wqe_size = iwqe->wqe_size;
+ break;
+ case IB_WR_REG_MR:
+ DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
+ wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
+ fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
+ fwqe1->wqe_size = 2;
+
+ rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
+ if (rc) {
+ DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
+ *bad_wr = wr;
+ break;
+ }
+
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
+ qp->prev_wqe_size = fwqe1->wqe_size;
+ break;
+ default:
+ DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
+ rc = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ if (*bad_wr) {
+ u16 value;
+
+ /* Restore prod to its position before
+ * this WR was processed
+ */
+ value = le16_to_cpu(qp->sq.db_data.data.value);
+ qed_chain_set_prod(&qp->sq.pbl, value, wqe);
+
+ /* Restore prev_wqe_size */
+ qp->prev_wqe_size = wqe->prev_wqe_size;
+ rc = -EINVAL;
+ DP_ERR(dev, "POST SEND FAILED\n");
+ }
+
+ return rc;
+}
+
+int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ unsigned long flags;
+ int rc = 0;
+
+ *bad_wr = NULL;
+
+ if (qp->qp_type == IB_QPT_GSI)
+ return qedr_gsi_post_send(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
+ (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "QP in wrong state! QP icid=0x%x state %d\n",
+ qp->icid, qp->state);
+ return -EINVAL;
+ }
+
+ if (!wr) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ DP_ERR(dev, "Got an empty post send.\n");
+ return -EINVAL;
+ }
+
+ while (wr) {
+ rc = __qedr_post_send(ibqp, wr, bad_wr);
+ if (rc)
+ break;
+
+ qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+
+ qedr_inc_sw_prod(&qp->sq);
+
+ qp->sq.db_data.data.value++;
+
+ wr = wr->next;
+ }
+
+ /* Trigger doorbell
+ * If there was a failure in the first WR then it will be triggered in
+ * vain. However, this is not harmful (as long as the producer value is
+ * unchanged). For performance reasons we avoid checking for this
+ * redundant doorbell.
+ */
+ wmb();
+ writel(qp->sq.db_data.raw, qp->sq.db);
+
+ /* Make sure write sticks */
+ mmiowb();
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return rc;
+}
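+
+/* Illustrative sketch, not part of this patch: a minimal signaled SEND
+ * that exercises qedr_post_send() above. 'qp' plus a DMA-mapped buffer
+ * ('dma_addr', 'lkey') are assumed to have been set up already.
+ */
+static int example_post_send(struct ib_qp *qp, u64 dma_addr, u32 lkey)
+{
+ struct ib_sge sge = {
+ .addr = dma_addr,
+ .length = 64,
+ .lkey = lkey,
+ };
+ struct ib_send_wr wr = {
+ .wr_id = 1,
+ .sg_list = &sge,
+ .num_sge = 1,
+ .opcode = IB_WR_SEND,
+ .send_flags = IB_SEND_SIGNALED,
+ };
+ struct ib_send_wr *bad_wr;
+
+ return ib_post_send(qp, &wr, &bad_wr);
+}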
+
+int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ unsigned long flags;
+ int status = 0;
+
+ if (qp->qp_type == IB_QPT_GSI)
+ return qedr_gsi_post_recv(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
+ (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ while (wr) {
+ int i;
+
+ if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
+ QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
+ wr->num_sge > qp->rq.max_sges) {
+ DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
+ qed_chain_get_elem_left_u32(&qp->rq.pbl),
+ QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
+ qp->rq.max_sges);
+ status = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 flags = 0;
+ struct rdma_rq_sge *rqe =
+ qed_chain_produce(&qp->rq.pbl);
+
+ /* First one must include the number
+ * of SGE in the list
+ */
+ if (!i)
+ SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
+ wr->num_sge);
+
+ SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
+ wr->sg_list[i].lkey);
+
+ RQ_SGE_SET(rqe, wr->sg_list[i].addr,
+ wr->sg_list[i].length, flags);
+ }
+
+ /* Special case of no SGEs. The FW requires between 1 and 4 SGEs;
+ * in this case we need to post one SGE with length zero. This is
+ * because an RDMA write with immediate consumes an RQ entry.
+ */
+ if (!wr->num_sge) {
+ u32 flags = 0;
+ struct rdma_rq_sge *rqe =
+ qed_chain_produce(&qp->rq.pbl);
+
+ /* First one must include the number
+ * of SGE in the list
+ */
+ SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
+ SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
+
+ RQ_SGE_SET(rqe, 0, 0, flags);
+ i = 1;
+ }
+
+ qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+ qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
+
+ qedr_inc_sw_prod(&qp->rq);
+
+ /* Flush all the writes before signalling doorbell */
+ wmb();
+
+ qp->rq.db_data.data.value++;
+
+ writel(qp->rq.db_data.raw, qp->rq.db);
+
+ /* Make sure write sticks */
+ mmiowb();
+
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return status;
+}
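
To make the zero-SGE special case above concrete, here is a hedged, self-contained sketch (hypothetical struct, not the firmware ABI) of how the receive SGE list is assembled: the first SGE always carries the total SGE count, and a WR with num_sge == 0 still posts one zero-length SGE so an incoming RDMA write with immediate has an RQ entry to consume.

    #include <stdio.h>

    struct sge { unsigned long addr; unsigned len; unsigned nsges; };

    /* hypothetical helper: fill 'out' from a WR with 'num_sge' entries */
    static int build_rq_sges(struct sge *out, int num_sge)
    {
        int i;

        for (i = 0; i < num_sge; i++) {
            out[i].addr = 0x1000 + i;       /* placeholder buffer addresses */
            out[i].len = 256;
            out[i].nsges = i ? 0 : num_sge; /* first SGE carries the count */
        }
        if (!num_sge) {         /* FW wants 1-4 SGEs: post one empty SGE */
            out[0].addr = 0;
            out[0].len = 0;
            out[0].nsges = 1;
            i = 1;
        }
        return i;               /* recorded as the per-WR wqe_size */
    }

    int main(void)
    {
        struct sge s[4];
        printf("wqe_size for an empty WR = %d\n", build_rq_sges(s, 0));
        return 0;
    }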
+
+static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
+ return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
+ cq->pbl_toggle;
+}
+
+static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+ struct qedr_qp *qp;
+
+ qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
+ resp_cqe->qp_handle.lo,
+ u64);
+ return qp;
+}
+
+static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
+ return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
+}
+
+/* Return latest CQE (needs processing) */
+static union rdma_cqe *get_cqe(struct qedr_cq *cq)
+{
+ return cq->latest_cqe;
+}
+
+/* In FMR we need to increase the number of completed FMR operations, which
+ * the FMR algorithm uses to determine whether a PBL can be freed.
+ * This must be done whether or not the work request was signaled; for that
+ * reason this function is called from the condition that checks if a WR
+ * should be skipped, to make sure we don't miss it (possibly this FMR
+ * operation was not signaled).
+ */
+static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
+{
+ if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
+ qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
+}
+
+static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, int num_entries,
+ struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
+ int force)
+{
+ u16 cnt = 0;
+
+ while (num_entries && qp->sq.wqe_cons != hw_cons) {
+ if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
+ qedr_chk_if_fmr(qp);
+ /* skip WC */
+ goto next_cqe;
+ }
+
+ /* fill WC */
+ wc->status = status;
+ wc->wc_flags = 0;
+ wc->src_qp = qp->id;
+ wc->qp = &qp->ibqp;
+
+ wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+ wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
+
+ switch (wc->opcode) {
+ case IB_WC_RDMA_WRITE:
+ wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
+ break;
+ case IB_WC_COMP_SWAP:
+ case IB_WC_FETCH_ADD:
+ wc->byte_len = 8;
+ break;
+ case IB_WC_REG_MR:
+ qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
+ break;
+ default:
+ break;
+ }
+
+ num_entries--;
+ wc++;
+ cnt++;
+next_cqe:
+ while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
+ qed_chain_consume(&qp->sq.pbl);
+ qedr_inc_sw_cons(&qp->sq);
+ }
+
+ return cnt;
+}
+
+static int qedr_poll_cq_req(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct qedr_cq *cq,
+ int num_entries, struct ib_wc *wc,
+ struct rdma_cqe_requester *req)
+{
+ int cnt = 0;
+
+ switch (req->status) {
+ case RDMA_CQE_REQ_STS_OK:
+ cnt = process_req(dev, qp, cq, num_entries, wc,
+ le16_to_cpu(req->sq_cons), IB_WC_SUCCESS, 0);
+ break;
+ case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ cnt = process_req(dev, qp, cq, num_entries, wc,
+ le16_to_cpu(req->sq_cons), IB_WC_WR_FLUSH_ERR, 0);
+ break;
+ default:
+ /* process all WQEs before the consumer */
+ qp->state = QED_ROCE_QP_STATE_ERR;
+ cnt = process_req(dev, qp, cq, num_entries, wc,
+ le16_to_cpu(req->sq_cons) - 1, IB_WC_SUCCESS, 0);
+ wc += cnt;
+ /* if we have extra WC fill it with actual error info */
+ if (cnt < num_entries) {
+ enum ib_wc_status wc_status;
+
+ switch (req->status) {
+ case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_BAD_RESP_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_LOC_LEN_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_LOC_PROT_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_MW_BIND_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_REM_INV_REQ_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_REM_ACCESS_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_REM_OP_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_RNR_RETRY_EXC_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_RETRY_EXC_ERR;
+ break;
+ default:
+ DP_ERR(dev,
+ "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_GENERAL_ERR;
+ }
+ cnt += process_req(dev, qp, cq, 1, wc,
+ le16_to_cpu(req->sq_cons), wc_status, 1);
+ }
+ }
+
+ return cnt;
+}
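
The default branch above encodes a common completion-error convention: every WQE the hardware retired before the failing one is reported with IB_WC_SUCCESS, and exactly one extra work completion carries the translated error status. A compressed stand-alone model of that flow, with hypothetical indices:

    #include <stdio.h>

    /* report [cons, err_idx) as success, then one WC with the real error */
    static int drain_then_error(int cons, int err_idx, int budget)
    {
        int emitted = 0;

        while (budget > 1 && cons < err_idx) {  /* WQEs retired before the error */
            printf("wc[%d]: IB_WC_SUCCESS\n", cons++);
            budget--;
            emitted++;
        }
        if (budget > 0) {                       /* one WC carries the real status */
            printf("wc[%d]: <error status>\n", cons);
            emitted++;
        }
        return emitted;
    }

    int main(void)
    {
        /* WQEs 3..6 flushed as success, WQE 7 carries the error */
        return drain_then_error(3, 7, 16) == 5 ? 0 : 1;
    }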
+
+static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, struct ib_wc *wc,
+ struct rdma_cqe_responder *resp, u64 wr_id)
+{
+ enum ib_wc_status wc_status = IB_WC_SUCCESS;
+ u8 flags;
+
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = 0;
+
+ switch (resp->status) {
+ case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
+ wc_status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
+ wc_status = IB_WC_LOC_LEN_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
+ wc_status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
+ wc_status = IB_WC_LOC_PROT_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
+ wc_status = IB_WC_MW_BIND_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
+ wc_status = IB_WC_REM_INV_RD_REQ_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_OK:
+ wc_status = IB_WC_SUCCESS;
+ wc->byte_len = le32_to_cpu(resp->length);
+
+ flags = resp->flags & QEDR_RESP_RDMA_IMM;
+
+ if (flags == QEDR_RESP_RDMA_IMM)
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+
+ if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
+ wc->ex.imm_data =
+ le32_to_cpu(resp->imm_data_or_inv_r_Key);
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ }
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ DP_ERR(dev, "Invalid CQE status detected\n");
+ }
+
+ /* fill WC */
+ wc->status = wc_status;
+ wc->src_qp = qp->id;
+ wc->qp = &qp->ibqp;
+ wc->wr_id = wr_id;
+}
+
+static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, struct ib_wc *wc,
+ struct rdma_cqe_responder *resp)
+{
+ u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+
+ __process_resp_one(dev, qp, cq, wc, resp, wr_id);
+
+ while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
+ qed_chain_consume(&qp->rq.pbl);
+ qedr_inc_sw_cons(&qp->rq);
+
+ return 1;
+}
+
+static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
+ int num_entries, struct ib_wc *wc, u16 hw_cons)
+{
+ u16 cnt = 0;
+
+ while (num_entries && qp->rq.wqe_cons != hw_cons) {
+ /* fill WC */
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->wc_flags = 0;
+ wc->src_qp = qp->id;
+ wc->byte_len = 0;
+ wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+ wc->qp = &qp->ibqp;
+ num_entries--;
+ wc++;
+ cnt++;
+ while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
+ qed_chain_consume(&qp->rq.pbl);
+ qedr_inc_sw_cons(&qp->rq);
+ }
+
+ return cnt;
+}
+
+static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
+ struct rdma_cqe_responder *resp, int *update)
+{
+ if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
+ consume_cqe(cq);
+ *update |= 1;
+ }
+}
+
+static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, int num_entries,
+ struct ib_wc *wc, struct rdma_cqe_responder *resp,
+ int *update)
+{
+ int cnt;
+
+ if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
+ cnt = process_resp_flush(qp, cq, num_entries, wc,
+ le16_to_cpu(resp->rq_cons));
+ try_consume_resp_cqe(cq, qp, resp, update);
+ } else {
+ cnt = process_resp_one(dev, qp, cq, wc, resp);
+ consume_cqe(cq);
+ *update |= 1;
+ }
+
+ return cnt;
+}
+
+static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
+ struct rdma_cqe_requester *req, int *update)
+{
+ if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
+ consume_cqe(cq);
+ *update |= 1;
+ }
+}
+
+int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ union rdma_cqe *cqe = cq->latest_cqe;
+ u32 old_cons, new_cons;
+ unsigned long flags;
+ int update = 0;
+ int done = 0;
+
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ return qedr_gsi_poll_cq(ibcq, num_entries, wc);
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+ while (num_entries && is_valid_cqe(cq, cqe)) {
+ struct qedr_qp *qp;
+ int cnt = 0;
+
+ /* prevent speculative reads of any field of CQE */
+ rmb();
+
+ qp = cqe_get_qp(cqe);
+ if (!qp) {
+ WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
+ break;
+ }
+
+ wc->qp = &qp->ibqp;
+
+ switch (cqe_get_type(cqe)) {
+ case RDMA_CQE_TYPE_REQUESTER:
+ cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
+ &cqe->req);
+ try_consume_req_cqe(cq, qp, &cqe->req, &update);
+ break;
+ case RDMA_CQE_TYPE_RESPONDER_RQ:
+ cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
+ &cqe->resp, &update);
+ break;
+ case RDMA_CQE_TYPE_INVALID:
+ default:
+ DP_ERR(dev, "Error: invalid CQE type = %d\n",
+ cqe_get_type(cqe));
+ }
+ num_entries -= cnt;
+ wc += cnt;
+ done += cnt;
+
+ cqe = get_cqe(cq);
+ }
+ new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+
+ cq->cq_cons += new_cons - old_cons;
+
+ if (update)
+ /* doorbell notifies about the latest VALID entry,
+ * but the chain already points to the next INVALID one
+ */
+ doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ return done;
+}
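
The poll loop relies on a toggle-bit scheme rather than reading a device head pointer: each CQE carries a phase bit that the hardware flips every time it wraps the ring, and software keeps its expected phase in cq->pbl_toggle. A self-contained model of the idea (hypothetical layout, not the qedr CQE format):

    #include <stdint.h>
    #include <stdio.h>

    #define RING   8
    #define TOGGLE 0x80u

    static uint8_t ring[RING];  /* high bit = phase, low bits = payload */

    int main(void)
    {
        uint8_t expected = TOGGLE; /* hardware writes phase=1 on the first lap */
        unsigned cons = 0, polled = 0;

        /* device "produces" five entries on lap one */
        for (unsigned i = 0; i < 5; i++)
            ring[i] = TOGGLE | (uint8_t)i;

        while ((ring[cons % RING] & TOGGLE) == (expected & TOGGLE)) {
            printf("cqe %u valid (payload %u)\n", cons, ring[cons % RING] & 0x7f);
            cons++;
            polled++;
            if (cons % RING == 0)
                expected ^= TOGGLE; /* wrapped: expect the other phase */
        }
        printf("polled %u entries\n", polled);
        return 0;
    }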
+
+int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u8 port_num,
+ const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *mad_hdr,
+ size_t in_mad_size, struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+ DP_DEBUG(dev, QEDR_MSG_GSI,
+ "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
+ mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
+ mad_hdr->class_specific, mad_hdr->class_version,
+ mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
+ return IB_MAD_RESULT_SUCCESS;
+}
+
+int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = qedr_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
+ RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
new file mode 100644
index 000000000000..a9b5e67bb81e
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -0,0 +1,101 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_VERBS_H__
+#define __QEDR_VERBS_H__
+
+int qedr_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr, struct ib_udata *udata);
+int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
+int qedr_modify_port(struct ib_device *, u8 port, int mask,
+ struct ib_port_modify *props);
+
+int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid);
+
+int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
+
+struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *);
+int qedr_dealloc_ucontext(struct ib_ucontext *);
+
+int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
+int qedr_del_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, void **context);
+int qedr_add_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr, void **context);
+struct ib_pd *qedr_alloc_pd(struct ib_device *,
+ struct ib_ucontext *, struct ib_udata *);
+int qedr_dealloc_pd(struct ib_pd *pd);
+
+struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *ib_ctx,
+ struct ib_udata *udata);
+int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
+int qedr_destroy_cq(struct ib_cq *);
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
+ struct ib_udata *);
+int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *);
+int qedr_destroy_qp(struct ib_qp *ibqp);
+
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr);
+int qedr_destroy_ah(struct ib_ah *ibah);
+
+int qedr_dereg_mr(struct ib_mr *);
+struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length,
+ u64 virt, int acc, struct ib_udata *);
+
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
+int qedr_post_send(struct ib_qp *, struct ib_send_wr *,
+ struct ib_send_wr **bad_wr);
+int qedr_post_recv(struct ib_qp *, struct ib_recv_wr *,
+ struct ib_recv_wr **bad_wr);
+int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in_mad,
+ size_t in_mad_size, struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
+
+int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable);
+#endif
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 2d2b94fd3633..75f08624ac05 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -67,7 +67,8 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
+ num_pages - got,
+ FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0)
goto bail_release;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index a0b6ebee4d8a..1ccee6ea5bc3 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -111,6 +111,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int i;
int flags;
dma_addr_t pa;
+ unsigned int gup_flags;
if (!can_do_mlock())
return -EPERM;
@@ -135,6 +136,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
flags = IOMMU_READ | IOMMU_CACHE;
flags |= (writable) ? IOMMU_WRITE : 0;
+ gup_flags = FOLL_WRITE;
+ gup_flags |= (writable) ? 0 : FOLL_FORCE;
cur_base = addr & PAGE_MASK;
ret = 0;
@@ -142,7 +145,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof(struct page *)),
- 1, !writable, page_list, NULL);
+ gup_flags, page_list, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 7b8d2d9e2263..da12717a3eb7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -63,6 +63,8 @@ enum ipoib_flush_level {
enum {
IPOIB_ENCAP_LEN = 4,
+ IPOIB_PSEUDO_LEN = 20,
+ IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
@@ -134,15 +136,21 @@ struct ipoib_header {
u16 reserved;
};
-struct ipoib_cb {
- struct qdisc_skb_cb qdisc_cb;
- u8 hwaddr[INFINIBAND_ALEN];
+struct ipoib_pseudo_header {
+ u8 hwaddr[INFINIBAND_ALEN];
};
-static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
+static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
- return (struct ipoib_cb *)skb->cb;
+ char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
+
+ /*
+ * only the ipoib header is present now; make room for a dummy
+ * pseudo header and set the skb fields accordingly
+ */
+ memset(data, 0, IPOIB_PSEUDO_LEN);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, IPOIB_HARD_LEN);
}
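
The constants introduced here make the receive-path arithmetic work out so the IP header stays 16-byte aligned. A quick stand-alone check of that arithmetic (IB_GRH_BYTES is 40 and INFINIBAND_ALEN is 20, per the InfiniBand headers):

    #include <assert.h>
    #include <stdio.h>

    #define INFINIBAND_ALEN   20
    #define IPOIB_ENCAP_LEN    4
    #define IPOIB_PSEUDO_LEN  INFINIBAND_ALEN
    #define IPOIB_HARD_LEN   (IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN)
    #define IB_GRH_BYTES      40

    int main(void)
    {
        /* UD path: the skb is reserved by sizeof(pseudo header), then the
         * GRH and the encap header precede the IP header, landing it at 64 */
        int ip_off = IPOIB_PSEUDO_LEN + IB_GRH_BYTES + IPOIB_ENCAP_LEN;

        printf("IP header offset = %d\n", ip_off);
        assert(ip_off == 64 && ip_off % 16 == 0);
        return 0;
    }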
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 4ad297d3de89..339a1eecdfe3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
+#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
+
static struct ib_qp_attr ipoib_cm_err_attr = {
.qp_state = IB_QPS_ERR
};
@@ -146,15 +148,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct sk_buff *skb;
int i;
- skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
+ skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
if (unlikely(!skb))
return NULL;
/*
- * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
+ * IPoIB adds an IPOIB_ENCAP_LEN byte header; this will align the
* IP header to a multiple of 16.
*/
- skb_reserve(skb, 12);
+ skb_reserve(skb, IPOIB_CM_RX_RESERVE);
mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
DMA_FROM_DEVICE);
@@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->byte_len < IPOIB_CM_COPYBREAK) {
int dlen = wc->byte_len;
- small_skb = dev_alloc_skb(dlen + 12);
+ small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
if (small_skb) {
- skb_reserve(small_skb, 12);
+ skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, small_skb->data, dlen);
@@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
copied:
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index be11d5d5b8c1..830fecb6934c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -128,16 +128,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
+ skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
if (unlikely(!skb))
return NULL;
/*
- * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
- * header. So we need 4 more bytes to get to 48 and align the
- * IP header to a multiple of 16.
+ * the IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, that is
+ * offset 64, which keeps it 16-byte aligned
*/
- skb_reserve(skb, 4);
+ skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
mapping = priv->rx_ring[id].mapping;
mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
@@ -253,8 +252,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ae5d7cd100a5..c50794fb92db 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -938,9 +938,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
ipoib_neigh_free(neigh);
goto err_drop;
}
- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
+ if (skb_queue_len(&neigh->queue) <
+ IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&neigh->queue, skb);
- else {
+ } else {
ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
skb_queue_len(&neigh->queue));
goto err_drop;
@@ -977,7 +980,7 @@ err_drop:
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
- struct ipoib_cb *cb)
+ struct ipoib_pseudo_header *phdr)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
@@ -985,16 +988,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, cb->hwaddr + 4);
+ path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
if (!path) {
- path = path_rec_create(dev, cb->hwaddr + 4);
+ path = path_rec_create(dev, phdr->hwaddr + 4);
new_path = 1;
}
if (path) {
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -1022,10 +1027,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
be16_to_cpu(path->pathrec.dlid));
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
return;
} else if ((path->query || !path_rec_start(dev, path)) &&
skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -1039,13 +1046,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
unsigned long flags;
+ phdr = (struct ipoib_pseudo_header *) skb->data;
+ skb_pull(skb, sizeof(*phdr));
header = (struct ipoib_header *) skb->data;
- if (unlikely(cb->hwaddr[4] == 0xff)) {
+ if (unlikely(phdr->hwaddr[4] == 0xff)) {
/* multicast, arrange "if" according to probability */
if ((header->proto != htons(ETH_P_IP)) &&
(header->proto != htons(ETH_P_IPV6)) &&
@@ -1058,13 +1067,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
/* Add in the P_Key for multicast */
- cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
- cb->hwaddr[9] = priv->pkey & 0xff;
+ phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ phdr->hwaddr[9] = priv->pkey & 0xff;
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (likely(neigh))
goto send_using_neigh;
- ipoib_mcast_send(dev, cb->hwaddr, skb);
+ ipoib_mcast_send(dev, phdr->hwaddr, skb);
return NETDEV_TX_OK;
}
@@ -1073,16 +1082,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
case htons(ETH_P_TIPC):
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, cb->hwaddr, dev);
+ neigh_add_path(skb, phdr->hwaddr, dev);
return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
case htons(ETH_P_RARP):
/* for unicast ARP and RARP we should always perform a path lookup */
- unicast_arp_send(skb, dev, cb);
+ unicast_arp_send(skb, dev, phdr);
return NETDEV_TX_OK;
default:
/* ethertype not supported by IPoIB */
@@ -1099,11 +1108,13 @@ send_using_neigh:
goto unref;
}
} else if (neigh->ah) {
- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
goto unref;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(*phdr));
spin_lock_irqsave(&priv->lock, flags);
__skb_queue_tail(&neigh->queue, skb);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1135,8 +1146,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
unsigned short type,
const void *daddr, const void *saddr, unsigned len)
{
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
header = (struct ipoib_header *) skb_push(skb, sizeof *header);
@@ -1145,12 +1156,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
/*
* we don't rely on dst_entry structure, always stuff the
- * destination address into skb->cb so we can figure out where
+ * destination address into the skb hard header so we can figure out where
* to send the packet later.
*/
- memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
+ phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
+ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
- return sizeof *header;
+ return IPOIB_HARD_LEN;
}
static void ipoib_set_mcast_list(struct net_device *dev)
@@ -1772,7 +1784,7 @@ void ipoib_setup(struct net_device *dev)
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- dev->hard_header_len = IPOIB_ENCAP_LEN;
+ dev->hard_header_len = IPOIB_HARD_LEN;
dev->addr_len = INFINIBAND_ALEN;
dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = ipoib_sendq_size * 2;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d3394b6add24..1909dd252c94 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -796,9 +796,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
}
- if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
+ if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(struct ipoib_pseudo_header));
skb_queue_tail(&mcast->pkt_queue, skb);
- else {
+ } else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 936f07a4e35f..6d7de9bfed9a 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -103,6 +103,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
6-byte ALPS packet */
#define ALPS_STICK_BITS 0x100 /* separate stick button bits */
#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
+#define ALPS_DUALPOINT_WITH_PRESSURE 0x400 /* device can report trackpoint pressure */
static const struct alps_model_info alps_model_data[] = {
{ { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* Toshiba Satellite Pro M10 */
@@ -1156,15 +1157,28 @@ static unsigned char alps_get_pkt_id_ss4_v2(unsigned char *byte)
{
unsigned char pkt_id = SS4_PACKET_ID_IDLE;
- if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
- (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 && byte[5] == 0x00) {
- pkt_id = SS4_PACKET_ID_IDLE;
- } else if (!(byte[3] & 0x10)) {
- pkt_id = SS4_PACKET_ID_ONE;
- } else if (!(byte[3] & 0x20)) {
+ switch (byte[3] & 0x30) {
+ case 0x00:
+ if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
+ (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 &&
+ byte[5] == 0x00) {
+ pkt_id = SS4_PACKET_ID_IDLE;
+ } else {
+ pkt_id = SS4_PACKET_ID_ONE;
+ }
+ break;
+ case 0x10:
+ /* two-finger finger positions */
pkt_id = SS4_PACKET_ID_TWO;
- } else {
+ break;
+ case 0x20:
+ /* stick pointer */
+ pkt_id = SS4_PACKET_ID_STICK;
+ break;
+ case 0x30:
+ /* third and fourth finger positions */
pkt_id = SS4_PACKET_ID_MULTI;
+ break;
}
return pkt_id;
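
The rewritten classifier keys on bits 4-5 of byte 3, falling back to the full idle-pattern test only when both bits are clear. A table-style restatement you can compile and poke at, using the same bit tests but hypothetical test vectors:

    #include <stdio.h>

    enum pkt { IDLE, ONE, TWO, STICK, MULTI };

    static enum pkt classify(const unsigned char *b)
    {
        switch (b[3] & 0x30) {
        case 0x00:
            return (b[0] == 0x18 && b[1] == 0x10 && b[2] == 0x00 &&
                    (b[3] & 0x88) == 0x08 && b[4] == 0x10 && b[5] == 0x00)
                   ? IDLE : ONE;
        case 0x10: return TWO;    /* two-finger positions */
        case 0x20: return STICK;  /* trackstick */
        default:   return MULTI;  /* 0x30: third/fourth finger */
        }
    }

    int main(void)
    {
        unsigned char stick_pkt[6] = { 0, 0, 0, 0x20, 0, 0 };
        printf("%d\n", classify(stick_pkt)); /* prints 3 (STICK) */
        return 0;
    }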
@@ -1185,7 +1199,13 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
f->mt[0].x = SS4_1F_X_V2(p);
f->mt[0].y = SS4_1F_Y_V2(p);
f->pressure = ((SS4_1F_Z_V2(p)) * 2) & 0x7f;
- f->fingers = 1;
+ /*
+ * When a button is held the device will give us events
+ * with x, y, and pressure of 0. This causes annoying jumps
+ * if a touch is released while the button is held.
+ * Handle this by claiming zero contacts.
+ */
+ f->fingers = f->pressure > 0 ? 1 : 0;
f->first_mp = 0;
f->is_mp = 0;
break;
@@ -1246,16 +1266,40 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
}
break;
+ case SS4_PACKET_ID_STICK:
+ if (!(priv->flags & ALPS_DUALPOINT)) {
+ psmouse_warn(psmouse,
+ "Rejected trackstick packet from non DualPoint device");
+ } else {
+ int x = (s8)(((p[0] & 1) << 7) | (p[1] & 0x7f));
+ int y = (s8)(((p[3] & 1) << 7) | (p[2] & 0x7f));
+ int pressure = (s8)(p[4] & 0x7f);
+
+ input_report_rel(priv->dev2, REL_X, x);
+ input_report_rel(priv->dev2, REL_Y, -y);
+ input_report_abs(priv->dev2, ABS_PRESSURE, pressure);
+ }
+ break;
+
case SS4_PACKET_ID_IDLE:
default:
memset(f, 0, sizeof(struct alps_fields));
break;
}
- f->left = !!(SS4_BTN_V2(p) & 0x01);
- if (!(priv->flags & ALPS_BUTTONPAD)) {
- f->right = !!(SS4_BTN_V2(p) & 0x02);
- f->middle = !!(SS4_BTN_V2(p) & 0x04);
+ /* handle buttons */
+ if (pkt_id == SS4_PACKET_ID_STICK) {
+ f->ts_left = !!(SS4_BTN_V2(p) & 0x01);
+ if (!(priv->flags & ALPS_BUTTONPAD)) {
+ f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
+ f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
+ }
+ } else {
+ f->left = !!(SS4_BTN_V2(p) & 0x01);
+ if (!(priv->flags & ALPS_BUTTONPAD)) {
+ f->right = !!(SS4_BTN_V2(p) & 0x02);
+ f->middle = !!(SS4_BTN_V2(p) & 0x04);
+ }
}
return 0;
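
The trackstick case above stitches an 8-bit two's-complement value out of two packet bytes (the sign bit from one byte, the magnitude bits from another) and relies on the (s8) cast for sign extension. A worked example with hypothetical packet bytes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned char p0 = 0x01, p1 = 0x7f; /* sign bit set, magnitude all ones */
        int x = (int8_t)(((p0 & 1) << 7) | (p1 & 0x7f));

        printf("raw byte 0x%02x -> %d\n",
               ((p0 & 1) << 7) | (p1 & 0x7f), x); /* 0xff -> -1 */
        return 0;
    }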
@@ -1266,6 +1310,7 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
struct alps_data *priv = psmouse->private;
unsigned char *packet = psmouse->packet;
struct input_dev *dev = psmouse->dev;
+ struct input_dev *dev2 = priv->dev2;
struct alps_fields *f = &priv->f;
memset(f, 0, sizeof(struct alps_fields));
@@ -1311,6 +1356,13 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
input_report_abs(dev, ABS_PRESSURE, f->pressure);
input_sync(dev);
+
+ if (priv->flags & ALPS_DUALPOINT) {
+ input_report_key(dev2, BTN_LEFT, f->ts_left);
+ input_report_key(dev2, BTN_RIGHT, f->ts_right);
+ input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
+ input_sync(dev2);
+ }
}
static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
@@ -2695,6 +2747,10 @@ static int alps_set_protocol(struct psmouse *psmouse,
if (alps_set_defaults_ss4_v2(psmouse, priv))
return -EIO;
+ if (priv->fw_ver[1] == 0x1)
+ priv->flags |= ALPS_DUALPOINT |
+ ALPS_DUALPOINT_WITH_PRESSURE;
+
break;
}
@@ -2767,6 +2823,9 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
} else if (e7[0] == 0x73 && e7[1] == 0x03 &&
e7[2] == 0x14 && ec[1] == 0x02) {
protocol = &alps_v8_protocol_data;
+ } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
+ e7[2] == 0x28 && ec[1] == 0x01) {
+ protocol = &alps_v8_protocol_data;
} else {
psmouse_dbg(psmouse,
"Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
@@ -2949,6 +3008,10 @@ int alps_init(struct psmouse *psmouse)
input_set_capability(dev2, EV_REL, REL_X);
input_set_capability(dev2, EV_REL, REL_Y);
+ if (priv->flags & ALPS_DUALPOINT_WITH_PRESSURE) {
+ input_set_capability(dev2, EV_ABS, ABS_PRESSURE);
+ input_set_abs_params(dev2, ABS_PRESSURE, 0, 127, 0, 0);
+ }
input_set_capability(dev2, EV_KEY, BTN_LEFT);
input_set_capability(dev2, EV_KEY, BTN_RIGHT);
input_set_capability(dev2, EV_KEY, BTN_MIDDLE);
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index d37f814dc447..b9417e2d7ad3 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -37,12 +37,14 @@
* or there's button activities.
* SS4_PACKET_ID_TWO: There's two or more fingers on touchpad
* SS4_PACKET_ID_MULTI: There's three or more fingers on touchpad
+ * SS4_PACKET_ID_STICK: A stick pointer packet
*/
enum SS4_PACKET_ID {
SS4_PACKET_ID_IDLE = 0,
SS4_PACKET_ID_ONE,
SS4_PACKET_ID_TWO,
SS4_PACKET_ID_MULTI,
+ SS4_PACKET_ID_STICK,
};
#define SS4_COUNT_PER_ELECTRODE 256
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 08e252a42480..db7d1d666ac1 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1134,7 +1134,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* System76 Pangolin 0x250f01 ? 2 hw buttons
* (*) + 3 trackpoint buttons
* (**) + 0 trackpoint buttons
- * Note: Lenovo L430 and Lenovo L430 have the same fw_version/caps
+ * Note: Lenovo L430 and Lenovo L530 have the same fw_version/caps
*/
static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
{
@@ -1159,6 +1159,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
},
},
+ {
+ /* Fujitsu H760 also has a middle button */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
+ },
+ },
#endif
{ }
};
@@ -1503,10 +1510,10 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
},
},
{
- /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ /* Fujitsu H760 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
},
},
{
@@ -1517,6 +1524,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
},
},
{
+ /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+ },
+ },
+ {
+ /* Fujitsu LIFEBOOK E556 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c
index 6f2e0e4f0296..1ebc2c1debae 100644
--- a/drivers/input/rmi4/rmi_i2c.c
+++ b/drivers/input/rmi4/rmi_i2c.c
@@ -221,6 +221,21 @@ static const struct of_device_id rmi_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, rmi_i2c_of_match);
#endif
+static void rmi_i2c_regulator_bulk_disable(void *data)
+{
+ struct rmi_i2c_xport *rmi_i2c = data;
+
+ regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
+ rmi_i2c->supplies);
+}
+
+static void rmi_i2c_unregister_transport(void *data)
+{
+ struct rmi_i2c_xport *rmi_i2c = data;
+
+ rmi_unregister_transport_device(&rmi_i2c->xport);
+}
+
static int rmi_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -264,6 +279,12 @@ static int rmi_i2c_probe(struct i2c_client *client,
if (retval < 0)
return retval;
+ retval = devm_add_action_or_reset(&client->dev,
+ rmi_i2c_regulator_bulk_disable,
+ rmi_i2c);
+ if (retval)
+ return retval;
+
of_property_read_u32(client->dev.of_node, "syna,startup-delay-ms",
&rmi_i2c->startup_delay);
@@ -294,6 +315,11 @@ static int rmi_i2c_probe(struct i2c_client *client,
client->addr);
return retval;
}
+ retval = devm_add_action_or_reset(&client->dev,
+ rmi_i2c_unregister_transport,
+ rmi_i2c);
+ if (retval)
+ return retval;
retval = rmi_i2c_init_irq(client);
if (retval < 0)
@@ -304,17 +330,6 @@ static int rmi_i2c_probe(struct i2c_client *client,
return 0;
}
-static int rmi_i2c_remove(struct i2c_client *client)
-{
- struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
-
- rmi_unregister_transport_device(&rmi_i2c->xport);
- regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
- rmi_i2c->supplies);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int rmi_i2c_suspend(struct device *dev)
{
@@ -431,7 +446,6 @@ static struct i2c_driver rmi_i2c_driver = {
},
.id_table = rmi_id,
.probe = rmi_i2c_probe,
- .remove = rmi_i2c_remove,
};
module_i2c_driver(rmi_i2c_driver);
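
Both RMI transports can drop their .remove callbacks because devm actions run in reverse registration order when the device unbinds, and the _or_reset variant invokes the action immediately if registration itself fails. A toy user-space model of that contract (hypothetical names, not the kernel API):

    #include <stdio.h>

    typedef void (*action_fn)(void *);
    static action_fn acts[8];
    static void *args[8];
    static int nacts;

    /* toy devm_add_action_or_reset(): run the action at once if it
     * cannot be recorded, otherwise remember it for unbind time */
    static int add_action_or_reset(action_fn fn, void *data)
    {
        if (nacts == 8) {
            fn(data);
            return -1;
        }
        acts[nacts] = fn;
        args[nacts] = data;
        nacts++;
        return 0;
    }

    static void unbind(void)
    {
        while (nacts) {
            nacts--;
            acts[nacts](args[nacts]); /* reverse registration order */
        }
    }

    static void disable_regulators(void *d)   { printf("regulators off (%s)\n", (char *)d); }
    static void unregister_transport(void *d) { printf("transport gone (%s)\n", (char *)d); }

    int main(void)
    {
        add_action_or_reset(disable_regulators, "rmi_i2c");
        add_action_or_reset(unregister_transport, "rmi_i2c");
        unbind(); /* prints transport teardown first, then regulators */
        return 0;
    }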
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
index 55bd1b34970c..4ebef607e214 100644
--- a/drivers/input/rmi4/rmi_spi.c
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -396,6 +396,13 @@ static inline int rmi_spi_of_probe(struct spi_device *spi,
}
#endif
+static void rmi_spi_unregister_transport(void *data)
+{
+ struct rmi_spi_xport *rmi_spi = data;
+
+ rmi_unregister_transport_device(&rmi_spi->xport);
+}
+
static int rmi_spi_probe(struct spi_device *spi)
{
struct rmi_spi_xport *rmi_spi;
@@ -464,6 +471,11 @@ static int rmi_spi_probe(struct spi_device *spi)
dev_err(&spi->dev, "failed to register transport.\n");
return retval;
}
+ retval = devm_add_action_or_reset(&spi->dev,
+ rmi_spi_unregister_transport,
+ rmi_spi);
+ if (retval)
+ return retval;
retval = rmi_spi_init_irq(spi);
if (retval < 0)
@@ -473,15 +485,6 @@ static int rmi_spi_probe(struct spi_device *spi)
return 0;
}
-static int rmi_spi_remove(struct spi_device *spi)
-{
- struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
-
- rmi_unregister_transport_device(&rmi_spi->xport);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int rmi_spi_suspend(struct device *dev)
{
@@ -577,7 +580,6 @@ static struct spi_driver rmi_spi_driver = {
},
.id_table = rmi_id,
.probe = rmi_spi_probe,
- .remove = rmi_spi_remove,
};
module_spi_driver(rmi_spi_driver);
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index a5eed2ade53d..34da81c006b6 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -81,7 +81,7 @@ static inline int i8042_platform_init(void)
return -EBUSY;
#endif
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-ip22io.h b/drivers/input/serio/i8042-ip22io.h
index ee1ad27d6ed0..08a1c10a1448 100644
--- a/drivers/input/serio/i8042-ip22io.h
+++ b/drivers/input/serio/i8042-ip22io.h
@@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
return -EBUSY;
#endif
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h
index f708c75d16f1..1aabea43329e 100644
--- a/drivers/input/serio/i8042-ppcio.h
+++ b/drivers/input/serio/i8042-ppcio.h
@@ -44,7 +44,7 @@ static inline void i8042_write_command(int val)
static inline int i8042_platform_init(void)
{
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index afcd1c1a05b2..6231d63860ee 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -130,7 +130,7 @@ static int __init i8042_platform_init(void)
}
}
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h
index 73f5cc124a36..455747552f85 100644
--- a/drivers/input/serio/i8042-unicore32io.h
+++ b/drivers/input/serio/i8042-unicore32io.h
@@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042"))
return -EBUSY;
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 68f5f4a0f1e7..f4bfb4b2d50a 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -510,6 +510,90 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
{ }
};
+/*
+ * On some Asus laptops, just running self tests causes problems.
+ */
+static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R409L"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"),
+ },
+ },
+ { }
+};
static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
{
/* MSI Wind U-100 */
@@ -1072,12 +1156,18 @@ static int __init i8042_platform_init(void)
return retval;
#if defined(__ia64__)
- i8042_reset = true;
+ i8042_reset = I8042_RESET_ALWAYS;
#endif
#ifdef CONFIG_X86
- if (dmi_check_system(i8042_dmi_reset_table))
- i8042_reset = true;
+ /* Honor module parameter when value is not default */
+ if (i8042_reset == I8042_RESET_DEFAULT) {
+ if (dmi_check_system(i8042_dmi_reset_table))
+ i8042_reset = I8042_RESET_ALWAYS;
+
+ if (dmi_check_system(i8042_dmi_noselftest_table))
+ i8042_reset = I8042_RESET_NEVER;
+ }
if (dmi_check_system(i8042_dmi_noloop_table))
i8042_noloop = true;
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 405252a884dd..89abfdb539ac 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -48,9 +48,39 @@ static bool i8042_unlock;
module_param_named(unlock, i8042_unlock, bool, 0);
MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
-static bool i8042_reset;
-module_param_named(reset, i8042_reset, bool, 0);
-MODULE_PARM_DESC(reset, "Reset controller during init and cleanup.");
+enum i8042_controller_reset_mode {
+ I8042_RESET_NEVER,
+ I8042_RESET_ALWAYS,
+ I8042_RESET_ON_S2RAM,
+#define I8042_RESET_DEFAULT I8042_RESET_ON_S2RAM
+};
+static enum i8042_controller_reset_mode i8042_reset = I8042_RESET_DEFAULT;
+static int i8042_set_reset(const char *val, const struct kernel_param *kp)
+{
+ enum i8042_controller_reset_mode *arg = kp->arg;
+ int error;
+ bool reset;
+
+ if (val) {
+ error = kstrtobool(val, &reset);
+ if (error)
+ return error;
+ } else {
+ reset = true;
+ }
+
+ *arg = reset ? I8042_RESET_ALWAYS : I8042_RESET_NEVER;
+ return 0;
+}
+
+static const struct kernel_param_ops param_ops_reset_param = {
+ .flags = KERNEL_PARAM_OPS_FL_NOARG,
+ .set = i8042_set_reset,
+};
+#define param_check_reset_param(name, p) \
+ __param_check(name, p, enum i8042_controller_reset_mode)
+module_param_named(reset, i8042_reset, reset_param, 0);
+MODULE_PARM_DESC(reset, "Reset controller on resume, cleanup or both");
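
Given the setter above, and KERNEL_PARAM_OPS_FL_NOARG allowing the parameter to appear without a value, the visible behaviour (as far as the code shown here indicates, e.g. on the kernel command line) is:

    i8042.reset          -> I8042_RESET_ALWAYS   (val == NULL, treated as true)
    i8042.reset=1        -> I8042_RESET_ALWAYS
    i8042.reset=0        -> I8042_RESET_NEVER
    parameter absent     -> I8042_RESET_ON_S2RAM (the new default)

so existing users of the boolean parameter keep their old semantics while untouched systems gain the softer reset-on-resume default.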
static bool i8042_direct;
module_param_named(direct, i8042_direct, bool, 0);
@@ -1019,7 +1049,7 @@ static int i8042_controller_init(void)
* Reset the controller and reset CRT to the original value set by BIOS.
*/
-static void i8042_controller_reset(bool force_reset)
+static void i8042_controller_reset(bool s2r_wants_reset)
{
i8042_flush();
@@ -1044,8 +1074,10 @@ static void i8042_controller_reset(bool force_reset)
* Reset the controller if requested.
*/
- if (i8042_reset || force_reset)
+ if (i8042_reset == I8042_RESET_ALWAYS ||
+ (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
i8042_controller_selftest();
+ }
/*
* Restore the original control register setting.
@@ -1110,7 +1142,7 @@ static void i8042_dritek_enable(void)
* before suspending.
*/
-static int i8042_controller_resume(bool force_reset)
+static int i8042_controller_resume(bool s2r_wants_reset)
{
int error;
@@ -1118,7 +1150,8 @@ static int i8042_controller_resume(bool force_reset)
if (error)
return error;
- if (i8042_reset || force_reset) {
+ if (i8042_reset == I8042_RESET_ALWAYS ||
+ (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
error = i8042_controller_selftest();
if (error)
return error;
@@ -1195,7 +1228,7 @@ static int i8042_pm_resume_noirq(struct device *dev)
static int i8042_pm_resume(struct device *dev)
{
- bool force_reset;
+ bool want_reset;
int i;
for (i = 0; i < I8042_NUM_PORTS; i++) {
@@ -1218,9 +1251,9 @@ static int i8042_pm_resume(struct device *dev)
* off control to the platform firmware, otherwise we can simply restore
* the mode.
*/
- force_reset = pm_resume_via_firmware();
+ want_reset = pm_resume_via_firmware();
- return i8042_controller_resume(force_reset);
+ return i8042_controller_resume(want_reset);
}
static int i8042_pm_thaw(struct device *dev)
@@ -1482,7 +1515,7 @@ static int __init i8042_probe(struct platform_device *dev)
i8042_platform_device = dev;
- if (i8042_reset) {
+ if (i8042_reset == I8042_RESET_ALWAYS) {
error = i8042_controller_selftest();
if (error)
return error;
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index fb5fb9140ca9..552a3773f79d 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -157,6 +157,7 @@ struct mip4_ts {
char phys[32];
char product_name[16];
+ char ic_name[4];
unsigned int max_x;
unsigned int max_y;
@@ -263,6 +264,18 @@ static int mip4_query_device(struct mip4_ts *ts)
dev_dbg(&ts->client->dev, "product name: %.*s\n",
(int)sizeof(ts->product_name), ts->product_name);
+ /* IC name */
+ cmd[0] = MIP4_R0_INFO;
+ cmd[1] = MIP4_R1_INFO_IC_NAME;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd),
+ ts->ic_name, sizeof(ts->ic_name));
+ if (error)
+ dev_warn(&ts->client->dev,
+ "Failed to retrieve IC name: %d\n", error);
+ else
+ dev_dbg(&ts->client->dev, "IC name: %.*s\n",
+ (int)sizeof(ts->ic_name), ts->ic_name);
+
/* Firmware version */
error = mip4_get_fw_version(ts);
if (error)
@@ -1326,7 +1339,7 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev,
* paired with current firmware in the chip.
*/
count = snprintf(buf, PAGE_SIZE, "%.*s\n",
- (int)sizeof(ts->product_name), ts->product_name);
+ (int)sizeof(ts->product_name), ts->product_name);
mutex_unlock(&ts->input->mutex);
@@ -1335,9 +1348,30 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev,
static DEVICE_ATTR(hw_version, S_IRUGO, mip4_sysfs_read_hw_version, NULL);
+static ssize_t mip4_sysfs_read_ic_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ size_t count;
+
+ mutex_lock(&ts->input->mutex);
+
+ count = snprintf(buf, PAGE_SIZE, "%.*s\n",
+ (int)sizeof(ts->ic_name), ts->ic_name);
+
+ mutex_unlock(&ts->input->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(ic_name, S_IRUGO, mip4_sysfs_read_ic_name, NULL);
+
static struct attribute *mip4_attrs[] = {
&dev_attr_fw_version.attr,
&dev_attr_hw_version.attr,
+ &dev_attr_ic_name.attr,
&dev_attr_update_fw.attr,
NULL,
};
@@ -1538,6 +1572,6 @@ static struct i2c_driver mip4_driver = {
module_i2c_driver(mip4_driver);
MODULE_DESCRIPTION("MELFAS MIP4 Touchscreen");
-MODULE_VERSION("2016.03.12");
+MODULE_VERSION("2016.09.28");
MODULE_AUTHOR("Sangwon Jee <jeesw@melfas.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index c0e7b624ce54..12102448fddd 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -178,7 +178,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
idev->id_vendor, idev->id_device);
}
-ipack_device_attr(id_format, "0x%hhu\n");
+ipack_device_attr(id_format, "0x%hhx\n");
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(id_device);
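
The one-character format fix above matters because %hhu prints the byte in decimal while the literal "0x" prefix promises hexadecimal. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned char id = 255;

        printf("0x%hhu\n", id); /* misleading: prints 0x255 */
        printf("0x%hhx\n", id); /* correct:    prints 0xff  */
        return 0;
    }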
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 82b0b5daf3f5..bc0af3307bbf 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -158,8 +158,8 @@ config PIC32_EVIC
select IRQ_DOMAIN
config JCORE_AIC
- bool "J-Core integrated AIC"
- depends on OF && (SUPERH || COMPILE_TEST)
+ bool "J-Core integrated AIC" if COMPILE_TEST
+ depends on OF
select IRQ_DOMAIN
help
Support for the J-Core integrated AIC.
diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c
index efbf0e4304b7..2a7a38830a8d 100644
--- a/drivers/irqchip/irq-eznps.c
+++ b/drivers/irqchip/irq-eznps.c
@@ -85,7 +85,7 @@ static void nps400_irq_eoi_global(struct irq_data *irqd)
nps_ack_gic();
}
-static void nps400_irq_eoi(struct irq_data *irqd)
+static void nps400_irq_ack(struct irq_data *irqd)
{
unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
@@ -103,7 +103,7 @@ static struct irq_chip nps400_irq_chip_percpu = {
.name = "NPS400 IC",
.irq_mask = nps400_irq_mask,
.irq_unmask = nps400_irq_unmask,
- .irq_eoi = nps400_irq_eoi,
+ .irq_ack = nps400_irq_ack,
};
static int nps400_irq_map(struct irq_domain *d, unsigned int virq,
@@ -135,7 +135,7 @@ static const struct irq_domain_ops nps400_irq_ops = {
static int __init nps400_of_init(struct device_node *node,
struct device_node *parent)
{
- static struct irq_domain *nps400_root_domain;
+ struct irq_domain *nps400_root_domain;
if (parent) {
pr_err("DeviceTree incore ic not a root irq controller\n");
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 003495d91f9c..c5dee300e8a3 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1023,7 +1023,7 @@ static void its_free_tables(struct its_node *its)
static int its_alloc_tables(struct its_node *its)
{
- u64 typer = readq_relaxed(its->base + GITS_TYPER);
+ u64 typer = gic_read_typer(its->base + GITS_TYPER);
u32 ids = GITS_TYPER_DEVBITS(typer);
u64 shr = GITS_BASER_InnerShareable;
u64 cache = GITS_BASER_WaWb;
@@ -1198,7 +1198,7 @@ static void its_cpu_init_collection(void)
* We now have to bind each collection to its target
* redistributor.
*/
- if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
+ if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
/*
* This ITS wants the physical address of the
* redistributor.
@@ -1208,7 +1208,7 @@ static void its_cpu_init_collection(void)
/*
* This ITS wants a linear CPU number.
*/
- target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
+ target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
target = GICR_TYPER_CPU_NUMBER(target) << 16;
}
@@ -1691,7 +1691,7 @@ static int __init its_probe_one(struct resource *res,
INIT_LIST_HEAD(&its->its_device_list);
its->base = its_base;
its->phys_base = res->start;
- its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+ its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
its->numa_node = numa_node;
its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
@@ -1763,7 +1763,7 @@ out_unmap:
static bool gic_rdists_supports_plpis(void)
{
- return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
+ return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}
int its_cpu_init(void)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 9b81bd8b929c..19d642eae096 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -153,7 +153,7 @@ static void gic_enable_redist(bool enable)
return; /* No PM support in this redistributor */
}
- while (count--) {
+ while (--count) {
val = readl_relaxed(rbase + GICR_WAKER);
if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
break;
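
The pre-decrement here is not cosmetic. With while (count--) the unsigned counter wraps to UINT_MAX after a full timeout, so any subsequent timeout test of the form if (!count) can never fire; with while (--count) the counter bottoms out at exactly 0. A quick comparison:

    #include <stdio.h>

    int main(void)
    {
        unsigned int count;

        count = 3;
        while (count--)
            ; /* body runs 3 times */
        printf("post-decrement leaves count = %u\n", count); /* UINT_MAX */

        count = 3;
        while (--count)
            ; /* body runs 2 times */
        printf("pre-decrement leaves count = %u\n", count);  /* 0 */
        return 0;
    }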
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 58e5b4e87056..d6c404b3584d 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1279,7 +1279,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
*/
*base += 0xf000;
cpuif_res.start += 0xf000;
- pr_warn("GIC: Adjusting CPU interface base to %pa",
+ pr_warn("GIC: Adjusting CPU interface base to %pa\n",
&cpuif_res.start);
}
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index 6b304eb39bd2..1aec12c6d9ac 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -38,6 +38,7 @@ static void disable_8259A_irq(struct irq_data *d);
static void enable_8259A_irq(struct irq_data *d);
static void mask_and_ack_8259A(struct irq_data *d);
static void init_8259A(int auto_eoi);
+static int (*i8259_poll)(void) = i8259_irq;
static struct irq_chip i8259A_chip = {
.name = "XT-PIC",
@@ -51,6 +52,11 @@ static struct irq_chip i8259A_chip = {
* 8259A PIC functions to handle ISA devices:
*/
+void i8259_set_poll(int (*poll)(void))
+{
+ i8259_poll = poll;
+}
+
/*
* This contains the irq mask for both 8259A irq controllers,
*/
@@ -89,24 +95,6 @@ static void enable_8259A_irq(struct irq_data *d)
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
-int i8259A_irq_pending(unsigned int irq)
-{
- unsigned int mask;
- unsigned long flags;
- int ret;
-
- irq -= I8259A_IRQ_BASE;
- mask = 1 << irq;
- raw_spin_lock_irqsave(&i8259A_lock, flags);
- if (irq < 8)
- ret = inb(PIC_MASTER_CMD) & mask;
- else
- ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-
- return ret;
-}
-
void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
@@ -355,7 +343,7 @@ void __init init_i8259_irqs(void)
static void i8259_irq_dispatch(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
- int hwirq = i8259_irq();
+ int hwirq = i8259_poll();
unsigned int irq;
if (hwirq < 0)
@@ -370,13 +358,15 @@ int __init i8259_of_init(struct device_node *node, struct device_node *parent)
struct irq_domain *domain;
unsigned int parent_irq;
+ domain = __init_i8259_irqs(node);
+
parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) {
pr_err("Failed to map i8259 parent IRQ\n");
+ irq_domain_remove(domain);
return -ENODEV;
}
- domain = __init_i8259_irqs(node);
irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch,
domain);
return 0;
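The new i8259_set_poll() hook lets a platform substitute its own pending-interrupt query for the default i8259_irq() PIO poll used by the chained dispatch above. A hypothetical registration, where my_plat_read_pending() stands in for a platform-specific helper returning the pending hwirq or a negative value:

static int my_plat_i8259_poll(void)
{
	/* hypothetical: fetch the pending vector from a platform register */
	return my_plat_read_pending();
}

static int __init my_plat_pic_init(void)
{
	i8259_set_poll(my_plat_i8259_poll);
	return 0;
}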
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index 84b01dec277d..033bccb41455 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -25,12 +25,30 @@
static struct irq_chip jcore_aic;
+/*
+ * The J-Core AIC1 and AIC2 are cpu-local interrupt controllers and do
+ * not distinguish or use distinct irq number ranges for per-cpu event
+ * interrupts (timer, IPI). Since information to determine whether a
+ * particular irq number should be treated as per-cpu is not available
+ * at mapping time, we use a wrapper handler function which chooses
+ * the right handler at runtime based on whether IRQF_PERCPU was used
+ * when requesting the irq.
+ */
+
+static void handle_jcore_irq(struct irq_desc *desc)
+{
+ if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
+ handle_percpu_irq(desc);
+ else
+ handle_simple_irq(desc);
+}
+
static int jcore_aic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct irq_chip *aic = d->host_data;
- irq_set_chip_and_handler(irq, aic, handle_simple_irq);
+ irq_set_chip_and_handler(irq, aic, handle_jcore_irq);
return 0;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8abde6b8cedc..6d53810963f7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -266,7 +266,7 @@ static struct raid_type {
{"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
{"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
{"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
- {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
+ {"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
{"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
{"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
{"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
@@ -2087,11 +2087,11 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
/*
* No takeover/reshaping, because we don't have the extended v1.9.0 metadata
*/
- if (le32_to_cpu(sb->level) != mddev->level) {
+ if (le32_to_cpu(sb->level) != mddev->new_level) {
DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
return -EINVAL;
}
- if (le32_to_cpu(sb->layout) != mddev->layout) {
+ if (le32_to_cpu(sb->layout) != mddev->new_layout) {
DMERR("Reshaping raid sets not yet supported. (raid layout change)");
DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
DMERR(" Old layout: %s w/ %d copies",
@@ -2102,7 +2102,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
raid10_md_layout_to_copies(mddev->layout));
return -EINVAL;
}
- if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+ if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
return -EINVAL;
}
@@ -2115,6 +2115,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
return -EINVAL;
}
+ DMINFO("Discovered old metadata format; upgrading to extended metadata format");
+
/* Table line is checked vs. authoritative superblock */
rs_set_new(rs);
}
@@ -2258,7 +2260,8 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
if (!mddev->events && super_init_validation(rs, rdev))
return -EINVAL;
- if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
+ if (le32_to_cpu(sb->compat_features) &&
+ le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
return -EINVAL;
}
@@ -3646,7 +3649,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 9, 0},
+ .version = {1, 9, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index bdf1606f67bc..9a8b71067c6e 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,7 +145,6 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
struct dm_raid1_bio_record {
struct mirror *m;
- /* if details->bi_bdev == NULL, details were not saved */
struct dm_bio_details details;
region_t write_region;
};
@@ -1200,8 +1199,6 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
struct dm_raid1_bio_record *bio_record =
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
- bio_record->details.bi_bdev = NULL;
-
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1260,22 +1257,12 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
}
if (error == -EOPNOTSUPP)
- goto out;
+ return error;
if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
- goto out;
+ return error;
if (unlikely(error)) {
- if (!bio_record->details.bi_bdev) {
- /*
- * There wasn't enough memory to record necessary
- * information for a retry or there was no other
- * mirror in-sync.
- */
- DMERR_LIMIT("Mirror read failed.");
- return -EIO;
- }
-
m = bio_record->m;
DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1291,7 +1278,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
bd = &bio_record->details;
dm_bio_restore(bd, bio);
- bio_record->details.bi_bdev = NULL;
+ bio->bi_error = 0;
queue_bio(ms, bio, rw);
return DM_ENDIO_INCOMPLETE;
@@ -1299,9 +1286,6 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
DMERR("All replicated volumes dead, failing I/O");
}
-out:
- bio_record->details.bi_bdev = NULL;
-
return error;
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index dc75bea0d541..1d0d2adc050a 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -856,8 +856,11 @@ int dm_old_init_request_queue(struct mapped_device *md)
kthread_init_worker(&md->kworker);
md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
"kdmwork-%s", dm_device_name(md));
- if (IS_ERR(md->kworker_task))
- return PTR_ERR(md->kworker_task);
+ if (IS_ERR(md->kworker_task)) {
+ int error = PTR_ERR(md->kworker_task);
+ md->kworker_task = NULL;
+ return error;
+ }
elv_register_queue(md->queue);
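The hunk above follows the standard kthread_run() error convention: on failure it returns an ERR_PTR() cookie, never NULL, so the stored pointer has to be cleared or a later teardown path would hand the cookie to kthread_stop(). The pattern in isolation (generic sketch, not driver code):

struct task_struct *task;
int err;

task = kthread_run(fn, data, "worker");
if (IS_ERR(task)) {
	err = PTR_ERR(task);
	task = NULL;	/* don't let cleanup pass the ERR_PTR cookie to kthread_stop() */
	return err;
}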
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3e407a9cde1f..c4b53b332607 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -695,37 +695,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,
tgt->type = dm_get_target_type(type);
if (!tgt->type) {
- DMERR("%s: %s: unknown target type", dm_device_name(t->md),
- type);
+ DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
return -EINVAL;
}
if (dm_target_needs_singleton(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: target type %s must appear alone in table",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "singleton target type must appear alone in table";
+ goto bad;
}
t->singleton = true;
}
if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
- DMERR("%s: target type %s may not be included in read-only tables",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "target type may not be included in a read-only table";
+ goto bad;
}
if (t->immutable_target_type) {
if (t->immutable_target_type != tgt->type) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), t->immutable_target_type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
} else if (dm_target_is_immutable(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), tgt->type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
t->immutable_target_type = tgt->type;
}
@@ -740,7 +735,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
*/
if (!adjoin(t, tgt)) {
tgt->error = "Gap in table";
- r = -EINVAL;
goto bad;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 147af9536d0c..ef7bf1dd6900 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1423,8 +1423,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->bs)
bioset_free(md->bs);
- cleanup_srcu_struct(&md->io_barrier);
-
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
@@ -1436,6 +1434,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->queue)
blk_cleanup_queue(md->queue);
+ cleanup_srcu_struct(&md->io_barrier);
+
if (md->bdev) {
bdput(md->bdev);
md->bdev = NULL;
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 4769469fe842..2c9232ef7baa 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -124,8 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
}
/* Get user pages for DMA Xfer */
- err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0,
- 1, dma->map);
+ err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
+ dma->map, FOLL_FORCE);
if (user_dma.page_count != err) {
IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index b094054cda6e..f7299d3d8244 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -76,11 +76,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
/* Get user pages for DMA Xfer */
y_pages = get_user_pages_unlocked(y_dma.uaddr,
- y_dma.page_count, 0, 1, &dma->map[0]);
+ y_dma.page_count, &dma->map[0], FOLL_FORCE);
uv_pages = 0; /* silence gcc. value is set and consumed only if: */
if (y_pages == y_dma.page_count) {
uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
- uv_dma.page_count, 0, 1, &dma->map[y_pages]);
+ uv_dma.page_count, &dma->map[y_pages],
+ FOLL_FORCE);
}
if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index e668dde6d857..a31b95cb3b09 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -214,7 +214,7 @@ static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
if (!vec)
return -ENOMEM;
- ret = get_vaddr_frames(virtp, 1, true, false, vec);
+ ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec);
if (ret != 1) {
frame_vector_destroy(vec);
return -EINVAL;
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 29b3436d0910..367523a3c774 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -27,7 +27,7 @@ config VIDEO_FIXED_MINOR_RANGES
config VIDEO_PCI_SKELETON
tristate "Skeleton PCI V4L2 driver"
- depends on PCI && BUILD_DOCSRC
+ depends on PCI
depends on VIDEO_V4L2 && VIDEOBUF2_CORE
depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG
---help---
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index f300f060b3f3..1db0af6c7f94 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
{
unsigned long first, last;
int err, rw = 0;
+ unsigned int flags = FOLL_FORCE;
dma->direction = direction;
switch (dma->direction) {
@@ -178,12 +179,14 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
if (NULL == dma->pages)
return -ENOMEM;
+ if (rw == READ)
+ flags |= FOLL_WRITE;
+
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
- rw == READ, 1, /* force */
- dma->pages, NULL);
+ flags, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
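This and the neighbouring media hunks all track the same kernel API change: get_user_pages() and its variants replaced the separate write/force int arguments with a single gup_flags bitmask. The calling convention before and after, as an illustrative fragment:

long err;
unsigned int flags = FOLL_FORCE;

/* before: separate write and force ints */
err = get_user_pages(addr, nr_pages, write, 1 /* force */, pages, NULL);

/* after: one gup_flags bitmask */
if (write)
	flags |= FOLL_WRITE;
err = get_user_pages(addr, nr_pages, flags, pages, NULL);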
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 3c3b517f1d1c..1cd322e939c7 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -42,6 +42,10 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
unsigned long first, last;
unsigned long nr;
struct frame_vector *vec;
+ unsigned int flags = FOLL_FORCE;
+
+ if (write)
+ flags |= FOLL_WRITE;
first = start >> PAGE_SHIFT;
last = (start + length - 1) >> PAGE_SHIFT;
@@ -49,7 +53,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
- ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
+ ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
if (ret < 0)
goto out_destroy;
/* We accept only complete set of PFNs */
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index d34bc3530385..2e3cf012ef48 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
int rc;
if (!host->req) {
+ pm_runtime_get_sync(ms_dev(host));
do {
rc = memstick_next_req(msh, &host->req);
dev_dbg(ms_dev(host), "next req %d\n", rc);
@@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
host->req->error);
}
} while (!rc);
+ pm_runtime_put(ms_dev(host));
}
}
@@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
__func__, param, value);
+ pm_runtime_get_sync(ms_dev(host));
mutex_lock(&ucr->dev_mutex);
err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD);
@@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
}
out:
mutex_unlock(&ucr->dev_mutex);
+ pm_runtime_put(ms_dev(host));
/* power-on delay */
if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON)
@@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
int err;
for (;;) {
+ pm_runtime_get_sync(ms_dev(host));
mutex_lock(&ucr->dev_mutex);
/* Check pending MS card changes */
@@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
}
poll_again:
+ pm_runtime_put(ms_dev(host));
if (host->eject)
break;
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index f3d34b941f85..2e5233b60971 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -229,6 +229,14 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
if (ctx->status == STARTED)
goto out; /* already started */
+ /*
+	 * Increment the mapped context count for the adapter. This also
+	 * checks whether adapter_context_lock is taken.
+ */
+ rc = cxl_adapter_context_get(ctx->afu->adapter);
+ if (rc)
+ goto out;
+
if (task) {
ctx->pid = get_task_pid(task, PIDTYPE_PID);
ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
@@ -239,7 +247,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
cxl_ctx_get();
if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
+ put_pid(ctx->glpid);
put_pid(ctx->pid);
+ ctx->glpid = ctx->pid = NULL;
+ cxl_adapter_context_put(ctx->afu->adapter);
cxl_ctx_put();
goto out;
}
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index c466ee2b0c97..5e506c19108a 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -238,6 +238,9 @@ int __detach_context(struct cxl_context *ctx)
put_pid(ctx->glpid);
cxl_ctx_put();
+
+ /* Decrease the attached context count on the adapter */
+ cxl_adapter_context_put(ctx->afu->adapter);
return 0;
}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 01d372aba131..a144073593fa 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -618,6 +618,14 @@ struct cxl {
bool perst_select_user;
bool perst_same_image;
bool psl_timebase_synced;
+
+ /*
+	 * Number of contexts mapped onto this card. Possible values are:
+	 * >0: contexts are mapped and new ones can be mapped.
+	 *  0: no active contexts and new ones can be mapped.
+	 * -1: no contexts mapped and new ones cannot be mapped.
+ */
+ atomic_t contexts_num;
};
int cxl_pci_alloc_one_irq(struct cxl *adapter);
@@ -944,4 +952,20 @@ bool cxl_pci_is_vphb_device(struct pci_dev *dev);
/* decode AFU error bits in the PSL register PSL_SERR_An */
void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr);
+
+/*
+ * Increments the number of attached contexts on an adapter.
+ * If the adapter_context_lock is taken, this returns -EBUSY.
+ */
+int cxl_adapter_context_get(struct cxl *adapter);
+
+/* Decrements the number of attached contexts on an adapter */
+void cxl_adapter_context_put(struct cxl *adapter);
+
+/* If there are no active contexts, prevents new contexts from being attached */
+int cxl_adapter_context_lock(struct cxl *adapter);
+
+/* Unlock the contexts lock if taken; warn and force-unlock otherwise */
+void cxl_adapter_context_unlock(struct cxl *adapter);
+
#endif
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 5fb9894b157f..77080cc5fa0a 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -194,6 +194,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
/*
+	 * Increment the mapped context count for the adapter. This also
+	 * checks whether adapter_context_lock is taken.
+ */
+ rc = cxl_adapter_context_get(ctx->afu->adapter);
+ if (rc) {
+ afu_release_irqs(ctx, ctx);
+ goto out;
+ }
+
+ /*
* We grab the PID here and not in the file open to allow for the case
* where a process (master, some daemon, etc) has opened the chardev on
* behalf of another process, so the AFU's mm gets bound to the process
@@ -205,11 +215,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
amr))) {
afu_release_irqs(ctx, ctx);
+ cxl_adapter_context_put(ctx->afu->adapter);
+ put_pid(ctx->glpid);
+ put_pid(ctx->pid);
+ ctx->glpid = ctx->pid = NULL;
goto out;
}
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 9aa58a77a24d..3e102cd6ed91 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -1152,6 +1152,9 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic
if ((rc = cxl_sysfs_adapter_add(adapter)))
goto err_put1;
+	/* Release the context lock now that the adapter is configured */
+ cxl_adapter_context_unlock(adapter);
+
return adapter;
err_put1:
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index d9be23b24aa3..62e0dfb5f15b 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -243,8 +243,10 @@ struct cxl *cxl_alloc_adapter(void)
if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
goto err2;
- return adapter;
+ /* start with context lock taken */
+ atomic_set(&adapter->contexts_num, -1);
+ return adapter;
err2:
cxl_remove_adapter_nr(adapter);
err1:
@@ -286,6 +288,44 @@ int cxl_afu_select_best_mode(struct cxl_afu *afu)
return 0;
}
+int cxl_adapter_context_get(struct cxl *adapter)
+{
+ int rc;
+
+ rc = atomic_inc_unless_negative(&adapter->contexts_num);
+ return rc >= 0 ? 0 : -EBUSY;
+}
+
+void cxl_adapter_context_put(struct cxl *adapter)
+{
+ atomic_dec_if_positive(&adapter->contexts_num);
+}
+
+int cxl_adapter_context_lock(struct cxl *adapter)
+{
+ int rc;
+ /* no active contexts -> contexts_num == 0 */
+ rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1);
+ return rc ? -EBUSY : 0;
+}
+
+void cxl_adapter_context_unlock(struct cxl *adapter)
+{
+ int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0);
+
+ /*
+ * contexts lock taken -> contexts_num == -1
+	 * If that is not the case, warn and force-reset the lock. This
+	 * happens when context_unlock is requested without a prior
+	 * context_lock.
+ */
+ if (val != -1) {
+ atomic_set(&adapter->contexts_num, 0);
+ WARN(1, "Adapter context unlocked with %d active contexts",
+ val);
+ }
+}
+
static int __init init_cxl(void)
{
int rc = 0;
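The contexts_num field added in this series is a tri-state atomic counter: -1 means the adapter is locked, 0 means idle and lockable, and positive values count attached contexts; cxl_adapter_context_get() is essentially atomic_inc_unless_negative(). A self-contained userspace sketch of the same pattern with C11 atomics (all names here are illustrative, not part of the cxl driver):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int contexts_num = -1;	/* start locked, as cxl_alloc_adapter() does */

/* increment unless negative: fails (like -EBUSY) while the lock value -1 is held */
static int context_get(void)
{
	int old = atomic_load(&contexts_num);

	while (old >= 0)
		if (atomic_compare_exchange_weak(&contexts_num, &old, old + 1))
			return 0;
	return -1;
}

/* only succeeds when no contexts are attached (counter == 0) */
static int context_lock(void)
{
	int expected = 0;

	return atomic_compare_exchange_strong(&contexts_num, &expected, -1) ? 0 : -1;
}

int main(void)
{
	printf("lock while locked: %d\n", context_lock());	/* -1 */
	atomic_store(&contexts_num, 0);				/* unlock */
	printf("get: %d\n", context_get());			/* 0 */
	printf("lock with a user: %d\n", context_lock());	/* -1 */
	return 0;
}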
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 7afad8477ad5..e96be9ca4e60 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1487,6 +1487,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = cxl_native_register_psl_err_irq(adapter)))
goto err;
+	/* Release the context lock now that the adapter is configured */
+ cxl_adapter_context_unlock(adapter);
return 0;
err:
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index b043c20f158f..a8b6d6a635e9 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -75,12 +75,31 @@ static ssize_t reset_adapter_store(struct device *device,
int val;
rc = sscanf(buf, "%i", &val);
- if ((rc != 1) || (val != 1))
+ if ((rc != 1) || (val != 1 && val != -1))
return -EINVAL;
- if ((rc = cxl_ops->adapter_reset(adapter)))
- return rc;
- return count;
+ /*
+ * See if we can lock the context mapping that's only allowed
+ * when there are no contexts attached to the adapter. Once
+ * taken this will also prevent any context from getting activated.
+ */
+ if (val == 1) {
+ rc = cxl_adapter_context_lock(adapter);
+ if (rc)
+ goto out;
+
+ rc = cxl_ops->adapter_reset(adapter);
+ /* In case reset failed release context lock */
+ if (rc)
+ cxl_adapter_context_unlock(adapter);
+
+ } else if (val == -1) {
+ /* Perform a forced adapter reset */
+ rc = cxl_ops->adapter_reset(adapter);
+ }
+
+out:
+ return rc ? rc : count;
}
static ssize_t load_image_on_perst_show(struct device *device,
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 8a679ecc8fd1..fc2794b513fa 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -352,17 +352,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
if (copy_from_user(sgl->lpage, user_addr + user_size -
sgl->lpage_size, sgl->lpage_size)) {
rc = -EFAULT;
- goto err_out1;
+ goto err_out2;
}
}
return 0;
+ err_out2:
+ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
+ sgl->lpage_dma_addr);
+ sgl->lpage = NULL;
+ sgl->lpage_dma_addr = 0;
err_out1:
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
sgl->fpage_dma_addr);
+ sgl->fpage = NULL;
+ sgl->fpage_dma_addr = 0;
err_out:
__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
sgl->sgl_dma_addr);
+ sgl->sgl = NULL;
+ sgl->sgl_dma_addr = 0;
+ sgl->sgl_size = 0;
return -ENOMEM;
}
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index e6e5e55a12ed..60415a2bfcbd 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -981,11 +981,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
hisr = mei_txe_br_reg_read(hw, HISR_REG);
aliveness = mei_txe_aliveness_get(dev);
- if (hhisr & IPC_HHIER_SEC && aliveness)
+ if (hhisr & IPC_HHIER_SEC && aliveness) {
ipc_isr = mei_txe_sec_reg_read_silent(hw,
SEC_IPC_HOST_INT_STATUS_REG);
- else
+ } else {
ipc_isr = 0;
+ hhisr &= ~IPC_HHIER_SEC;
+ }
generated = generated ||
(hisr & HISR_INT_STS_MSK) ||
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index e0203b1a20fd..f806a4471eb9 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1396,8 +1396,7 @@ retry:
pinned_pages->nr_pages = get_user_pages(
(u64)addr,
nr_pages,
- !!(prot & SCIF_PROT_WRITE),
- 0,
+ (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
pinned_pages->pages,
NULL);
up_write(&mm->mmap_sem);
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index a2d97b9b17e3..6fb773dbcd0c 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -198,7 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
#else
*pageshift = PAGE_SHIFT;
#endif
- if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
+ if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
return -EFAULT;
*paddr = page_to_phys(page);
put_page(page);
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 1525870f460a..33741ad4a74a 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -283,7 +283,7 @@ static void gru_unload_mm_tracker(struct gru_state *gru,
spin_lock(&gru->gs_asid_lock);
BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
asids->mt_ctxbitmap ^= ctxbitmap;
- gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n",
+ gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
spin_unlock(&gru->gs_asid_lock);
spin_unlock(&gms->ms_asid_lock);
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index a8cee33ae8d2..b3fa738ae005 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -431,6 +431,12 @@ int vmci_doorbell_create(struct vmci_handle *handle,
if (vmci_handle_is_invalid(*handle)) {
u32 context_id = vmci_get_context_id();
+ if (context_id == VMCI_INVALID_ID) {
+ pr_warn("Failed to get context ID\n");
+ result = VMCI_ERROR_NO_RESOURCES;
+ goto free_mem;
+ }
+
/* Let resource code allocate a free ID for us */
new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
} else {
@@ -525,7 +531,7 @@ int vmci_doorbell_destroy(struct vmci_handle handle)
entry = container_of(resource, struct dbell_entry, resource);
- if (vmci_guest_code_active()) {
+ if (!hlist_unhashed(&entry->node)) {
int result;
dbell_index_table_remove(entry);
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 896be150e28f..d7eaf1eb11e7 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.4.0-k");
+MODULE_VERSION("1.1.5.0-k");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c3335112e68c..709a872ed484 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -46,6 +46,7 @@
#include <asm/uaccess.h>
#include "queue.h"
+#include "block.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
@@ -1786,7 +1787,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
struct mmc_blk_data *md = mq->data;
struct mmc_packed *packed = mqrq->packed;
bool do_rel_wr, do_data_tag;
- u32 *packed_cmd_hdr;
+ __le32 *packed_cmd_hdr;
u8 hdr_blocks;
u8 i = 1;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 3c15a75bae86..342f1e3f301e 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -31,7 +31,7 @@ enum mmc_packed_type {
struct mmc_packed {
struct list_head list;
- u32 cmd_hdr[1024];
+ __le32 cmd_hdr[1024];
unsigned int blocks;
u8 nr_entries;
u8 retries;
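Declaring cmd_hdr as __le32 makes the header's wire endianness explicit and lets sparse flag any direct store, so every write must now go through cpu_to_le32(). Roughly how the producer side looks after this change (simplified from the packed-header setup in block.c):

/* each header word is little-endian on the wire, so convert explicitly */
packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
				(PACKED_CMD_WR << 8) | PACKED_CMD_VER);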
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 3486bc7fbb64..39fc5b2b96c5 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1263,6 +1263,16 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+
+ if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+
+	/* If this fails, try again during the next card power cycle */
+ if (err)
+ goto out_err;
+
err = mmc_select_bus_width(card);
if (err < 0)
goto out_err;
@@ -1272,6 +1282,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
if (err)
goto out_err;
+ mmc_set_clock(host, card->ext_csd.hs_max_dtr);
+
err = mmc_switch_status(card);
if (err)
goto out_err;
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 4106295527b9..6e9c0f8fddb1 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
dev_dbg(sdmmc_dev(host), "%s\n", __func__);
mutex_lock(&ucr->dev_mutex);
- if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) {
- mutex_unlock(&ucr->dev_mutex);
- return;
- }
-
sd_set_power_mode(host, ios->power_mode);
sd_set_bus_width(host, ios->bus_width);
sd_set_timing(host, ios->timing, &host->ddr_mode);
@@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
container_of(work, struct rtsx_usb_sdmmc, led_work);
struct rtsx_ucr *ucr = host->ucr;
+ pm_runtime_get_sync(sdmmc_dev(host));
mutex_lock(&ucr->dev_mutex);
if (host->led.brightness == LED_OFF)
@@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
rtsx_usb_turn_on_led(ucr);
mutex_unlock(&ucr->dev_mutex);
+ pm_runtime_put(sdmmc_dev(host));
}
#endif
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1f54fd8755c8..7123ef96ed18 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -346,7 +346,8 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 data;
- if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
+ if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE ||
+ reg == SDHCI_INT_STATUS)) {
if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) {
/*
* Clear and then set D3CD bit to avoid missing the
@@ -555,6 +556,25 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
esdhc_clrset_le(host, 0xffff, val, reg);
}
+static u8 esdhc_readb_le(struct sdhci_host *host, int reg)
+{
+ u8 ret;
+ u32 val;
+
+ switch (reg) {
+ case SDHCI_HOST_CONTROL:
+ val = readl(host->ioaddr + reg);
+
+ ret = val & SDHCI_CTRL_LED;
+ ret |= (val >> 5) & SDHCI_CTRL_DMA_MASK;
+ ret |= (val & ESDHC_CTRL_4BITBUS);
+ ret |= (val & ESDHC_CTRL_8BITBUS) << 3;
+ return ret;
+ }
+
+ return readb(host->ioaddr + reg);
+}
+
static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -947,6 +967,7 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
+ .read_b = esdhc_readb_le,
.write_l = esdhc_writel_le,
.write_w = esdhc_writew_le,
.write_b = esdhc_writeb_le,
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index da8e40af6f85..410a55b1c25f 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -250,7 +250,7 @@ static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc,
writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER);
}
-void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
+static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
{
u8 ctrl;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -265,6 +265,28 @@ void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
}
}
+static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_180:
+ /*
+		 * Please don't switch to 1V8: the arasan,5.1 controller
+		 * doesn't actually consult this setting for the signal
+		 * voltage, and forcing 1V8 would break its state machine.
+		 * That is effectively a controller quirk, which we work
+		 * around here.
+ */
+ return 0;
+ case MMC_SIGNAL_VOLTAGE_330:
+ case MMC_SIGNAL_VOLTAGE_120:
+ /* We don't support 3V3 and 1V2 */
+ break;
+ }
+
+ return -EINVAL;
+}
+
static struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -661,6 +683,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
host->mmc_host_ops.hs400_enhanced_strobe =
sdhci_arasan_hs400_enhanced_strobe;
+ host->mmc_host_ops.start_signal_voltage_switch =
+ sdhci_arasan_voltage_switch;
}
ret = sdhci_add_host(host);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 72a1f1f5180a..1d9e00a00e9f 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -32,6 +32,14 @@
#include "sdhci-pci.h"
#include "sdhci-pci-o2micro.h"
+static int sdhci_pci_enable_dma(struct sdhci_host *host);
+static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width);
+static void sdhci_pci_hw_reset(struct sdhci_host *host);
+static int sdhci_pci_select_drive_strength(struct sdhci_host *host,
+ struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
+
/*****************************************************************************\
* *
* Hardware specific quirk handling *
@@ -390,6 +398,45 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
+#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
+#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
+
+static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ int cntr;
+ u8 reg;
+
+ sdhci_set_power(host, mode, vdd);
+
+ if (mode == MMC_POWER_OFF)
+ return;
+
+ /*
+ * Bus power might not enable after D3 -> D0 transition due to the
+ * present state not yet having propagated. Retry for up to 2ms.
+ */
+ for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
+ reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ if (reg & SDHCI_POWER_ON)
+ break;
+ udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
+ reg |= SDHCI_POWER_ON;
+ sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
+ }
+}
+
+static const struct sdhci_ops sdhci_intel_byt_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_power = sdhci_intel_set_power,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_pci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_pci_hw_reset,
+ .select_drive_strength = sdhci_pci_select_drive_strength,
+};
+
static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
.allow_runtime_pm = true,
.probe_slot = byt_emmc_probe_slot,
@@ -397,6 +444,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
SDHCI_QUIRK2_STOP_WITH_TC,
+ .ops = &sdhci_intel_byt_ops,
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
@@ -405,6 +453,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.allow_runtime_pm = true,
.probe_slot = byt_sdio_probe_slot,
+ .ops = &sdhci_intel_byt_ops,
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
@@ -415,6 +464,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
.allow_runtime_pm = true,
.own_cd_for_runtime_pm = true,
.probe_slot = byt_sd_probe_slot,
+ .ops = &sdhci_intel_byt_ops,
};
/* Define Host controllers for Intel Merrifield platform */
@@ -1648,7 +1698,9 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
}
host->hw_name = "PCI";
- host->ops = &sdhci_pci_ops;
+ host->ops = chip->fixes && chip->fixes->ops ?
+ chip->fixes->ops :
+ &sdhci_pci_ops;
host->quirks = chip->quirks;
host->quirks2 = chip->quirks2;
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 9c7c08b93223..6bccf56bc5ff 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -65,6 +65,8 @@ struct sdhci_pci_fixes {
int (*suspend) (struct sdhci_pci_chip *);
int (*resume) (struct sdhci_pci_chip *);
+
+ const struct sdhci_ops *ops;
};
struct sdhci_pci_slot {
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index dd1938d341f7..d0f5c05fbc19 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -315,7 +315,7 @@ static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
struct mmc_host *mmc = host->mmc;
u8 pwr = host->pwr;
- sdhci_set_power(host, mode, vdd);
+ sdhci_set_power_noreg(host, mode, vdd);
if (host->pwr == pwr)
return;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 48055666c655..71654b90227f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -687,7 +687,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
* host->clock is in Hz. target_timeout is in us.
* Hence, us = 1000000 * cycles / Hz. Round up.
*/
- val = 1000000 * data->timeout_clks;
+ val = 1000000ULL * data->timeout_clks;
if (do_div(val, host->clock))
target_timeout++;
target_timeout += val;
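The ULL suffix is needed because with two 32-bit operands the multiplication itself happens in 32 bits and wraps before the result is widened into the u64; plausible timeout_clks values overflow well within range. A standalone illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t timeout_clks = 100000;
	uint64_t bad  = 1000000 * timeout_clks;		/* 32-bit multiply, wraps */
	uint64_t good = 1000000ULL * timeout_clks;	/* 64-bit multiply */

	printf("bad=%llu good=%llu\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}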
@@ -1077,6 +1077,10 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
/* Initially, a command has no error */
cmd->error = 0;
+ if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
+ cmd->opcode == MMC_STOP_TRANSMISSION)
+ cmd->flags |= MMC_RSP_BUSY;
+
/* Wait max 10 ms */
timeout = 10;
@@ -1390,8 +1394,8 @@ static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}
-void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
+void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
{
u8 pwr = 0;
@@ -1455,20 +1459,17 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
mdelay(10);
}
}
-EXPORT_SYMBOL_GPL(sdhci_set_power);
+EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
-static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
{
- struct mmc_host *mmc = host->mmc;
-
- if (host->ops->set_power)
- host->ops->set_power(host, mode, vdd);
- else if (!IS_ERR(mmc->supply.vmmc))
- sdhci_set_power_reg(host, mode, vdd);
+ if (IS_ERR(host->mmc->supply.vmmc))
+ sdhci_set_power_noreg(host, mode, vdd);
else
- sdhci_set_power(host, mode, vdd);
+ sdhci_set_power_reg(host, mode, vdd);
}
+EXPORT_SYMBOL_GPL(sdhci_set_power);
/*****************************************************************************\
* *
@@ -1609,7 +1610,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
- __sdhci_set_power(host, ios->power_mode, ios->vdd);
+ if (host->ops->set_power)
+ host->ops->set_power(host, ios->power_mode, ios->vdd);
+ else
+ sdhci_set_power(host, ios->power_mode, ios->vdd);
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -2409,7 +2413,7 @@ static void sdhci_timeout_data_timer(unsigned long data)
* *
\*****************************************************************************/
-static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
+static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
if (!host->cmd) {
/*
@@ -2453,11 +2457,6 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
return;
}
- if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
- !(host->cmd->flags & MMC_RSP_BUSY) && !host->data &&
- host->cmd->opcode == MMC_STOP_TRANSMISSION)
- *mask &= ~SDHCI_INT_DATA_END;
-
if (intmask & SDHCI_INT_RESPONSE)
sdhci_finish_command(host);
}
@@ -2680,8 +2679,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
}
if (intmask & SDHCI_INT_CMD_MASK)
- sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
- &intmask);
+ sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
if (intmask & SDHCI_INT_DATA_MASK)
sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index c722cd23205c..766df17fb7eb 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -683,6 +683,8 @@ u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
+void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 95c4048a371e..388e46be6ad9 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -741,6 +741,7 @@ static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
goto out_put;
}
+ vid_hdr = ubi_get_vid_hdr(vidb);
ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
mutex_lock(&ubi->buf_mutex);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index d6384d965788..c1f5c29e458e 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -287,7 +287,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
/* new_aeb is newer */
if (cmp_res & 1) {
- victim = ubi_alloc_aeb(ai, aeb->ec, aeb->pnum);
+ victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
if (!victim)
return -ENOMEM;
@@ -707,11 +707,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
fmvhdr->vol_type,
be32_to_cpu(fmvhdr->last_eb_bytes));
- if (!av)
- goto fail_bad;
- if (PTR_ERR(av) == -EINVAL) {
- ubi_err(ubi, "volume (ID %i) already exists",
- fmvhdr->vol_id);
+ if (IS_ERR(av)) {
+ if (PTR_ERR(av) == -EEXIST)
+ ubi_err(ubi, "volume (ID %i) already exists",
+ fmvhdr->vol_id);
+
goto fail_bad;
}
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index 76fb8552c9d9..ef63d24fef81 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -256,6 +256,7 @@ static const struct of_device_id b53_mmap_of_table[] = {
{ .compatible = "brcm,bcm63xx-switch" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
static struct platform_driver b53_mmap_driver = {
.probe = b53_mmap_probe,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index e218887f18b7..e3ee27ce13dd 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1133,6 +1133,20 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
return 0;
}
+static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
+{
+ struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+
+ /* For a kernel about to be kexec'd we want to keep the GPHY on for a
+ * successful MDIO bus scan to occur. If we did turn off the GPHY
+	 * before (e.g. port_disable), this will also power it back on.
+ *
+ * Do not rely on kexec_in_progress, just power the PHY on.
+ */
+ if (priv->hw_params.num_gphy == 1)
+ bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+}
+
#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
@@ -1158,10 +1172,12 @@ static const struct of_device_id bcm_sf2_of_match[] = {
{ .compatible = "brcm,bcm7445-switch-v4.0" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
static struct platform_driver bcm_sf2_driver = {
.probe = bcm_sf2_sw_probe,
.remove = bcm_sf2_sw_remove,
+ .shutdown = bcm_sf2_sw_shutdown,
.driver = {
.name = "brcm-sf2",
.of_match_table = bcm_sf2_of_match,
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 99c40552ea90..b59aa3541270 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1359,6 +1359,7 @@ static const struct of_device_id nb8800_dt_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, nb8800_dt_ids);
static int nb8800_probe(struct platform_device *pdev)
{
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 7e513cacb57a..5c7acef1de2e 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1126,7 +1126,8 @@ out_freeirq:
free_irq(dev->irq, dev);
out_phy_disconnect:
- phy_disconnect(phydev);
+ if (priv->has_phy)
+ phy_disconnect(phydev);
return ret;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 856379cbb402..31ca204b38d2 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1449,7 +1449,7 @@ static int bgmac_phy_connect(struct bgmac *bgmac)
phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
- dev_err(bgmac->dev, "PHY connecton failed\n");
+ dev_err(bgmac->dev, "PHY connection failed\n");
return PTR_ERR(phy_dev);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2a5df3f71e9f..eab49ff4eecd 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -271,22 +271,25 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
+ unsigned long flags;
u32 val;
- spin_lock_bh(&bp->indirect_lock);
+ spin_lock_irqsave(&bp->indirect_lock, flags);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
return val;
}
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
- spin_lock_bh(&bp->indirect_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->indirect_lock, flags);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
static void
@@ -304,8 +307,10 @@ bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
+ unsigned long flags;
+
offset += cid_addr;
- spin_lock_bh(&bp->indirect_lock);
+ spin_lock_irqsave(&bp->indirect_lock, flags);
if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
int i;
@@ -322,7 +327,7 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
BNX2_WR(bp, BNX2_CTX_DATA, val);
}
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
#ifdef BCM_CNIC
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 67b6180bdbf6..ab990da677d5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15244,7 +15244,7 @@ static void bnx2x_init_cyclecounter(struct bnx2x *bp)
memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
bp->cyclecounter.read = bnx2x_cyclecounter_read;
bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
- bp->cyclecounter.shift = 1;
+ bp->cyclecounter.shift = 0;
bp->cyclecounter.mult = 1;
}
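A cyclecounter converts cycles to nanoseconds as ns = (cycles * mult) >> shift, so with mult = 1 the shift must be 0 for an identity conversion; the old shift of 1 silently halved every PTP timestamp. In miniature:

u64 cycles = 1000;
u64 ns_old = (cycles * 1) >> 1;	/* 500 -- wrong, half rate */
u64 ns_new = (cycles * 1) >> 0;	/* 1000 -- identity */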
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index b0bb23f95beb..c0cc2ee77be7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4055,7 +4055,7 @@ static void cfg_queues(struct adapter *adap)
* capped by the number of available cores.
*/
if (n10g) {
- i = num_online_cpus();
+ i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
s->ofldqsets = roundup(i, adap->params.nports);
} else {
s->ofldqsets = adap->params.nports;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 0945fa49a5dd..2471ff465d5c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -135,15 +135,17 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
}
static int alloc_uld_rxqs(struct adapter *adap,
- struct sge_uld_rxq_info *rxq_info,
- unsigned int nq, unsigned int offset, bool lro)
+ struct sge_uld_rxq_info *rxq_info, bool lro)
{
struct sge *s = &adap->sge;
- struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
- unsigned short *ids = rxq_info->rspq_id + offset;
- unsigned int per_chan = nq / adap->params.nports;
+ unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
+ struct sge_ofld_rxq *q = rxq_info->uldrxq;
+ unsigned short *ids = rxq_info->rspq_id;
unsigned int bmap_idx = 0;
- int i, err, msi_idx;
+ unsigned int per_chan;
+ int i, err, msi_idx, que_idx = 0;
+
+ per_chan = rxq_info->nrxq / adap->params.nports;
if (adap->flags & USING_MSIX)
msi_idx = 1;
@@ -151,12 +153,18 @@ static int alloc_uld_rxqs(struct adapter *adap,
msi_idx = -((int)s->intrq.abs_id + 1);
for (i = 0; i < nq; i++, q++) {
+ if (i == rxq_info->nrxq) {
+ /* start allocation of concentrator queues */
+ per_chan = rxq_info->nciq / adap->params.nports;
+ que_idx = 0;
+ }
+
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
msi_idx = adap->msix_info_ulds[bmap_idx].idx;
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
- adap->port[i / per_chan],
+ adap->port[que_idx++ / per_chan],
msi_idx,
q->fl.size ? &q->fl : NULL,
uldrx_handler,
@@ -165,29 +173,19 @@ static int alloc_uld_rxqs(struct adapter *adap,
if (err)
goto freeout;
if (msi_idx >= 0)
- rxq_info->msix_tbl[i + offset] = bmap_idx;
+ rxq_info->msix_tbl[i] = bmap_idx;
memset(&q->stats, 0, sizeof(q->stats));
if (ids)
ids[i] = q->rspq.abs_id;
}
return 0;
freeout:
- q = rxq_info->uldrxq + offset;
+ q = rxq_info->uldrxq;
for ( ; i; i--, q++) {
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
}
-
- /* We need to free rxq also in case of ciq allocation failure */
- if (offset) {
- q = rxq_info->uldrxq + offset;
- for ( ; i; i--, q++) {
- if (q->rspq.desc)
- free_rspq_fl(adap, &q->rspq,
- q->fl.size ? &q->fl : NULL);
- }
- }
return err;
}
@@ -205,9 +203,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
return -ENOMEM;
}
- ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
- !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
- rxq_info->nrxq, lro));
+ ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */
if (adap->flags & FULL_INIT_DONE &&
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 539de764bbd3..cbd68a8fe2e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -210,8 +210,10 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
/* Unbind queue from any existing class */
err = t4_sched_queue_unbind(pi, p);
- if (err)
+ if (err) {
+ t4_free_mem(qe);
goto out;
+ }
/* Bind queue to specified class */
memset(qe, 0, sizeof(*qe));
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index e572a527b18d..36bc2c71fba9 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -169,19 +169,28 @@ int vnic_rq_disable(struct vnic_rq *rq)
{
unsigned int wait;
struct vnic_dev *vdev = rq->vdev;
+ int i;
- iowrite32(0, &rq->ctrl->enable);
+ /* Due to a race condition with clearing RQ "mini-cache" in hw, we need
+ * to disable the RQ twice to guarantee that stale descriptors are not
+ * used when this RQ is re-enabled.
+ */
+ for (i = 0; i < 2; i++) {
+ iowrite32(0, &rq->ctrl->enable);
- /* Wait for HW to ACK disable request */
- for (wait = 0; wait < 1000; wait++) {
- if (!(ioread32(&rq->ctrl->running)))
- return 0;
- udelay(10);
- }
+ /* Wait for HW to ACK disable request */
+ for (wait = 20000; wait > 0; wait--)
+ if (!ioread32(&rq->ctrl->running))
+ break;
+ if (!wait) {
+ vdev_neterr(vdev, "Failed to disable RQ[%d]\n",
+ rq->index);
- vdev_neterr(vdev, "Failed to disable RQ[%d]\n", rq->index);
+ return -ETIMEDOUT;
+ }
+ }
- return -ETIMEDOUT;
+ return 0;
}
void vnic_rq_clean(struct vnic_rq *rq,
@@ -212,6 +221,11 @@ void vnic_rq_clean(struct vnic_rq *rq,
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
iowrite32(fetch_index, &rq->ctrl->posted_index);
+ /* Anytime we write fetch_index, we need to re-write 0 to rq->enable
+ * to re-sync internal VIC state.
+ */
+ iowrite32(0, &rq->ctrl->enable);
+
vnic_dev_clear_desc_ring(&rq->ring);
}
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index f928e6f79c89..223f35cc034c 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -669,6 +669,7 @@ static const struct of_device_id nps_enet_dt_ids[] = {
{ .compatible = "ezchip,nps-mgt-enet" },
{ /* Sentinel */ }
};
+MODULE_DEVICE_TABLE(of, nps_enet_dt_ids);
static struct platform_driver nps_enet_driver = {
.probe = nps_enet_probe,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 43b2839a3d11..01aebc0084a9 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1430,14 +1430,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_put(skb, pkt_len - 4);
data = skb->data;
+ if (!is_copybreak && need_swap)
+ swap_buffer(data, pkt_len);
+
#if !defined(CONFIG_M5272)
if (fep->quirks & FEC_QUIRK_HAS_RACC)
data = skb_pull_inline(skb, 2);
#endif
- if (!is_copybreak && need_swap)
- swap_buffer(data, pkt_len);
-
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
if (fep->bufdesc_ex)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index e28d960997af..2d0cb609adc3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -207,6 +207,7 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
int ret;
char *mac_addr = (char *)addr;
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ u8 port_num;
assert(mac_cb);
@@ -221,8 +222,11 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
return ret;
}
- ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM,
- mac_addr, true);
+ ret = hns_mac_get_inner_port_num(mac_cb, handle->vf_id, &port_num);
+ if (ret)
+ return ret;
+
+ ret = hns_mac_set_multi(mac_cb, port_num, mac_addr, true);
if (ret)
dev_err(handle->owner_dev,
"mac add mul_mac:%pM port%d fail, ret = %#x!\n",
@@ -678,9 +682,6 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
ret = -EINVAL;
}
- if (!ret)
- hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en);
-
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index d8e99416ab24..55cbb6ce733c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -141,9 +141,10 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
*@port_num:port number
*
*/
-static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
- u8 vmid, u8 *port_num)
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num)
{
+ int q_num_per_vf, vf_num_per_port;
+ int vm_queue_id;
u8 tmp_port;
if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
@@ -174,6 +175,12 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
return -EINVAL;
}
+ q_num_per_vf = mac_cb->dsaf_dev->rcb_common[0]->max_q_per_vf;
+ vf_num_per_port = mac_cb->dsaf_dev->rcb_common[0]->max_vfn;
+
+ vm_queue_id = vmid * q_num_per_vf +
+ vf_num_per_port * q_num_per_vf * mac_cb->mac_id;
+
switch (mac_cb->dsaf_dev->dsaf_mode) {
case DSAF_MODE_ENABLE_FIX:
tmp_port = 0;
@@ -193,7 +200,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
case DSAF_MODE_DISABLE_6PORT_2VM:
case DSAF_MODE_DISABLE_6PORT_4VM:
case DSAF_MODE_DISABLE_6PORT_16VM:
- tmp_port = vmid;
+ tmp_port = vm_queue_id;
break;
default:
dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n",
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 4cbdf14f5c16..d3a1f72ece0e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -461,5 +461,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb);
int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status);
void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en);
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
+ u8 vmid, u8 *port_num);
#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 8e5b3f51b47b..8ea3d95fa483 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -760,16 +760,6 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
DSAF_CFG_MIX_MODE_S, !!en);
}
-void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
-{
- if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
- dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG)
- return;
-
- dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
- DSAFV2_SERDES_LBK_EN_B, !!en);
-}
-
/**
* hns_dsaf_tbl_stat_en - tbl
* @dsaf_id: dsa fabric id
@@ -2761,6 +2751,7 @@ static const struct of_device_id g_dsaf_match[] = {
{.compatible = "hisilicon,hns-dsaf-v2"},
{}
};
+MODULE_DEVICE_TABLE(of, g_dsaf_match);
static struct platform_driver g_dsaf_driver = {
.probe = hns_dsaf_probe,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 35df187e66f1..c494fc52be74 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -466,6 +466,5 @@ void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
u32 *en);
int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
u32 en);
-void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index ef1107777c08..f0ed80d6ef9c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -543,6 +543,22 @@ int hns_rcb_set_coalesce_usecs(
"error: coalesce_usecs setting supports 0~1023us\n");
return -EINVAL;
}
+
+ if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+ if (timeout == 0)
+ /* a timeout of 0 disables the gap time */
+ dsaf_set_reg_field(rcb_common->io_base,
+ RCB_INT_GAP_TIME_REG + port_idx * 4,
+ PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
+ 0);
+ else
+ /* a non-zero timeout restores the gap time to 1 */
+ dsaf_set_reg_field(rcb_common->io_base,
+ RCB_INT_GAP_TIME_REG + port_idx * 4,
+ PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
+ 1);
+ }
+
hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
return 0;
}
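dsaf_set_reg_field() above is a masked read-modify-write; with PPE_INT_GAPTIME_M = 0x3ff and PPE_INT_GAPTIME_B = 0 it rewrites the low ten bits of the per-port gap-time register. A generic sketch of the pattern (the hns helper's exact argument order may differ):

#include <linux/io.h>

/* Read-modify-write a field at io_base + reg: mask gives the field
 * width, shift its bit position.  Sketch of the pattern only.
 */
static void set_reg_field(void __iomem *io_base, u32 reg,
			  u32 mask, u32 shift, u32 val)
{
	u32 v = readl(io_base + reg);

	v &= ~(mask << shift);
	v |= (val & mask) << shift;
	writel(v, io_base + reg);
}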
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 4b8b803822d1..878950a42e6c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -417,6 +417,7 @@
#define RCB_CFG_OVERTIME_REG 0x9300
#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
+#define RCB_INT_GAP_TIME_REG 0x9400
#define RCB_PORT_CFG_OVERTIME_REG 0x9430
#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
@@ -898,6 +899,9 @@
#define PPE_CNT_CLR_CE_B 0
#define PPE_CNT_CLR_SNAP_EN_B 1
+#define PPE_INT_GAPTIME_B 0
+#define PPE_INT_GAPTIME_M 0x3ff
+
#define PPE_COMMON_CNT_CLR_CE_B 0
#define PPE_COMMON_CNT_CLR_SNAP_EN_B 1
#define RCB_COM_TSO_MODE_B 0
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index a7208673116c..60831a2ac86b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -575,7 +575,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
struct sk_buff *skb;
struct hnae_desc *desc;
struct hnae_desc_cb *desc_cb;
- struct ethhdr *eh;
unsigned char *va;
int bnum, length, i;
int pull_len;
@@ -601,7 +600,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
- skb_reset_mac_header(skb);
prefetchw(skb->data);
length = le16_to_cpu(desc->rx.pkt_len);
@@ -683,14 +681,6 @@ out_bnum_err:
return -EFAULT;
}
- /* filter out multicast pkt with the same src mac as this port */
- eh = eth_hdr(skb);
- if (unlikely(is_multicast_ether_addr(eh->h_dest) &&
- ether_addr_equal(ndev->dev_addr, eh->h_source))) {
- dev_kfree_skb_any(skb);
- return -EFAULT;
- }
-
ring->stats.rx_pkts++;
ring->stats.rx_bytes += skb->len;
@@ -748,25 +738,37 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
ndev->last_rx = jiffies;
}
+static int hns_desc_unused(struct hnae_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+
+ return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
+}
+
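hns_desc_unused() counts ring slots that have been cleaned but not yet handed back to hardware, with the ternary handling ring wrap-around. Worked values, assuming a 1024-entry ring:

/*   desc_num = 1024
 *   ntc = 100, ntu = 600:  (100 >= 600 ? 0 : 1024) + 100 - 600 = 524
 *   ntc = 600, ntu = 100:  (600 >= 100 ? 0 : 1024) + 600 - 100 = 500
 *
 * In both cases the result is how many descriptors can be refilled
 * before next_to_use would catch up with next_to_clean.
 */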
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
int budget, void *v)
{
struct hnae_ring *ring = ring_data->ring;
struct sk_buff *skb;
- int num, bnum, ex_num;
+ int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int recv_pkts, recv_bds, clean_count, err;
+ int unused_count = hns_desc_unused(ring);
num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
rmb(); /* make sure num taken effect before the other data is touched */
recv_pkts = 0, recv_bds = 0, clean_count = 0;
-recv:
+ num -= unused_count;
+
while (recv_pkts < budget && recv_bds < num) {
/* reuse or realloc buffers */
- if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
- hns_nic_alloc_rx_buffers(ring_data, clean_count);
+ if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+ hns_nic_alloc_rx_buffers(ring_data,
+ clean_count + unused_count);
clean_count = 0;
+ unused_count = hns_desc_unused(ring);
}
/* poll one pkt */
@@ -787,21 +789,11 @@ recv:
recv_pkts++;
}
- /* make all data has been write before submit */
- if (recv_pkts < budget) {
- ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
-
- if (ex_num > clean_count) {
- num += ex_num - clean_count;
- rmb(); /*complete read rx ring bd number*/
- goto recv;
- }
- }
-
out:
/* make sure all data has been written back before submitting */
- if (clean_count > 0)
- hns_nic_alloc_rx_buffers(ring_data, clean_count);
+ if (clean_count + unused_count > 0)
+ hns_nic_alloc_rx_buffers(ring_data,
+ clean_count + unused_count);
return recv_pkts;
}
@@ -811,6 +803,8 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
struct hnae_ring *ring = ring_data->ring;
int num = 0;
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+
/* for hardware bug fixed */
num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
@@ -822,6 +816,20 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
}
}
+static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int num = 0;
+
+ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+
+ if (num == 0)
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring, 0);
+ else
+ napi_schedule(&ring_data->napi);
+}
+
static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
int *bytes, int *pkts)
{
@@ -923,7 +931,11 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
- int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ int head;
+
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
if (head != ring->next_to_clean) {
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
@@ -933,6 +945,18 @@ static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
}
}
+static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+
+ if (head == ring->next_to_clean)
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring, 0);
+ else
+ napi_schedule(&ring_data->napi);
+}
+
static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
@@ -964,10 +988,7 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
if (clean_complete >= 0 && clean_complete < budget) {
napi_complete(napi);
- ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
- ring_data->ring, 0);
- if (ring_data->fini_process)
- ring_data->fini_process(ring_data);
+ ring_data->fini_process(ring_data);
return 0;
}
@@ -1559,6 +1580,21 @@ struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
return stats;
}
+static u16
+hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ /* work around hardware looping broadcast/multicast packets back to their transmit queue */
+ if (!AE_IS_VER1(priv->enet_ver) &&
+ is_multicast_ether_addr(eth_hdr->h_dest))
+ return 0;
+ else
+ return fallback(ndev, skb);
+}
+
static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_open = hns_nic_net_open,
.ndo_stop = hns_nic_net_stop,
@@ -1574,6 +1610,7 @@ static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_poll_controller = hns_nic_poll_controller,
#endif
.ndo_set_rx_mode = hns_nic_set_rx_mode,
+ .ndo_select_queue = hns_nic_select_queue,
};
static void hns_nic_update_link_status(struct net_device *netdev)
@@ -1735,7 +1772,8 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->queue_index = i;
rd->ring = &h->qs[i]->tx_ring;
rd->poll_one = hns_nic_tx_poll_one;
- rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
+ rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
+ hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
@@ -1747,7 +1785,8 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->ring = &h->qs[i - h->q_num]->rx_ring;
rd->poll_one = hns_nic_rx_poll_one;
rd->ex_process = hns_nic_rx_up_pro;
- rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
+ rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
+ hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
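Both *_fini_pro_v2 handlers implement the usual re-check-before-rearm NAPI pattern: with the ring interrupt still masked, re-read the hardware counter and unmask only if no work arrived during the poll; otherwise reschedule. A condensed sketch with placeholder helpers (pending_work() stands in for the FBDNUM/HEAD register reads, irq_enable() for toggle_ring_irq()):

#include <stdbool.h>

struct ring;                           /* stand-in for the driver's ring type */
bool pending_work(struct ring *r);     /* e.g. FBDNUM != 0 or HEAD != next_to_clean */
void irq_enable(struct ring *r);       /* e.g. ops->toggle_ring_irq(ring, 0) */
void reschedule_poll(struct ring *r);  /* e.g. napi_schedule() */

static void ring_poll_finish(struct ring *r)
{
	if (!pending_work(r))
		irq_enable(r);      /* idle: safe to unmask */
	else
		reschedule_poll(r); /* raced with new work: poll again */
}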
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 47e59bbfd061..87d5c94b2810 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -352,6 +352,13 @@ static int __lb_setup(struct net_device *ndev,
break;
}
+ if (!ret) {
+ if (loop == MAC_LOOP_NONE)
+ h->dev->ops->set_promisc_mode(
+ h, ndev->flags & IFF_PROMISC);
+ else
+ h->dev->ops->set_promisc_mode(h, 1);
+ }
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 33f4c483af0f..501eb2090ca6 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -563,6 +563,7 @@ static const struct of_device_id hns_mdio_match[] = {
{.compatible = "hisilicon,hns-mdio"},
{}
};
+MODULE_DEVICE_TABLE(of, hns_mdio_match);
static const struct acpi_device_id hns_mdio_acpi_match[] = {
{ "HISI0141", 0 },
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 657206be7ba9..d54405b46109 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1178,7 +1178,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
if (!scrq)
return NULL;
- scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
+ scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
memset(scrq->msgs, 0, 4 * PAGE_SIZE);
if (!scrq->msgs) {
dev_warn(dev, "Couldn't allocate crq queue messages page\n");
@@ -1449,14 +1449,16 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
return rc;
req_rx_irq_failed:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ }
i = adapter->req_tx_queues;
req_tx_irq_failed:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
irq_dispose_mapping(adapter->tx_scrq[j]->irq);
+ }
release_sub_crqs_no_irqs(adapter);
return rc;
}
@@ -3222,6 +3224,27 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
+static void ibmvnic_xport_event(struct work_struct *work)
+{
+ struct ibmvnic_adapter *adapter = container_of(work,
+ struct ibmvnic_adapter,
+ ibmvnic_xport);
+ struct device *dev = &adapter->vdev->dev;
+ long rc;
+
+ ibmvnic_free_inflight(adapter);
+ release_sub_crqs(adapter);
+ if (adapter->migrated) {
+ rc = ibmvnic_reenable_crq_queue(adapter);
+ if (rc)
+ dev_err(dev, "Error after enable rc=%ld\n", rc);
+ adapter->migrated = false;
+ rc = ibmvnic_send_crq_init(adapter);
+ if (rc)
+ dev_err(dev, "Error sending init rc=%ld\n", rc);
+ }
+}
+
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -3257,15 +3280,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
dev_info(dev, "Re-enabling adapter\n");
adapter->migrated = true;
- ibmvnic_free_inflight(adapter);
- release_sub_crqs(adapter);
- rc = ibmvnic_reenable_crq_queue(adapter);
- if (rc)
- dev_err(dev, "Error after enable rc=%ld\n", rc);
- adapter->migrated = false;
- rc = ibmvnic_send_crq_init(adapter);
- if (rc)
- dev_err(dev, "Error sending init rc=%ld\n", rc);
+ schedule_work(&adapter->ibmvnic_xport);
} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
dev_info(dev, "Backing device failover detected\n");
netif_carrier_off(netdev);
@@ -3274,8 +3289,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
/* The adapter lost the connection */
dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
gen_crq->cmd);
- ibmvnic_free_inflight(adapter);
- release_sub_crqs(adapter);
+ schedule_work(&adapter->ibmvnic_xport);
}
return;
case IBMVNIC_CRQ_CMD_RSP:
@@ -3644,6 +3658,7 @@ static void handle_crq_init_rsp(struct work_struct *work)
goto task_failed;
netdev->real_num_tx_queues = adapter->req_tx_queues;
+ netdev->mtu = adapter->req_mtu;
netdev->min_mtu = adapter->min_mtu;
netdev->max_mtu = adapter->max_mtu;
@@ -3717,6 +3732,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
SET_NETDEV_DEV(netdev, &dev->dev);
INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+ INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
spin_lock_init(&adapter->stats_lock);
@@ -3784,6 +3800,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev->real_num_tx_queues = adapter->req_tx_queues;
+ netdev->mtu = adapter->req_mtu;
rc = register_netdev(netdev);
if (rc) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index bfc84c7d0e11..dd775d951b73 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -27,7 +27,7 @@
/**************************************************************************/
#define IBMVNIC_NAME "ibmvnic"
-#define IBMVNIC_DRIVER_VERSION "1.0"
+#define IBMVNIC_DRIVER_VERSION "1.0.1"
#define IBMVNIC_INVALID_MAP -1
#define IBMVNIC_STATS_TIMEOUT 1
/* basic structures plus 100 2k buffers */
@@ -1048,5 +1048,6 @@ struct ibmvnic_adapter {
u8 map_id;
struct work_struct vnic_crq_init;
+ struct work_struct ibmvnic_xport;
bool failover;
};
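The transport-event handling moves from the CRQ interrupt path into a work item because re-enabling the CRQ can sleep, which is not allowed in interrupt context. The generic shape of that pattern, as a sketch:

#include <linux/workqueue.h>

struct my_adapter {
	struct work_struct xport_work;
	/* ... */
};

/* Runs in process context; sleeping calls are allowed here. */
static void xport_event_fn(struct work_struct *work)
{
	struct my_adapter *a =
		container_of(work, struct my_adapter, xport_work);

	/* slow teardown / re-init, e.g. release queues, re-enable CRQ */
	(void)a;
}

/* probe:        INIT_WORK(&a->xport_work, xport_event_fn);
 * irq handler:  schedule_work(&a->xport_work);   (returns immediately)
 */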
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 01cce5bab861..5a6f8518b4e1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -92,6 +92,7 @@
#define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 7fa535f57820..d78a4dc7b00b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4641,29 +4641,6 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
}
/**
- * i40e_pf_get_default_tc - Get bitmap for first enabled TC
- * @pf: PF being queried
- *
- * Return a bitmap for first enabled traffic class for this PF.
- **/
-static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
-{
- u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
- u8 i = 0;
-
- if (!enabled_tc)
- return 0x1; /* TC0 */
-
- /* Find the first enabled TC */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & BIT(i))
- break;
- }
-
- return BIT(i);
-}
-
-/**
* i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
* @pf: PF being queried
*
@@ -4673,7 +4650,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
/* If DCB is not enabled for this PF then just return default TC */
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
- return i40e_pf_get_default_tc(pf);
+ return I40E_DEFAULT_TRAFFIC_CLASS;
/* SFP mode we want PF to be enabled for all TCs */
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
@@ -4683,7 +4660,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
if (pf->hw.func_caps.iscsi)
return i40e_get_iscsi_tc_map(pf);
else
- return i40e_pf_get_default_tc(pf);
+ return I40E_DEFAULT_TRAFFIC_CLASS;
}
/**
@@ -5029,7 +5006,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
if (v == pf->lan_vsi)
tc_map = i40e_pf_get_tc_map(pf);
else
- tc_map = i40e_pf_get_default_tc(pf);
+ tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
#ifdef I40E_FCOE
if (pf->vsi[v]->type == I40E_VSI_FCOE)
tc_map = i40e_get_fcoe_tc_map(pf);
@@ -5717,7 +5694,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
u8 type;
/* Not DCB capable or capability disabled */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return ret;
/* Ignore if event is not for Nearest Bridge */
@@ -7707,6 +7684,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
pf->msix_entries = NULL;
+ pci_disable_msix(pf->pdev);
return -ENODEV;
} else if (v_actual == I40E_MIN_MSIX) {
@@ -9047,7 +9025,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return 0;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
- nlflags, 0, 0, filter_mask, NULL);
+ 0, 0, nlflags, filter_mask, NULL);
}
/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
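The getlink fix is purely positional: nlflags was being passed where the flags and mask arguments belong. For reference, the prototype in kernels of this vintage is roughly the following (an assumption; check include/linux/rtnetlink.h in your tree, since later kernels changed it):

int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			    struct net_device *dev, u16 mode,
			    u32 flags, u32 mask, int nlflags,
			    u32 filter_mask,
			    int (*vlan_fill)(struct sk_buff *skb,
					     struct net_device *dev,
					     u32 filter_mask));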
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cbd2cfa1b154..5e1f57c7ee1b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9162,10 +9162,14 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
goto fwd_add_err;
fwd_adapter->pool = pool;
fwd_adapter->real_adapter = adapter;
- err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
- if (err)
- goto fwd_add_err;
- netif_tx_start_all_queues(vdev);
+
+ if (netif_running(pdev)) {
+ err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
+ if (err)
+ goto fwd_add_err;
+ netif_tx_start_all_queues(vdev);
+ }
+
return fwd_adapter;
fwd_add_err:
/* unwind counter and free adapter struct */
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 68675d83bdc5..8302c7e660ce 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2981,6 +2981,22 @@ static void set_params(struct mv643xx_eth_private *mp,
mp->txq_count = pd->tx_queue_count ? : 1;
}
+static int get_phy_mode(struct mv643xx_eth_private *mp)
+{
+ struct device *dev = mp->dev->dev.parent;
+ int iface = -1;
+
+ if (dev->of_node)
+ iface = of_get_phy_mode(dev->of_node);
+
+ /* Historical default if unspecified. We could also read/write
+ * the interface state in the PSC1
+ */
+ if (iface < 0)
+ iface = PHY_INTERFACE_MODE_GMII;
+ return iface;
+}
+
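get_phy_mode() is the standard read-DT-with-fallback idiom. A self-contained sketch of the same idiom against the old of_get_phy_mode() signature, which returns the mode or a negative errno (newer kernels return it through an out-parameter):

#include <linux/errno.h>
#include <linux/of_net.h>
#include <linux/phy.h>

/* Resolve the PHY interface mode from the "phy-mode" property,
 * defaulting to GMII when the node or property is absent.
 */
static int resolve_phy_mode(struct device_node *np)
{
	int iface = np ? of_get_phy_mode(np) : -ENODEV;

	return iface < 0 ? PHY_INTERFACE_MODE_GMII : iface;
}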
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
int phy_addr)
{
@@ -3007,7 +3023,7 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
"orion-mdio-mii", addr);
phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
- PHY_INTERFACE_MODE_GMII);
+ get_phy_mode(mp));
if (!IS_ERR(phydev)) {
phy_addr_set(mp, addr);
break;
@@ -3106,6 +3122,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
mp = netdev_priv(dev);
platform_set_drvdata(pdev, mp);
@@ -3145,7 +3162,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (pd->phy_node) {
phydev = of_phy_connect(mp->dev, pd->phy_node,
mv643xx_eth_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ get_phy_mode(mp));
if (!phydev)
err = -ENODEV;
else
@@ -3203,12 +3220,15 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->priv_flags |= IFF_UNICAST_FLT;
dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
/* MTU range: 64 - 9500 */
dev->min_mtu = 64;
dev->max_mtu = 9500;
if (mp->shared->win_protect)
wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index b1cef7a0f7ca..e36bebcab3f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2469,6 +2469,7 @@ err_comm_admin:
kfree(priv->mfunc.master.slave_state);
err_comm:
iounmap(priv->mfunc.comm);
+ priv->mfunc.comm = NULL;
err_vhcr:
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
priv->mfunc.vhcr,
@@ -2537,6 +2538,13 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
int slave;
u32 slave_read;
+ /* If the comm channel has not yet been initialized,
+ * skip reporting the internal error event to all
+ * the communication channels.
+ */
+ if (!priv->mfunc.comm)
+ return;
+
/* Report an internal error event to all
* communication channels.
*/
@@ -2571,6 +2579,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
}
iounmap(priv->mfunc.comm);
+ priv->mfunc.comm = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 08fc5fc56d43..a5fc46bbcbe2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -245,8 +245,11 @@ static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 tmp_rounded =
+ roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
+ roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
- max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+ max_val_cycles : tmp_rounded;
/* calculate max possible multiplier in order to fit in 64bit */
u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
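The tmp_rounded dance guards the rounding step against overflow. Worked numbers (hypothetical 427 MHz clock; W is the driver's wrap-around budget in seconds):

/*   max_val_cycles = 427e6 * W;  with W = 15 -> ~6.4e9 cycles
 *   roundup_pow_of_two(6.4e9) = 2^33, which is > 6.4e9,
 *   so tmp_rounded = 2^33 - 1 (an all-ones mask).
 *
 * If max_val_cycles were so large that roundup_pow_of_two()
 * overflowed (result no longer greater than the input), tmp_rounded
 * falls back to UINT_MAX instead of producing a bogus mask -- that
 * overflow case is what this hunk fixes.
 */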
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 132cea655920..e3be7e44ff51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -127,7 +127,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
/* For TX we use the same irq per
ring we assigned for the RX */
struct mlx4_en_cq *rx_cq;
-
+ int xdp_index;
+
+ /* The xdp tx irq must align with the rx ring that forwards to
+ * it, so reindex these from 0. This should only happen when
+ * tx_ring_num is not a multiple of rx_ring_num.
+ */
+ xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx;
+ if (xdp_index >= 0)
+ cq_idx = xdp_index;
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
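A worked example of the XDP reindexing with hypothetical ring counts, where the last xdp_ring_num TX rings are the XDP TX rings:

/*   rx_ring_num = 8, tx_ring_num = 16, xdp_ring_num = 8
 *
 *   XDP TX CQ, cq_idx = 12:
 *     xdp_index = (8 - 16) + 12 = 4  ->  cq_idx = 4
 *   so XDP TX ring #4 shares the IRQ of RX ring #4 -- the ring
 *   that forwards into it.
 *
 *   Regular TX CQ, cq_idx = 3:
 *     xdp_index = (8 - 16) + 3 = -5  ->  left alone, then % 8 = 3.
 */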
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bf35ac4c1c61..58b749dd6125 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1733,6 +1733,13 @@ int mlx4_en_start_port(struct net_device *dev)
udp_tunnel_get_rx_info(dev);
priv->port_up = true;
+
+ /* Process any outstanding completions to prevent
+ * the queues from freezing when they are full
+ */
+ for (i = 0; i < priv->rx_ring_num; i++)
+ napi_schedule(&priv->rx_cq[i]->napi);
+
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -1910,8 +1917,9 @@ static void mlx4_en_clear_stats(struct net_device *dev)
struct mlx4_en_dev *mdev = priv->mdev;
int i;
- if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
- en_dbg(HW, priv, "Failed dumping statistics\n");
+ if (!mlx4_is_slave(mdev->dev))
+ if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
+ en_dbg(HW, priv, "Failed dumping statistics\n");
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
@@ -2194,6 +2202,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
if (!shutdown)
free_netdev(dev);
+ dev->ethtool_ops = NULL;
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 5aa8b751f417..59473a0ebcdf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -166,7 +166,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
return PTR_ERR(mailbox);
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_WRAPPED);
+ MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -322,7 +322,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
0, MLX4_CMD_DUMP_ETH_STATS,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index b66e03d9711f..c06346a82496 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -118,6 +118,29 @@ mlx4_en_test_loopback_exit:
return !loopback_ok;
}
+static int mlx4_en_test_interrupts(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+ int i = 0;
+
+ err = mlx4_test_async(mdev->dev);
+ /* When not in MSI_X or slave, test only async */
+ if (!(mdev->dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(mdev->dev))
+ return err;
+
+ /* Loop over all completion vectors of the current port; for each
+ * vector, check that it works by mapping command completions to it
+ * and issuing a NOP command
+ */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ err = mlx4_test_interrupt(mdev->dev, priv->rx_cq[i]->vector);
+ if (err)
+ break;
+ }
+
+ return err;
+}
static int mlx4_en_test_link(struct mlx4_en_priv *priv)
{
@@ -151,7 +174,6 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
int i, carrier_ok;
memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@@ -177,7 +199,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
netif_carrier_on(dev);
}
- buf[0] = mlx4_test_interrupts(mdev->dev);
+ buf[0] = mlx4_en_test_interrupts(priv);
buf[1] = mlx4_en_test_link(priv);
buf[2] = mlx4_en_test_speed(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cf8f8a72a801..cd3638e6fe25 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1361,53 +1361,49 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
kfree(priv->eq_table.uar_map);
}
-/* A test that verifies that we can accept interrupts on all
- * the irq vectors of the device.
+/* A test that verifies that we can accept interrupts
+ * on the vector allocated for asynchronous events
+ */
+int mlx4_test_async(struct mlx4_dev *dev)
+{
+ return mlx4_NOP(dev);
+}
+EXPORT_SYMBOL(mlx4_test_async);
+
+/* A test that verifies that we can accept interrupts
+ * on the given irq vector of the tested port.
* Interrupts are checked using the NOP command.
*/
-int mlx4_test_interrupts(struct mlx4_dev *dev)
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int i;
int err;
- err = mlx4_NOP(dev);
- /* When not in MSI_X, there is only one irq to check */
- if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
- return err;
-
- /* A loop over all completion vectors, for each vector we will check
- * whether it works by mapping command completions to that vector
- * and performing a NOP command
- */
- for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
- /* Make sure request_irq was called */
- if (!priv->eq_table.eq[i].have_irq)
- continue;
-
- /* Temporary use polling for command completions */
- mlx4_cmd_use_polling(dev);
-
- /* Map the new eq to handle all asynchronous events */
- err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[i].eqn);
- if (err) {
- mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
- mlx4_cmd_use_events(dev);
- break;
- }
+ /* Temporary use polling for command completions */
+ mlx4_cmd_use_polling(dev);
- /* Go back to using events */
- mlx4_cmd_use_events(dev);
- err = mlx4_NOP(dev);
+ /* Map the new eq to handle all asynchronous events */
+ err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
+ priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
+ if (err) {
+ mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
+ goto out;
}
+ /* Go back to using events */
+ mlx4_cmd_use_events(dev);
+ err = mlx4_NOP(dev);
+
/* Return to default */
+ mlx4_cmd_use_polling(dev);
+out:
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+ mlx4_cmd_use_events(dev);
+
return err;
}
-EXPORT_SYMBOL(mlx4_test_interrupts);
+EXPORT_SYMBOL(mlx4_test_interrupt);
bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index c41ab31a39f8..84bab9f0732e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -49,9 +49,9 @@ enum {
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
-static bool enable_qos = true;
+static bool enable_qos;
module_param(enable_qos, bool, 0444);
-MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
+MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
#define MLX4_GET(dest, source, offset) \
do { \
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7183ac4135d2..6f4e67bc3538 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1102,6 +1102,14 @@ static int __set_port_type(struct mlx4_port_info *info,
int i;
int err = 0;
+ if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
+ mlx4_err(mdev,
+ "Requested port type for port %d is not supported on this HCA\n",
+ info->port);
+ err = -EINVAL;
+ goto err_sup;
+ }
+
mlx4_stop_sense(mdev);
mutex_lock(&priv->port_mutex);
info->tmp_type = port_type;
@@ -1147,7 +1155,7 @@ static int __set_port_type(struct mlx4_port_info *info,
out:
mlx4_start_sense(mdev);
mutex_unlock(&priv->port_mutex);
-
+err_sup:
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e4878f31e45d..88ee7d8a5923 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -145,9 +145,10 @@ enum mlx4_resource {
RES_MTT,
RES_MAC,
RES_VLAN,
- RES_EQ,
+ RES_NPORT_ID,
RES_COUNTER,
RES_FS_RULE,
+ RES_EQ,
MLX4_NUM_OF_RESOURCE_TYPE
};
@@ -1329,8 +1330,6 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd);
int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
int port, void *buf);
-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
- struct mlx4_cmd_mailbox *outbox);
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index c5b2064297a1..b656dd5772e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1728,24 +1728,13 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
- u32 in_mod, struct mlx4_cmd_mailbox *outbox)
-{
- return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
- MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_NATIVE);
-}
-
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
- if (slave != dev->caps.function)
- return 0;
- return mlx4_common_dump_eth_stats(dev, slave,
- vhcr->in_modifier, outbox);
+ return 0;
}
int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 84d7857ccc27..c548beaaf910 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1605,13 +1605,14 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_EQ_BUSY;
- if (eq)
- *eq = r;
}
}
spin_unlock_irq(mlx4_tlock(dev));
+ if (!err && eq)
+ *eq = r;
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 6cb38304669f..2c6e3c7b7417 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -41,6 +41,13 @@
#include "mlx5_core.h"
+struct mlx5_db_pgdir {
+ struct list_head list;
+ unsigned long *bitmap;
+ __be32 *db_page;
+ dma_addr_t db_dma;
+};
+
/* Handling for queue buffers -- we allocate a bunch of memory and
* register it in a memory region at HCA virtual address 0.
*/
@@ -102,17 +109,28 @@ EXPORT_SYMBOL_GPL(mlx5_buf_free);
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
int node)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
struct mlx5_db_pgdir *pgdir;
pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
if (!pgdir)
return NULL;
- bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
+ pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+
+ if (!pgdir->bitmap) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ bitmap_fill(pgdir->bitmap, db_per_page);
pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
&pgdir->db_dma, node);
if (!pgdir->db_page) {
+ kfree(pgdir->bitmap);
kfree(pgdir);
return NULL;
}
@@ -123,18 +141,19 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
struct mlx5_db *db)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
int offset;
int i;
- i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
- if (i >= MLX5_DB_PER_PAGE)
+ i = find_first_bit(pgdir->bitmap, db_per_page);
+ if (i >= db_per_page)
return -ENOMEM;
__clear_bit(i, pgdir->bitmap);
db->u.pgdir = pgdir;
db->index = i;
- offset = db->index * L1_CACHE_BYTES;
+ offset = db->index * cache_line_size();
db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
db->dma = pgdir->db_dma + offset;
@@ -181,14 +200,16 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
mutex_lock(&dev->priv.pgdir_mutex);
__set_bit(db->index, db->u.pgdir->bitmap);
- if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
+ if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
+ kfree(db->u.pgdir->bitmap);
kfree(db->u.pgdir);
}
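Sizing the doorbell bitmap from cache_line_size() at run time replaces the old compile-time MLX5_DB_PER_PAGE, which went wrong whenever the real cache line differed from L1_CACHE_BYTES. Worked numbers:

/*   PAGE_SIZE = 4096, cache_line_size() = 64
 *   db_per_page = 4096 / 64 = 64 doorbell records per page
 *   bitmap      = BITS_TO_LONGS(64) longs = one 64-bit word
 *   offset      = index * 64, so no two records share a cache line.
 */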
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 460363b66cb1..7a43502a89cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -85,6 +85,9 @@
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
+#define MLX5E_DEFAULT_LRO_TIMEOUT 32
+#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
@@ -221,6 +224,7 @@ struct mlx5e_params {
struct ieee_ets ets;
#endif
bool rx_am_enabled;
+ u32 lro_timeout;
};
struct mlx5e_tstamp {
@@ -888,5 +892,6 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 03183eba7003..d3f13b5db5b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1971,9 +1971,7 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
(priv->params.lro_wqe_sz -
ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
- MLX5_CAP_ETH(priv->mdev,
- lro_timer_supported_periods[2]));
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
}
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
@@ -3383,6 +3381,18 @@ static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
}
}
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+{
+ int i;
+
+ /* The supported periods are organized in ascending order */
+ for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
+ if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
+ break;
+
+ return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
+}
+
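mlx5e_choose_lro_timeout() picks the smallest supported period that is at least the requested one, falling back to the largest entry. Example behaviour, assuming (hypothetically) the device advertises {8, 16, 32, 1024} microseconds in ascending order:

/*   mlx5e_choose_lro_timeout(mdev, 32)   -> 32    (exact match)
 *   mlx5e_choose_lro_timeout(mdev, 33)   -> 1024  (first >= wanted)
 *   mlx5e_choose_lro_timeout(mdev, 9999) -> 1024  (loop ends on the last entry)
 */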
static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
const struct mlx5e_profile *profile,
@@ -3401,6 +3411,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->params.lro_timeout =
+ mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+
priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
@@ -4023,7 +4036,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- unregister_netdev(netdev);
destroy_workqueue(priv->wq);
if (profile->cleanup)
profile->cleanup(priv);
@@ -4040,6 +4052,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
for (vport = 1; vport < total_vfs; vport++)
mlx5_eswitch_unregister_vport_rep(esw, vport);
+ unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(mdev, priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3c97da103d30..7fe6559e4ab3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -457,6 +457,7 @@ void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
struct mlx5e_priv *priv = rep->priv_data;
struct net_device *netdev = priv->netdev;
+ unregister_netdev(netdev);
mlx5e_detach_netdev(esw->dev, netdev);
mlx5e_destroy_netdev(esw->dev, priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index abbf2c369923..be1f7333ab7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -931,8 +931,8 @@ static void esw_vport_change_handler(struct work_struct *work)
mutex_unlock(&esw->state_lock);
}
-static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *vlan_grp = NULL;
@@ -949,9 +949,11 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
int table_size = 2;
int err = 0;
- if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
- !IS_ERR_OR_NULL(vport->egress.acl))
- return;
+ if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ if (!IS_ERR_OR_NULL(vport->egress.acl))
+ return 0;
esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
@@ -959,12 +961,12 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
- return;
+ return -EIO;
}
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
- return;
+ return -ENOMEM;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR(acl)) {
@@ -1009,6 +1011,7 @@ out:
mlx5_destroy_flow_group(vlan_grp);
if (err && !IS_ERR_OR_NULL(acl))
mlx5_destroy_flow_table(acl);
+ return err;
}
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
@@ -1041,8 +1044,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.acl = NULL;
}
-static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
@@ -1063,9 +1066,11 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
int table_size = 4;
int err = 0;
- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
- !IS_ERR_OR_NULL(vport->ingress.acl))
- return;
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ if (!IS_ERR_OR_NULL(vport->ingress.acl))
+ return 0;
esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
@@ -1073,12 +1078,12 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
- return;
+ return -EIO;
}
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
- return;
+ return -ENOMEM;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR(acl)) {
@@ -1167,6 +1172,7 @@ out:
}
kvfree(flow_group_in);
+ return err;
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1225,7 +1231,13 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
return 0;
}
- esw_vport_enable_ingress_acl(esw, vport);
+ err = esw_vport_enable_ingress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable ingress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
esw_debug(esw->dev,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
@@ -1299,7 +1311,13 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
return 0;
}
- esw_vport_enable_egress_acl(esw, vport);
+ err = esw_vport_enable_egress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable egress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5da2cc878582..89696048b045 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -436,6 +436,9 @@ static void del_flow_group(struct fs_node *node)
fs_get_obj(ft, fg->node.parent);
dev = get_dev(&ft->node);
+ if (ft->autogroup.active)
+ ft->autogroup.num_groups--;
+
if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
fg->id, ft->id);
@@ -879,7 +882,7 @@ static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *
tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
tree_add_node(&fg->node, &ft->node);
/* Add node to group list */
- list_add(&fg->node.list, ft->node.children.prev);
+ list_add(&fg->node.list, prev_fg);
return fg;
}
@@ -893,7 +896,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
return ERR_PTR(-EPERM);
lock_ref_node(&ft->node);
- fg = create_flow_group_common(ft, fg_in, &ft->node.children, false);
+ fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
unlock_ref_node(&ft->node);
return fg;
@@ -1012,7 +1015,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
u32 *match_criteria)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct list_head *prev = &ft->node.children;
+ struct list_head *prev = ft->node.children.prev;
unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
void *match_criteria_addr;
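The fs_core fix hinges on list_add() semantics: list_add(new, where) links new immediately after where, so passing the list head inserts at the front while passing head->prev appends at the tail. A minimal illustration:

#include <linux/list.h>

/* Appending at the tail via list_add(): equivalent to
 * list_add_tail(new, head), which is what mlx5_create_flow_group()
 * now requests explicitly by passing ft->node.children.prev, while
 * autogroups can insert after an arbitrary predecessor group.
 */
static void append_node(struct list_head *head, struct list_head *new)
{
	list_add(new, head->prev);
}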
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 3a9195b4169d..3b026c151cf2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -218,6 +218,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
goto err_out;
if (aging) {
+ counter->cache.lastuse = jiffies;
counter->aging = true;
spin_lock(&fc_stats->addlist_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 1a05fb965c8d..5bcf93422ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -61,10 +61,15 @@ enum {
enum {
MLX5_NIC_IFC_FULL = 0,
MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2
+ MLX5_NIC_IFC_NO_DRAM_NIC = 2,
+ MLX5_NIC_IFC_INVALID = 3
};
-static u8 get_nic_interface(struct mlx5_core_dev *dev)
+enum {
+ MLX5_DROP_NEW_HEALTH_WORK,
+};
+
+static u8 get_nic_state(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
}
@@ -97,7 +102,7 @@ static int in_fatal(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED)
+ if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
return 1;
if (ioread32be(&h->fw_ver) == 0xffffffff)
@@ -127,7 +132,7 @@ unlock:
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
- u8 nic_interface = get_nic_interface(dev);
+ u8 nic_interface = get_nic_state(dev);
switch (nic_interface) {
case MLX5_NIC_IFC_FULL:
@@ -149,8 +154,34 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
mlx5_disable_device(dev);
}
+static void health_recover(struct work_struct *work)
+{
+ struct mlx5_core_health *health;
+ struct delayed_work *dwork;
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+ u8 nic_state;
+
+ dwork = container_of(work, struct delayed_work, work);
+ health = container_of(dwork, struct mlx5_core_health, recover_work);
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ nic_state = get_nic_state(dev);
+ if (nic_state == MLX5_NIC_IFC_INVALID) {
+ dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
+ return;
+ }
+
+ dev_err(&dev->pdev->dev, "starting health recovery flow\n");
+ mlx5_recover_device(dev);
+}
+
+/* How long to wait before the health recovery flow resets the driver (in msecs) */
+#define MLX5_RECOVERY_DELAY_MSECS 60000
static void health_care(struct work_struct *work)
{
+ unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS);
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
@@ -160,6 +191,14 @@ static void health_care(struct work_struct *work)
dev = container_of(priv, struct mlx5_core_dev, priv);
mlx5_core_warn(dev, "handling bad device here\n");
mlx5_handle_bad_state(dev);
+
+ spin_lock(&health->wq_lock);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ schedule_delayed_work(&health->recover_work, recover_delay);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock(&health->wq_lock);
}
static const char *hsynd_str(u8 synd)
@@ -272,7 +311,13 @@ static void poll_health(unsigned long data)
if (in_fatal(dev) && !health->sick) {
health->sick = true;
print_health_info(dev);
- schedule_work(&health->work);
+ spin_lock(&health->wq_lock);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ queue_work(health->wq, &health->work);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock(&health->wq_lock);
}
}
@@ -281,6 +326,8 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
init_timer(&health->timer);
+ health->sick = 0;
+ clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -297,11 +344,22 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
del_timer_sync(&health->timer);
}
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ spin_lock(&health->wq_lock);
+ set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ spin_unlock(&health->wq_lock);
+ cancel_delayed_work_sync(&health->recover_work);
+ cancel_work_sync(&health->work);
+}
+
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- flush_work(&health->work);
+ destroy_workqueue(health->wq);
}
int mlx5_health_init(struct mlx5_core_dev *dev)
@@ -316,9 +374,13 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
strcpy(name, "mlx5_health");
strcat(name, dev_name(&dev->pdev->dev));
+ health->wq = create_singlethread_workqueue(name);
kfree(name);
-
+ if (!health->wq)
+ return -ENOMEM;
+ spin_lock_init(&health->wq_lock);
INIT_WORK(&health->work, health_care);
+ INIT_DELAYED_WORK(&health->recover_work, health_recover);
return 0;
}
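mlx5_drain_health_wq() is a flag-then-cancel drain: mark new submissions forbidden under the same lock the producers take, then cancel whatever is already queued. Without the flag, the poll timer could requeue work between the two cancels. The pattern in miniature (simplified field names):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct health {
	spinlock_t lock;
	unsigned long flags;            /* bit 0: drop new work */
	struct work_struct work;
	struct delayed_work recover_work;
};

static void health_drain(struct health *h)
{
	spin_lock(&h->lock);
	set_bit(0, &h->flags);          /* producers test this under the lock */
	spin_unlock(&h->lock);

	cancel_delayed_work_sync(&h->recover_work);
	cancel_work_sync(&h->work);
}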
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d9c3c70b29e4..d5433c49b2b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -844,12 +844,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
struct pci_dev *pdev = dev->pdev;
int err;
- err = mlx5_query_hca_caps(dev);
- if (err) {
- dev_err(&pdev->dev, "query hca failed\n");
- goto out;
- }
-
err = mlx5_query_board_id(dev);
if (err) {
dev_err(&pdev->dev, "query board id failed\n");
@@ -1023,6 +1017,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_start_health_poll(dev);
+ err = mlx5_query_hca_caps(dev);
+ if (err) {
+ dev_err(&pdev->dev, "query hca failed\n");
+ goto err_stop_poll;
+ }
+
if (boot && mlx5_init_once(dev, priv)) {
dev_err(&pdev->dev, "sw objs init failed\n");
goto err_stop_poll;
@@ -1313,10 +1313,16 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
struct mlx5_priv *priv = &dev->priv;
dev_info(&pdev->dev, "%s was called\n", __func__);
+
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
- pci_save_state(pdev);
- mlx5_pci_disable_device(dev);
+ /* When invoked by the PCI error handler (state != 0), save the PCI state and drain the health workqueue */
+ if (state) {
+ pci_save_state(pdev);
+ mlx5_drain_health_wq(dev);
+ mlx5_pci_disable_device(dev);
+ }
+
return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
@@ -1373,11 +1379,6 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-void mlx5_disable_device(struct mlx5_core_dev *dev)
-{
- mlx5_pci_err_detected(dev->pdev, 0);
-}
-
static void mlx5_pci_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
@@ -1427,6 +1428,18 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+void mlx5_recover_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_disable_device(dev);
+ if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
+ mlx5_pci_resume(dev->pdev);
+}
+
static struct pci_driver mlx5_core_driver = {
.name = DRIVER_NAME,
.id_table = mlx5_core_pci_table,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 3d0cfb9f18f9..187662c8ea96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -83,6 +83,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
+void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index cc4fd61914d3..a57d5a81eb05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -209,6 +209,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
struct page *page;
+ u64 zero_addr = 1;
u64 addr;
int err;
int nid = dev_to_node(&dev->pdev->dev);
@@ -218,26 +219,35 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
mlx5_core_warn(dev, "failed to allocate page\n");
return -ENOMEM;
}
+map:
addr = dma_map_page(&dev->pdev->dev, page, 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&dev->pdev->dev, addr)) {
mlx5_core_warn(dev, "failed dma mapping page\n");
err = -ENOMEM;
- goto out_alloc;
+ goto err_mapping;
}
+
+ /* Firmware doesn't support a page with bus address 0; map another one */
+ if (addr == 0) {
+ zero_addr = addr;
+ goto map;
+ }
+
err = insert_page(dev, addr, page, func_id);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
- goto out_mapping;
+ dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
}
- return 0;
-
-out_mapping:
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+err_mapping:
+ if (err)
+ __free_page(page);
-out_alloc:
- __free_page(page);
+ if (zero_addr == 0)
+ dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
return err;
}
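The zero_addr logic reads oddly but is deliberate; sketched as a flow:

/* 1. zero_addr starts at 1 ("no zero mapping taken").
 * 2. If dma_map_page() hands back bus address 0 -- valid for the
 *    IOMMU but rejected by firmware -- record it in zero_addr and
 *    map the same page again; the first mapping stays held, so the
 *    retry cannot return 0 a second time.
 * 3. After insert_page(), unmap the throwaway zero mapping
 *    (zero_addr == 0 exactly when the workaround fired).
 */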
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 63d89f787ad7..d5cf1ea1c89a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1757,11 +1757,17 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.cmd_exec = mlxsw_pci_cmd_exec,
};
-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
+ const struct pci_device_id *id)
{
unsigned long end;
mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+ if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
+ msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ return 0;
+ }
+
wmb(); /* reset needs to be written before we read control register */
end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
do {
@@ -1829,7 +1835,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->pdev = pdev;
pci_set_drvdata(pdev, mlxsw_pci);
- err = mlxsw_pci_sw_reset(mlxsw_pci);
+ err = mlxsw_pci_sw_reset(mlxsw_pci, id);
if (err) {
dev_err(&pdev->dev, "Software reset failed\n");
goto err_sw_reset;
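SwitchX-2 has no register that can be polled for reset completion, so the patch special-cases its PCI device ID with a fixed worst-case sleep, while other ASICs keep polling a ready bit until a timeout. A sketch of the two strategies, assuming a hypothetical read_ready() helper and is_legacy flag rather than the real MLXSW registers:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define RESET_TIMEOUT_MS 5000

static bool read_ready(void)    /* stands in for the firmware-ready register */
{
    return true;
}

static void sleep_ms(long ms)
{
    struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
    nanosleep(&ts, NULL);
}

static int sw_reset(bool is_legacy)
{
    /* ...the reset write itself would go here... */
    if (is_legacy) {
        sleep_ms(RESET_TIMEOUT_MS);     /* no ready bit: wait the worst case */
        return 0;
    }
    for (int waited = 0; waited < RESET_TIMEOUT_MS; waited += 100) {
        if (read_ready())
            return 0;
        sleep_ms(100);
    }
    return -1;                  /* the driver returns -EBUSY here */
}

int main(void)
{
    printf("reset rc = %d\n", sw_reset(false));
    return 0;
}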
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 107934fe06ce..348c77339d88 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -320,6 +320,8 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
lpm_tree);
if (err)
goto err_left_struct_set;
+ memcpy(&lpm_tree->prefix_usage, prefix_usage,
+ sizeof(lpm_tree->prefix_usage));
return lpm_tree;
err_left_struct_set:
@@ -343,7 +345,8 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
lpm_tree = &mlxsw_sp->router.lpm_trees[i];
- if (lpm_tree->proto == proto &&
+ if (lpm_tree->ref_count != 0 &&
+ lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
prefix_usage))
goto inc_ref_count;
@@ -1815,19 +1818,17 @@ err_fib_entry_insert:
return err;
}
-static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct fib_entry_notifier_info *fen_info)
+static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_entry *fib_entry;
if (mlxsw_sp->router.aborted)
- return 0;
+ return;
fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
- if (!fib_entry) {
- dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
- return -ENOENT;
- }
+ if (!fib_entry)
+ return;
if (fib_entry->ref_count == 1) {
mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
@@ -1835,7 +1836,6 @@ static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
- return 0;
}
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
@@ -1857,7 +1857,8 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_SP_LPM_TREE_MIN);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
if (err)
return err;
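Two related fixes here: prefix_usage is copied into the LPM tree struct only after the hardware accepts the tree, and the lookup loop skips pool slots whose ref_count is zero, since a freed slot can still carry stale prefix_usage from its previous user. A toy get-or-create over a fixed pool showing why the ref_count test matters:

#include <stdio.h>
#include <string.h>

#define POOL_SIZE 8

struct tree {
    unsigned int ref_count;
    char usage[16];             /* stands in for struct prefix_usage */
};

static struct tree pool[POOL_SIZE];

static struct tree *tree_get(const char *usage)
{
    int i;

    /* Only slots with a nonzero ref_count hold valid usage data. */
    for (i = 0; i < POOL_SIZE; i++) {
        if (pool[i].ref_count && strcmp(pool[i].usage, usage) == 0) {
            pool[i].ref_count++;
            return &pool[i];
        }
    }
    for (i = 0; i < POOL_SIZE; i++) {
        if (pool[i].ref_count == 0) {
            /* Record the usage only once the slot is actually claimed. */
            snprintf(pool[i].usage, sizeof(pool[i].usage), "%s", usage);
            pool[i].ref_count = 1;
            return &pool[i];
        }
    }
    return NULL;
}

int main(void)
{
    struct tree *a = tree_get("/24,/32");
    struct tree *b = tree_get("/24,/32");

    printf("shared slot: %d, refs: %u\n", a == b, a->ref_count);
    return 0;
}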
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index dc3c5ed73ecd..8c8f5d8a2113 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1094,6 +1094,7 @@ err_port_stp_state_set:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
+ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
port_not_usable:
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 0df1391f9663..32f2a45f4ab2 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -107,15 +107,7 @@ config QEDE
---help---
This enables the support for ...
-config INFINIBAND_QEDR
- tristate "QLogic qede RoCE sources [debug]"
- depends on QEDE && 64BIT
- select QED_LL2
- default n
- ---help---
- This provides a temporary node that allows the compilation
- and logical testing of the InfiniBand over Ethernet support
- for QLogic QED. This would be replaced by the 'real' option
- once the QEDR driver is added [+relocated].
+config QED_RDMA
+ bool
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index cda0af7fbc20..967acf322c09 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_selftest.o qed_dcbx.o qed_debug.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
+qed-$(CONFIG_QED_RDMA) += qed_roce.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 82370a1a59ad..0c42c240b5cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -47,13 +47,8 @@
#define TM_ALIGN BIT(TM_SHIFT)
#define TM_ELEM_SIZE 4
-/* ILT constants */
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
/* For RoCE we configure 64K pages to cover the RoCE maximum of 256K tasks. */
-#define ILT_DEFAULT_HW_P_SIZE 4
-#else
-#define ILT_DEFAULT_HW_P_SIZE 3
-#endif
+#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
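The series converts CONFIG_INFINIBAND_QEDR preprocessor blocks to the internal CONFIG_QED_RDMA bool tested with IS_ENABLED(), which evaluates to a compile-time 0 or 1, so both arms of the conditional stay visible to the compiler while dead code is still eliminated. A freestanding illustration of the idiom; the macro below is a stand-in for the kernel's IS_ENABLED():

#include <stdio.h>

/* Stand-in for IS_ENABLED(CONFIG_QED_RDMA); set to 0 to model =n. */
#define QED_RDMA_ENABLED 1

/* Both arms are parsed and type-checked, unlike an #if/#else pair,
 * yet the result is still a compile-time constant.
 */
#define ILT_DEFAULT_HW_P_SIZE (QED_RDMA_ENABLED ? 4 : 3)
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))

int main(void)
{
    printf("ILT page size: %u bytes\n",
           ILT_PAGE_IN_BYTES(ILT_DEFAULT_HW_P_SIZE));
    return 0;
}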
@@ -349,14 +344,14 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
return NULL;
}
-void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
p_mgr->srq_count = num_srqs;
}
-u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
@@ -1804,8 +1799,8 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
return 0;
}
-void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
- struct qed_rdma_pf_params *p_params)
+static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_pf_params *p_params)
{
u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
enum protocol_type proto;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 130da1c0490b..a4789a93b692 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1190,6 +1190,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
if (!dcbx_info)
return -ENOMEM;
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
if (rc) {
kfree(dcbx_info);
@@ -1225,6 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
if (!dcbx_info)
return NULL;
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
kfree(dcbx_info);
return NULL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 88e7d5bef909..68f19ca57f96 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -405,7 +405,7 @@ struct phy_defs {
/***************************** Constant Arrays *******************************/
/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
@@ -4028,10 +4028,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
}
/* Dump MCP Trace */
-enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
@@ -4130,10 +4130,10 @@ enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
}
/* Dump GRC FIFO */
-enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data;
@@ -4192,10 +4192,10 @@ enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
}
/* Dump IGU FIFO */
-enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data;
@@ -4255,10 +4255,11 @@ enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
}
/* Protection Override dump */
-enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 *num_dumped_dwords)
{
u32 offset = 0, size_param_offset, override_window_dwords;
@@ -6339,10 +6340,11 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
}
/* Wrapper for unifying the idle_chk and mcp_trace api */
-enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
+static enum dbg_status
+qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
{
u32 num_errors, num_warnnings;
@@ -6413,8 +6415,8 @@ static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
#define QED_RESULTS_BUF_MIN_SIZE 16
/* Generic function for decoding debug feature info */
-enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
- enum qed_dbg_features feature_idx)
+static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
+ enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx];
@@ -6480,8 +6482,9 @@ enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
}
/* Generic function for performing the dump of a debug feature. */
-enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_dbg_features feature_idx)
+static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 754f6a908858..edae5fc5fccd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -497,12 +497,13 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
PROTOCOLID_ROCE,
- 0) * 2;
+ NULL) * 2;
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
num_cons =
qed_cxt_get_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI, 0);
+ PROTOCOLID_ISCSI,
+ NULL);
n_eqes += 2 * num_cons;
}
@@ -1422,19 +1423,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
u32 *feat_num = p_hwfn->hw_info.feat_num;
int num_features = 1;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the
- * status blocks equally between L2 / RoCE but with consideration as
- * to how many l2 queues / cnqs we have
- */
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ if (IS_ENABLED(CONFIG_QED_RDMA) &&
+ p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ /* Each RoCE CNQ requires: 1 status block + 1 CNQ. We divide
+ * the status blocks equally between L2 / RoCE but with
+ * consideration as to how many l2 queues / cnqs we have.
+ */
num_features++;
feat_num[QED_RDMA_CNQ] =
min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
}
-#endif
+
feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
num_features,
RESC_NUM(p_hwfn, QED_L2_QUEUE));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 02a8be2faed7..63e1a1b0ef8e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -38,6 +38,7 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_roce.h"
#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
@@ -140,11 +141,11 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
qed_ll2_dealloc_buffer(cdev, buffer);
}
-void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- struct qed_ll2_rx_packet *p_pkt,
- struct core_rx_fast_path_cqe *p_cqe,
- bool b_last_packet)
+static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ struct qed_ll2_rx_packet *p_pkt,
+ struct core_rx_fast_path_cqe *p_cqe,
+ bool b_last_packet)
{
u16 packet_length = le16_to_cpu(p_cqe->packet_length);
struct qed_ll2_buffer *buffer = p_pkt->cookie;
@@ -515,7 +516,7 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
return rc;
}
-void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ll2_rx_packet *p_pkt = NULL;
@@ -537,8 +538,7 @@ void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
if (!p_pkt)
break;
- list_del(&p_pkt->list_entry);
- list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+ list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
rx_buf_addr = p_pkt->rx_buf_addr;
cookie = p_pkt->cookie;
@@ -992,9 +992,8 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
p_posting_packet = list_first_entry(&p_rx->posting_descq,
struct qed_ll2_rx_packet,
list_entry);
- list_del(&p_posting_packet->list_entry);
- list_add_tail(&p_posting_packet->list_entry,
- &p_rx->active_descq);
+ list_move_tail(&p_posting_packet->list_entry,
+ &p_rx->active_descq);
b_notify_fw = true;
}
@@ -1123,9 +1122,6 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(start_bd->addr, first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len);
- SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
- type);
-
DP_VERBOSE(p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
@@ -1188,8 +1184,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
if (!p_pkt)
break;
- list_del(&p_pkt->list_entry);
- list_add_tail(&p_pkt->list_entry, &p_tx->active_descq);
+ list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
}
SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
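Several of the LL2 hunks collapse a list_del() followed by list_add_tail() into list_move_tail(), which relinks the node in one call. A self-contained sketch of a circular doubly linked list showing what the combined helper does; the node/list types here are simplified stand-ins for struct list_head:

#include <stdio.h>

struct node { struct node *prev, *next; int val; };

static void list_init(struct node *h)
{
    h->prev = h->next = h;
}

static void list_del(struct node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

static void list_add_tail(struct node *n, struct node *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

/* The combined helper: unlink from wherever it is, append to head. */
static void list_move_tail(struct node *n, struct node *h)
{
    list_del(n);
    list_add_tail(n, h);
}

int main(void)
{
    struct node a = { .val = 1 }, from, to;

    list_init(&from);
    list_init(&to);
    list_add_tail(&a, &from);
    list_move_tail(&a, &to);
    printf("moved: %d\n", to.next->val);
    return 0;
}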
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 80a5dc2d652d..4e3d62a16cab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn,
*/
void qed_ll2_free(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_connections);
-void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t rx_buf_addr,
- u16 data_length,
- u8 data_length_error,
- u16 parse_flags,
- u16 vlan,
- u32 src_mac_addr_hi,
- u16 src_mac_addr_lo, bool b_last_packet);
-void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
-void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 4ee3151e80c2..c418360ba02a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -33,10 +33,8 @@
#include "qed_hw.h"
#include "qed_selftest.h"
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
-#endif
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
struct qed_sb_cnt_info sb_cnt_info;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- int num_l2_queues;
-#endif
+ int num_l2_queues = 0;
int rc;
int i;
@@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- num_l2_queues = 0;
+ if (!IS_ENABLED(CONFIG_QED_RDMA))
+ return 0;
+
for_each_hwfn(cdev, i)
num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
@@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
cdev->int_params.rdma_msix_cnt,
cdev->int_params.rdma_msix_base);
-#endif
return 0;
}
@@ -843,18 +839,20 @@ static void qed_update_pf_params(struct qed_dev *cdev,
{
int i;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- params->rdma_pf_params.num_qps = QED_ROCE_QPS;
- params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
- /* divide by 3 the MRs to avoid MF ILT overflow */
- params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
- params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
-#endif
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
p_hwfn->pf_params = *params;
}
+
+ if (!IS_ENABLED(CONFIG_QED_RDMA))
+ return;
+
+ params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+ params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+ /* Divide the MRs by 3 to avoid MF ILT overflow */
+ params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
+ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
static int qed_slowpath_start(struct qed_dev *cdev,
@@ -880,6 +878,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
}
}
+ cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
rc = qed_nic_setup(cdev);
if (rc)
goto err;
@@ -1432,7 +1431,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
-struct qed_selftest_ops qed_selftest_ops_pass = {
+static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
.selftest_register = &qed_selftest_register,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index b11beb559981..6a353ffb87a4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -129,17 +129,12 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
}
}
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
/* First sb id for RoCE is after all the l2 sb */
return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}
-u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
-{
- return QED_CAU_DEF_RX_TIMER_RES;
-}
-
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params)
@@ -162,7 +157,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
p_hwfn->p_rdma_info = p_rdma_info;
p_rdma_info->proto = PROTOCOLID_ROCE;
- num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
+ NULL);
p_rdma_info->num_qps = num_cons / 2;
@@ -275,7 +271,7 @@ free_rdma_info:
return rc;
}
-void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -527,6 +523,26 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ goto out;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+ return rc;
+}
+
static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
@@ -573,7 +589,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
-int qed_rdma_stop(void *rdma_cxt)
+static int qed_rdma_stop(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_close_func_ramrod_data *p_ramrod;
@@ -629,8 +645,8 @@ out:
return rc;
}
-int qed_rdma_add_user(void *rdma_cxt,
- struct qed_rdma_add_user_out_params *out_params)
+static int qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
u32 dpi_start_offset;
@@ -664,7 +680,7 @@ int qed_rdma_add_user(void *rdma_cxt,
return rc;
}
-struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
@@ -680,7 +696,7 @@ struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
return p_port;
}
-struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -690,7 +706,7 @@ struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
return p_hwfn->p_rdma_info->dev;
}
-void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -701,27 +717,7 @@ void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
- if (rc)
- goto out;
-
- rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
-out:
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
- return rc;
-}
-
-void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
struct qed_hwfn *p_hwfn;
u16 qz_num;
@@ -816,7 +812,7 @@ static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
return 0;
}
-int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
u32 returned_id;
@@ -836,7 +832,7 @@ int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
return rc;
}
-void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -873,8 +869,9 @@ qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
return toggle_bit;
}
-int qed_rdma_create_cq(void *rdma_cxt,
- struct qed_rdma_create_cq_in_params *params, u16 *icid)
+static int qed_rdma_create_cq(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params,
+ u16 *icid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
@@ -957,98 +954,10 @@ err:
return rc;
}
-int qed_rdma_resize_cq(void *rdma_cxt,
- struct qed_rdma_resize_cq_in_params *in_params,
- struct qed_rdma_resize_cq_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_resize_cq_output_params *p_ramrod_res;
- struct rdma_resize_cq_ramrod_data *p_ramrod;
- enum qed_rdma_toggle_bit toggle_bit;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- dma_addr_t ramrod_res_phys;
- u8 fw_return_code;
- int rc = -ENOMEM;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
-
- p_ramrod_res =
- (struct rdma_resize_cq_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- &ramrod_res_phys, GFP_KERNEL);
- if (!p_ramrod_res) {
- DP_NOTICE(p_hwfn,
- "qed resize cq failed: cannot allocate memory (ramrod)\n");
- return rc;
- }
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.cid = in_params->icid;
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_RESIZE_CQ,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- goto err;
-
- p_ramrod = &p_ent->ramrod.rdma_resize_cq;
-
- p_ramrod->flags = 0;
-
- /* toggle the bit for every resize or create cq for a given icid */
- toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
- in_params->icid);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
- in_params->pbl_two_level);
-
- p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
- p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
- p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
- DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
- DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc)
- goto err;
-
- if (fw_return_code != RDMA_RETURN_OK) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
- rc = -EINVAL;
- goto err;
- }
-
- out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
- out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
-
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
-
- return rc;
-
-err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
- DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);
-
- return rc;
-}
-
-int qed_rdma_destroy_cq(void *rdma_cxt,
- struct qed_rdma_destroy_cq_in_params *in_params,
- struct qed_rdma_destroy_cq_out_params *out_params)
+static int
+qed_rdma_destroy_cq(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *in_params,
+ struct qed_rdma_destroy_cq_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_destroy_cq_output_params *p_ramrod_res;
@@ -1169,7 +1078,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
-int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 responder_icid;
@@ -1793,9 +1702,9 @@ err:
return rc;
}
-int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
+static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
{
struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
struct roce_query_qp_req_output_params *p_req_ramrod_res;
@@ -1936,7 +1845,7 @@ err_resp:
return rc;
}
-int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
@@ -1985,9 +1894,9 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
return 0;
}
-int qed_rdma_query_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
+static int qed_rdma_query_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc;
@@ -2022,7 +1931,7 @@ int qed_rdma_query_qp(void *rdma_cxt,
return rc;
}
-int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc = 0;
@@ -2038,7 +1947,7 @@ int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
return rc;
}
-struct qed_rdma_qp *
+static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
struct qed_rdma_create_qp_in_params *in_params,
struct qed_rdma_create_qp_out_params *out_params)
@@ -2215,9 +2124,9 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_rdma_modify_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_modify_qp_in_params *params)
+static int qed_rdma_modify_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
enum qed_roce_qp_state prev_state;
@@ -2312,8 +2221,9 @@ int qed_rdma_modify_qp(void *rdma_cxt,
return rc;
}
-int qed_rdma_register_tid(void *rdma_cxt,
- struct qed_rdma_register_tid_in_params *params)
+static int
+qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_register_tid_ramrod_data *p_ramrod;
@@ -2450,7 +2360,7 @@ int qed_rdma_register_tid(void *rdma_cxt,
return rc;
}
-int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_deregister_tid_ramrod_data *p_ramrod;
@@ -2561,7 +2471,8 @@ void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_conf(p_hwfn, p_ptt);
}
-int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
+static int qed_rdma_start(void *rdma_cxt,
+ struct qed_rdma_start_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_ptt *p_ptt;
@@ -2601,7 +2512,7 @@ static int qed_rdma_init(struct qed_dev *cdev,
return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
}
-void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -2808,11 +2719,6 @@ static int qed_roce_ll2_stop(struct qed_dev *cdev)
struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
int rc;
- if (!cdev) {
- DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n");
- return -EINVAL;
- }
-
if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
return -EINVAL;
@@ -2849,7 +2755,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
int rc;
int i;
- if (!cdev || !pkt || !params) {
+ if (!pkt || !params) {
DP_ERR(cdev,
"roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
cdev, pkt, params);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 2f091e8a0f40..279f342af8db 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -95,26 +95,6 @@ struct qed_rdma_info {
enum protocol_type proto;
};
-struct qed_rdma_resize_cq_in_params {
- u16 icid;
- u32 cq_size;
- bool pbl_two_level;
- u64 pbl_ptr;
- u16 pbl_num_pages;
- u8 pbl_page_size_log;
-};
-
-struct qed_rdma_resize_cq_out_params {
- u32 prod;
- u32 cons;
-};
-
-struct qed_rdma_resize_cnq_in_params {
- u32 cnq_id;
- u32 pbl_page_size_log;
- u64 pbl_ptr;
-};
-
struct qed_rdma_qp {
struct regpair qp_handle;
struct regpair qp_handle_async;
@@ -181,36 +161,55 @@ struct qed_rdma_qp {
dma_addr_t shared_queue_phys_addr;
};
-int
-qed_rdma_add_user(void *rdma_cxt,
- struct qed_rdma_add_user_out_params *out_params);
-int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
-int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
-int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
-void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
-struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
-struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
-int
-qed_rdma_register_tid(void *rdma_cxt,
- struct qed_rdma_register_tid_in_params *params);
-void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
-int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
-int qed_rdma_stop(void *rdma_cxt);
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
-u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
-void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
-void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_async_roce_event(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe);
-int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
-int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
- struct qed_rdma_modify_qp_in_params *params);
-int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params);
-
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo, bool b_last_packet);
#else
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet) {}
+static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet) {}
+static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo,
+ bool b_last_packet) {}
#endif
#endif
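The header now follows the usual kernel pattern for optional subsystems: real prototypes when IS_ENABLED(CONFIG_QED_RDMA), and empty static inline stubs otherwise, so call sites compile unchanged either way and the stubs vanish entirely. A condensed model of the pattern:

#include <stdio.h>

#define FOO_ENABLED 0   /* models CONFIG_QED_RDMA; flip to 1 for =y */

#if FOO_ENABLED
void foo_event(int code);       /* real implementation lives elsewhere */
#else
static inline void foo_event(int code)
{
    (void)code;                 /* no-op stub, optimized away entirely */
}
#endif

int main(void)
{
    foo_event(42);              /* the call site is identical either way */
    printf("done\n");
    return 0;
}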
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 27c450fd2193..9c897bc68d05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -80,7 +80,6 @@ union ramrod_data {
struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
struct rdma_create_cq_ramrod_data rdma_create_cq;
- struct rdma_resize_cq_ramrod_data rdma_resize_cq;
struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 6c05402ea4dc..019960b7855a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -28,9 +28,7 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#include "qed_roce.h"
-#endif
/***************************************************************************
* Structures & Definitions
@@ -272,11 +270,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
case PROTOCOLID_ROCE:
qed_async_roce_event(p_hwfn, p_eqe);
return 0;
-#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 28dc58919c85..048a230c3ce0 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
-qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o
+qede-$(CONFIG_QED_RDMA) += qede_roce.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 9135b9d37dfa..cf8d3547aecf 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -349,12 +349,13 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
u8 count);
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
-#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
+#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index b7dbb4493a64..0100f5c0a4ec 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -748,6 +748,8 @@ static void qede_get_channels(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
channels->max_combined = QEDE_MAX_RSS_CNT(edev);
+ channels->max_rx = QEDE_MAX_RSS_CNT(edev);
+ channels->max_tx = QEDE_MAX_RSS_CNT(edev);
channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
edev->fp_num_rx;
channels->tx_count = edev->fp_num_tx;
@@ -812,6 +814,13 @@ static int qede_set_channels(struct net_device *dev,
edev->req_queues = count;
edev->req_num_tx = channels->tx_count;
edev->req_num_rx = channels->rx_count;
+ /* Reset the indirection table if rx queue count is updated */
+ if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
+ edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
+ memset(&edev->rss_params.rss_ind_table, 0,
+ sizeof(edev->rss_params.rss_ind_table));
+ }
+
if (netif_running(dev))
qede_reload(edev, NULL, NULL);
@@ -1045,6 +1054,12 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
struct qede_dev *edev = netdev_priv(dev);
int i;
+ if (edev->dev_info.common.num_hwfns > 1) {
+ DP_INFO(edev,
+ "RSS configuration is not supported for 100G devices\n");
+ return -EOPNOTSUPP;
+ }
+
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
@@ -1176,8 +1191,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
}
first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
txq->sw_tx_cons++;
txq->sw_tx_ring[idx].skb = NULL;
@@ -1191,8 +1206,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
+ int i, rc = 0;
u8 *data_ptr;
- int i;
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
@@ -1211,46 +1226,60 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
* queue and that the loopback traffic is not IP.
*/
for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
- if (qede_has_rx_work(rxq))
+ if (!qede_has_rx_work(rxq)) {
+ usleep_range(100, 200);
+ continue;
+ }
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from doing speculative
+ * reads of the CQE/BD before reading hw_comp_cons. Without it,
+ * the CPU could read the CQE before FW has written it (FW
+ * writes the CQE and then the SB) and would consume a stale
+ * CQE.
+ */
+ rmb();
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+ fp_cqe->placement_offset +
+ sw_rx_data->page_offset);
+ if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
+ ether_addr_equal(data_ptr + ETH_ALEN,
+ edev->ndev->dev_addr)) {
+ for (i = ETH_HLEN; i < len; i++)
+ if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+ rc = -1;
+ break;
+ }
+
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
break;
- usleep_range(100, 200);
+ }
+
+ DP_INFO(edev, "Not the transmitted packet\n");
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
}
- if (!qede_has_rx_work(rxq)) {
+ if (i == QEDE_SELFTEST_POLL_COUNT) {
DP_NOTICE(edev, "Failed to receive the traffic\n");
return -1;
}
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+ qede_update_rx_prod(edev, rxq);
- /* Memory barrier to prevent the CPU from doing speculative reads of CQE
- * / BD before reading hw_comp_cons. If the CQE is read before it is
- * written by FW, then FW writes CQE and SB, and then the CPU reads the
- * hw_comp_cons, it will use an old CQE.
- */
- rmb();
-
- /* Get the CQE from the completion ring */
- cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
-
- /* Get the data from the SW ring */
- sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
- sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- fp_cqe = &cqe->fast_path_regular;
- len = le16_to_cpu(fp_cqe->len_on_first_bd);
- data_ptr = (u8 *)(page_address(sw_rx_data->data) +
- fp_cqe->placement_offset + sw_rx_data->page_offset);
- for (i = ETH_HLEN; i < len; i++)
- if (data_ptr[i] != (unsigned char)(i & 0xff)) {
- DP_NOTICE(edev, "Loopback test failed\n");
- qede_recycle_rx_bd_ring(rxq, edev, 1);
- return -1;
- }
-
- qede_recycle_rx_bd_ring(rxq, edev, 1);
-
- return 0;
+ return rc;
}
static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
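The reworked selftest receiver polls up to QEDE_SELFTEST_POLL_COUNT times and fully consumes one completion per successful poll, recycling the buffer whether or not the frame matched, instead of giving up after the first completion. The rmb() ensures the completion index is read before the CQE contents. A userspace analogue that uses a C11 acquire/release pair where the kernel uses hw_cons_ptr plus rmb():

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned prod;
static int ring[16];

static void produce(int v)
{
    unsigned p = atomic_load_explicit(&prod, memory_order_relaxed);

    ring[p % 16] = v;
    /* release: the ring write is visible before the index moves */
    atomic_store_explicit(&prod, p + 1, memory_order_release);
}

static int consume(unsigned *cons)
{
    /* acquire pairs with the release above, like hw_cons_ptr + rmb() */
    unsigned p = atomic_load_explicit(&prod, memory_order_acquire);

    if (*cons == p)
        return -1;              /* nothing yet: caller sleeps and retries */
    return ring[(*cons)++ % 16];
}

int main(void)
{
    unsigned cons = 0;

    produce(7);
    printf("got %d\n", consume(&cons));
    return 0;
}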
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 4f298656bf47..8488ad36a2b8 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -317,8 +317,8 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
split_bd_len = BD_UNMAP_LEN(split);
bds_consumed++;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
@@ -363,8 +363,8 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
nbd--;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < nbd; i++) {
@@ -964,8 +964,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
return 0;
}
-static inline void qede_update_rx_prod(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
@@ -3006,7 +3005,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
txq->num_tx_buffers = edev->q_num_tx_buffers;
/* Allocate the parallel driver ring for Tx buffers */
- size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
+ size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE;
txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
if (!txq->sw_tx_ring) {
DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
@@ -3017,7 +3016,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- NUM_TX_BDS_MAX,
+ TX_RING_SIZE,
sizeof(*p_virt), &txq->tx_pbl);
if (rc)
goto err;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index e97968ed4b8f..6fb3bee904d3 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1021,14 +1021,18 @@ void emac_mac_down(struct emac_adapter *adpt)
napi_disable(&adpt->rx_q.napi);
phy_stop(adpt->phydev);
- phy_disconnect(adpt->phydev);
- /* disable mac irq */
+ /* Interrupts must be disabled before the PHY is disconnected, to
+ * avoid a race condition where adjust_link is null when we get
+ * an interrupt.
+ */
writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
writel(0, adpt->base + EMAC_INT_MASK);
synchronize_irq(adpt->irq.irq);
free_irq(adpt->irq.irq, &adpt->irq);
+ phy_disconnect(adpt->phydev);
+
emac_mac_reset(adpt);
emac_tx_q_descs_free(adpt);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index e4e1925d18a4..8be526af659a 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -568,6 +568,7 @@ static const struct of_device_id emac_dt_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, emac_dt_match);
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id emac_acpi_match[] = {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b698ea544bfc..2830190aaace 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8269,7 +8269,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((sizeof(dma_addr_t) > 4) &&
(use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
/* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
if (!pci_is_pcie(pdev))
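The r8169 change addresses a common pitfall: pci_set_dma_mask() only raises the streaming-DMA mask, and the coherent (consistent) mask must be raised separately before the driver may advertise 64-bit addressing; if either call fails, the driver has to fall back to 32 bits. A sketch of that fallback ladder, with stubbed mask setters in place of the PCI API:

#include <stdbool.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Stubs standing in for pci_set_dma_mask()/pci_set_consistent_dma_mask(). */
static bool hw_supports(unsigned long long mask)
{
    return mask <= DMA_BIT_MASK(64);
}

static int set_dma_mask(unsigned long long m)
{
    return hw_supports(m) ? 0 : -5;
}

static int set_consistent_dma_mask(unsigned long long m)
{
    return hw_supports(m) ? 0 : -5;
}

int main(void)
{
    /* Both masks must succeed before advertising 64-bit DMA. */
    if (!set_dma_mask(DMA_BIT_MASK(64)) &&
        !set_consistent_dma_mask(DMA_BIT_MASK(64)))
        printf("using 64-bit DMA\n");
    else if (!set_dma_mask(DMA_BIT_MASK(32)))
        printf("falling back to 32-bit DMA\n");
    return 0;
}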
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 55b2ab9dc320..67df4cf93362 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1471,7 +1471,7 @@ static int rocker_world_check_init(struct rocker_port *rocker_port)
if (rocker->wops) {
if (rocker->wops->mode != mode) {
dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
- return err;
+ return -EINVAL;
}
return 0;
}
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 431a60804272..4ca461322d60 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1493,8 +1493,6 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
- if (found)
- *index = found->index;
updating = found && adding;
removing = found && !adding;
@@ -1508,9 +1506,11 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
resolved = false;
} else if (removing) {
ofdpa_neigh_del(trans, found);
+ *index = found->index;
} else if (updating) {
ofdpa_neigh_update(found, trans, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
+ *index = found->index;
} else {
err = -ENOENT;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 4ec7397e7fb3..a1b17cd7886b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -347,10 +347,9 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
for (i = 0; i < size; i++) {
- if (p->des0)
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
- i, (unsigned int)virt_to_phys(p),
- p->des0, p->des1, p->des2, p->des3);
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(p),
+ p->des0, p->des1, p->des2, p->des3);
p++;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f94e0282451b..758b4e2c783c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -144,7 +144,7 @@ int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
-int stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fa4a82f4656f..10909c9c0033 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -678,7 +678,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
- return stmmac_ptp_register(priv);
+ stmmac_ptp_register(priv);
+
+ return 0;
}
static void stmmac_release_ptp(struct stmmac_priv *priv)
@@ -1710,7 +1712,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (init_ptp) {
ret = stmmac_init_ptp(priv);
if (ret)
- netdev_warn(priv->dev, "PTP support cannot init.\n");
+ netdev_warn(priv->dev, "fail to init PTP.\n");
}
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 289d52725a6c..1477471f8d44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -177,7 +177,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
* Description: this function will register the ptp clock driver
* to kernel. It also does some house keeping work.
*/
-int stmmac_ptp_register(struct stmmac_priv *priv)
+void stmmac_ptp_register(struct stmmac_priv *priv)
{
spin_lock_init(&priv->ptp_lock);
priv->ptp_clock_ops = stmmac_ptp_clock_ops;
@@ -185,15 +185,10 @@ int stmmac_ptp_register(struct stmmac_priv *priv)
priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
priv->device);
if (IS_ERR(priv->ptp_clock)) {
+ netdev_err(priv->dev, "ptp_clock_register failed\n");
priv->ptp_clock = NULL;
- return PTR_ERR(priv->ptp_clock);
- }
-
- spin_lock_init(&priv->ptp_lock);
-
- netdev_dbg(priv->dev, "Added PTP HW clock successfully\n");
-
- return 0;
+ } else if (priv->ptp_clock)
+ netdev_info(priv->dev, "registered PTP clock\n");
}
/**
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index eaa51ce8bd6d..70533017aaa7 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -982,11 +982,13 @@ static int dwceqos_mii_probe(struct net_device *ndev)
if (netif_msg_probe(lp))
phy_attached_info(phydev);
- phydev->supported &= PHY_GBIT_FEATURES;
+ phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
lp->link = 0;
lp->speed = 0;
lp->duplex = DUPLEX_UNKNOWN;
+ lp->flowcontrol.autoneg = AUTONEG_ENABLE;
return 0;
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 752bcaa852e4..85a423a66478 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -58,9 +58,9 @@ struct geneve_dev {
struct hlist_node hlist; /* vni hash table */
struct net *net; /* netns for packet i/o */
struct net_device *dev; /* netdev for geneve tunnel */
- struct geneve_sock *sock4; /* IPv4 socket used for geneve tunnel */
+ struct geneve_sock __rcu *sock4; /* IPv4 socket used for geneve tunnel */
#if IS_ENABLED(CONFIG_IPV6)
- struct geneve_sock *sock6; /* IPv6 socket used for geneve tunnel */
+ struct geneve_sock __rcu *sock6; /* IPv6 socket used for geneve tunnel */
#endif
u8 vni[3]; /* virtual network ID for tunnel */
u8 ttl; /* TTL override */
@@ -453,7 +453,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
skb_gro_pull(skb, gh_len);
skb_gro_postpull_rcsum(skb, gh, gh_len);
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
@@ -543,9 +543,19 @@ static void __geneve_sock_release(struct geneve_sock *gs)
static void geneve_sock_release(struct geneve_dev *geneve)
{
- __geneve_sock_release(geneve->sock4);
+ struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4);
#if IS_ENABLED(CONFIG_IPV6)
- __geneve_sock_release(geneve->sock6);
+ struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6);
+
+ rcu_assign_pointer(geneve->sock6, NULL);
+#endif
+
+ rcu_assign_pointer(geneve->sock4, NULL);
+ synchronize_net();
+
+ __geneve_sock_release(gs4);
+#if IS_ENABLED(CONFIG_IPV6)
+ __geneve_sock_release(gs6);
#endif
}
@@ -586,10 +596,10 @@ out:
gs->flags = geneve->flags;
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
- geneve->sock6 = gs;
+ rcu_assign_pointer(geneve->sock6, gs);
else
#endif
- geneve->sock4 = gs;
+ rcu_assign_pointer(geneve->sock4, gs);
hash = geneve_net_vni_hash(geneve->vni);
hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
@@ -603,9 +613,7 @@ static int geneve_open(struct net_device *dev)
bool metadata = geneve->collect_md;
int ret = 0;
- geneve->sock4 = NULL;
#if IS_ENABLED(CONFIG_IPV6)
- geneve->sock6 = NULL;
if (ipv6 || metadata)
ret = geneve_sock_add(geneve, true);
#endif
@@ -720,6 +728,9 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct rtable *rt = NULL;
__u8 tos;
+ if (!rcu_dereference(geneve->sock4))
+ return ERR_PTR(-EIO);
+
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_mark = skb->mark;
fl4->flowi4_proto = IPPROTO_UDP;
@@ -772,11 +783,15 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs6 = geneve->sock6;
struct dst_entry *dst = NULL;
struct dst_cache *dst_cache;
+ struct geneve_sock *gs6;
__u8 prio;
+ gs6 = rcu_dereference(geneve->sock6);
+ if (!gs6)
+ return ERR_PTR(-EIO);
+
memset(fl6, 0, sizeof(*fl6));
fl6->flowi6_mark = skb->mark;
fl6->flowi6_proto = IPPROTO_UDP;
@@ -842,7 +857,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_info *info)
{
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs4 = geneve->sock4;
+ struct geneve_sock *gs4;
struct rtable *rt = NULL;
const struct iphdr *iip; /* interior IP header */
int err = -EINVAL;
@@ -853,6 +868,10 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
u32 flags = geneve->flags;
+ gs4 = rcu_dereference(geneve->sock4);
+ if (!gs4)
+ goto tx_error;
+
if (geneve->collect_md) {
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
@@ -932,9 +951,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_info *info)
{
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs6 = geneve->sock6;
struct dst_entry *dst = NULL;
const struct iphdr *iip; /* interior IP header */
+ struct geneve_sock *gs6;
int err = -EINVAL;
struct flowi6 fl6;
__u8 prio, ttl;
@@ -943,6 +962,10 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
u32 flags = geneve->flags;
+ gs6 = rcu_dereference(geneve->sock6);
+ if (!gs6)
+ goto tx_error;
+
if (geneve->collect_md) {
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
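The geneve hunks above all implement one RCU discipline for the tunnel sockets: the control path (running under RTNL) unpublishes the pointer, waits a grace period with synchronize_net(), and only then tears the socket down, while the datapath takes a plain rcu_dereference() and fails the transmit if the socket is already gone. A minimal sketch of the pattern; my_dev and my_sock are illustrative names, not from the driver:

struct my_sock;				/* opaque payload */

struct my_dev {
	struct my_sock __rcu *sock;
};

/* control path, runs under RTNL */
static void my_sock_release(struct my_dev *dev)
{
	struct my_sock *s = rtnl_dereference(dev->sock);

	rcu_assign_pointer(dev->sock, NULL);	/* unpublish */
	synchronize_net();			/* wait out RCU readers */
	kfree(s);				/* now safe to free */
}

/* datapath, runs inside rcu_read_lock() (e.g. ndo_start_xmit) */
static int my_xmit(struct my_dev *dev)
{
	struct my_sock *s = rcu_dereference(dev->sock);

	if (!s)
		return -EIO;	/* socket torn down under us */
	/* ... s stays valid until rcu_read_unlock() ... */
	return 0;
}

This is why geneve_get_v4_rt() and geneve_get_v6_dst() now return -EIO on a NULL socket: the transmit path can legitimately race with geneve_sock_release().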
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3b28cf127f9d..9522763c8faf 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -447,7 +447,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
* Set up the send-side checksum offload only if this is not a
* GSO packet.
*/
- if (skb_is_gso(skb)) {
+ if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
@@ -607,15 +607,18 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
- if (csum_info) {
- /* We only look at the IP checksum here.
- * Should we be dropping the packet if checksum
- * failed? How do we deal with other checksums - TCP/UDP?
- */
- if (csum_info->receive.ip_checksum_succeeded)
+
+ /* skb is already created with CHECKSUM_NONE */
+ skb_checksum_none_assert(skb);
+
+ /*
+ * In Linux, the IP checksum is always checked.
+ * Do L4 checksum offload if enabled and present.
+ */
+ if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+ if (csum_info->receive.tcp_checksum_succeeded ||
+ csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
}
if (vlan_tci & VLAN_TAG_PRESENT)
@@ -696,12 +699,8 @@ int netvsc_recv_callback(struct hv_device *device_obj,
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_device *dev = net_device_ctx->device_ctx;
-
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info));
}
static void netvsc_get_channels(struct net_device *net,
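Two distinct fixes in the netvsc hunk: the transmit side only programs LSO when the parsed transport header is actually TCP or UDP, and the receive side stops promoting packets on the host's IP-checksum bit alone. The new receive logic, condensed (csum_info layout as in the driver above):

	skb->protocol = eth_type_trans(skb, net);

	/* freshly allocated skbs are CHECKSUM_NONE; make it explicit */
	skb_checksum_none_assert(skb);

	/*
	 * The stack always verifies the IP header checksum itself, so
	 * only the L4 result matters, and only while the user has left
	 * RX checksum offload (NETIF_F_RXCSUM) enabled.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM) &&
	    (csum_info->receive.tcp_checksum_succeeded ||
	     csum_info->receive.udp_checksum_succeeded))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

Everything else falls through to software checksum verification rather than being marked by hand.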
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 0a715ab9d9cc..cc00eb0db5d2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -397,6 +397,14 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
+static bool send_sci(const struct macsec_secy *secy)
+{
+ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+ return tx_sc->send_sci ||
+ (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
+}
+
static sci_t make_sci(u8 *addr, __be16 port)
{
sci_t sci;
@@ -437,15 +445,15 @@ static unsigned int macsec_extra_len(bool sci_present)
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
- const struct macsec_secy *secy, u32 pn)
+ const struct macsec_secy *secy, u32 pn,
+ bool sci_present)
{
const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
- memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
+ memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
h->eth.h_proto = htons(ETH_P_MACSEC);
- if (tx_sc->send_sci ||
- (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
+ if (sci_present) {
h->tci_an |= MACSEC_TCI_SC;
memcpy(&h->secure_channel_id, &secy->sci,
sizeof(h->secure_channel_id));
@@ -650,6 +658,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct macsec_tx_sc *tx_sc;
struct macsec_tx_sa *tx_sa;
struct macsec_dev *macsec = macsec_priv(dev);
+ bool sci_present;
u32 pn;
secy = &macsec->secy;
@@ -687,7 +696,8 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
unprotected_len = skb->len;
eth = eth_hdr(skb);
- hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
+ sci_present = send_sci(secy);
+ hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
memmove(hh, eth, 2 * ETH_ALEN);
pn = tx_sa_update_pn(tx_sa, secy);
@@ -696,7 +706,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
kfree_skb(skb);
return ERR_PTR(-ENOLINK);
}
- macsec_fill_sectag(hh, secy, pn);
+ macsec_fill_sectag(hh, secy, pn, sci_present);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
skb_put(skb, secy->icv_len);
@@ -726,10 +736,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
skb_to_sgvec(skb, sg, 0, skb->len);
if (tx_sc->encrypt) {
- int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
+ int len = skb->len - macsec_hdr_len(sci_present) -
secy->icv_len;
aead_request_set_crypt(req, sg, sg, len, iv);
- aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
+ aead_request_set_ad(req, macsec_hdr_len(sci_present));
} else {
aead_request_set_crypt(req, sg, sg, 0, iv);
aead_request_set_ad(req, skb->len - secy->icv_len);
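The macsec change is about a single source of truth: whether the SCI is carried in the SecTAG depends not only on the send_sci policy bit but also on n_rx_sc, end_station and scb, and the old code evaluated the policy bit in some places and the full condition in others, so the reserved headroom, the TCI.SC bit, and the AEAD associated-data length could disagree (the SecTAG grows by the 8-byte SCI when it is present). With the new send_sci() helper the decision is made once per packet and threaded through every consumer:

	bool sci_present = send_sci(secy);		/* decide once */

	/* every consumer derives from the same bool, so these agree: */
	skb_push(skb, macsec_extra_len(sci_present));	/* SecTAG room */
	macsec_fill_sectag(hh, secy, pn, sci_present);	/* TCI.SC + SCI */
	aead_request_set_ad(req, macsec_hdr_len(sci_present));	/* AAD len */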
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index dd47b69ddcff..c1e52b9dc58d 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -42,19 +42,24 @@
#define AT803X_MMD_ACCESS_CONTROL 0x0D
#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
#define AT803X_FUNC_DATA 0x4003
+#define AT803X_REG_CHIP_CONFIG 0x1f
+#define AT803X_BT_BX_REG_SEL 0x8000
#define AT803X_DEBUG_ADDR 0x1D
#define AT803X_DEBUG_DATA 0x1E
+#define AT803X_MODE_CFG_MASK 0x0F
+#define AT803X_MODE_CFG_SGMII 0x01
+
+#define AT803X_PSSR 0x11 /* PHY-Specific Status Register */
+#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
+
#define AT803X_DEBUG_REG_0 0x00
#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
#define AT803X_DEBUG_REG_5 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
-#define AT803X_REG_CHIP_CONFIG 0x1f
-#define AT803X_BT_BX_REG_SEL 0x8000
-
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
#define ATH8035_PHY_ID 0x004dd072
@@ -210,7 +215,6 @@ static int at803x_suspend(struct phy_device *phydev)
{
int value;
int wol_enabled;
- int ccr;
mutex_lock(&phydev->lock);
@@ -226,16 +230,6 @@ static int at803x_suspend(struct phy_device *phydev)
phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
- goto done;
-
- /* also power-down SGMII interface */
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
- phy_write(phydev, MII_BMCR, phy_read(phydev, MII_BMCR) | BMCR_PDOWN);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
-
-done:
mutex_unlock(&phydev->lock);
return 0;
@@ -244,7 +238,6 @@ done:
static int at803x_resume(struct phy_device *phydev)
{
int value;
- int ccr;
mutex_lock(&phydev->lock);
@@ -252,17 +245,6 @@ static int at803x_resume(struct phy_device *phydev)
value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
- goto done;
-
- /* also power-up SGMII interface */
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
- value = phy_read(phydev, MII_BMCR) & ~(BMCR_PDOWN | BMCR_ISOLATE);
- phy_write(phydev, MII_BMCR, value);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
-
-done:
mutex_unlock(&phydev->lock);
return 0;
@@ -382,6 +364,36 @@ static void at803x_link_change_notify(struct phy_device *phydev)
}
}
+static int at803x_aneg_done(struct phy_device *phydev)
+{
+ int ccr;
+
+ int aneg_done = genphy_aneg_done(phydev);
+ if (aneg_done != BMSR_ANEGCOMPLETE)
+ return aneg_done;
+
+ /*
+ * In SGMII mode, if the copper-side autoneg completed successfully,
+ * also check the SGMII-side autoneg result.
+ */
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ if ((ccr & AT803X_MODE_CFG_MASK) != AT803X_MODE_CFG_SGMII)
+ return aneg_done;
+
+ /* switch to SGMII/fiber page */
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
+
+ /* check if the SGMII link is OK. */
+ if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) {
+ pr_warn("803x_aneg_done: SGMII link is not ok\n");
+ aneg_done = 0;
+ }
+ /* switch back to copper page */
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
+
+ return aneg_done;
+}
+
static struct phy_driver at803x_driver[] = {
{
/* ATHEROS 8035 */
@@ -433,6 +445,7 @@ static struct phy_driver at803x_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
} };
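at803x_aneg_done() is also a compact example of paged register access on these Atheros PHYs: bit 15 of register 0x1f (AT803X_BT_BX_REG_SEL) selects between the copper and the SGMII/fiber register banks. A hedged sketch of the access pattern as a standalone helper; the helper name is illustrative, not driver API, and error handling is abbreviated:

/* Read one register from the SGMII/fiber page, restoring the copper
 * page afterwards. */
static int at803x_read_fiber_reg(struct phy_device *phydev, u32 regnum)
{
	int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
	int val;

	if (ccr < 0)
		return ccr;

	phy_write(phydev, AT803X_REG_CHIP_CONFIG,
		  ccr & ~AT803X_BT_BX_REG_SEL);		/* fiber page */
	val = phy_read(phydev, regnum);
	phy_write(phydev, AT803X_REG_CHIP_CONFIG,
		  ccr | AT803X_BT_BX_REG_SEL);		/* copper page */

	return val;
}

The suspend/resume hunks above drop the same page flip that powered the SGMII side down and up; this diff removes that behaviour rather than relocating it.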
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 03d54c4adc88..800b39f06279 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -19,6 +19,7 @@
#define TI_DP83848C_PHY_ID 0x20005ca0
#define NS_DP83848C_PHY_ID 0x20005c90
#define TLK10X_PHY_ID 0x2000a210
+#define TI_DP83822_PHY_ID 0x2000a240
/* Registers */
#define DP83848_MICR 0x11 /* MII Interrupt Control Register */
@@ -77,6 +78,7 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
{ TLK10X_PHY_ID, 0xfffffff0 },
+ { TI_DP83822_PHY_ID, 0xfffffff0 },
{ }
};
MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
@@ -105,6 +107,7 @@ static struct phy_driver dp83848_driver[] = {
DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
+ DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
};
module_phy_driver(dp83848_driver);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index f79eb12c326a..125cff57c759 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -433,13 +433,13 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 0);
- if (ret == -ENODEV)
+ if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 0);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
- if (ret == -ENODEV) {
+ if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
}
@@ -497,13 +497,13 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 1);
- if (ret == -ENODEV)
+ if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 1);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
- if (ret == -ENODEV) {
+ if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
}
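The asix change widens the set of errors that abort the MDIO poll loop: a USB control-transfer timeout (-ETIMEDOUT) on a wedged or disconnecting adapter must stop the retry loop immediately and reach the caller, instead of being overwritten by the next iteration's return value. The general shape as a sketch; poll_once(), READY_BIT and my_usb_dev are stand-ins, not driver API:

static int poll_ready(struct my_usb_dev *dev)
{
	int tries = 0;
	u8 status = 0;
	int ret;

	do {
		ret = poll_once(dev, &status);	/* stand-in helper */
		if (ret == -ENODEV || ret == -ETIMEDOUT)
			return ret;		/* fatal: report, no retry */
		usleep_range(1000, 1100);
	} while (!(status & READY_BIT) && ++tries < 30);

	return (status & READY_BIT) ? 0 : -ETIMEDOUT;
}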
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 5662babf0583..3e37724d30ae 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -151,7 +151,7 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
- if (status < 0) {
+ if (status) {
usb_set_intfdata(intf, NULL);
usb_driver_release_interface(driver_of(intf), intf);
return status;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0c36de121eb0..e34b1297c96a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2279,6 +2279,7 @@ vmxnet3_set_mc(struct net_device *netdev)
&adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL;
dma_addr_t new_table_pa = 0;
+ bool new_table_pa_valid = false;
u32 new_mode = VMXNET3_RXM_UCAST;
if (netdev->flags & IFF_PROMISC) {
@@ -2307,13 +2308,15 @@ vmxnet3_set_mc(struct net_device *netdev)
new_table,
sz,
PCI_DMA_TODEVICE);
+ if (!dma_mapping_error(&adapter->pdev->dev,
+ new_table_pa)) {
+ new_mode |= VMXNET3_RXM_MCAST;
+ new_table_pa_valid = true;
+ rxConf->mfTablePA = cpu_to_le64(
+ new_table_pa);
+ }
}
-
- if (!dma_mapping_error(&adapter->pdev->dev,
- new_table_pa)) {
- new_mode |= VMXNET3_RXM_MCAST;
- rxConf->mfTablePA = cpu_to_le64(new_table_pa);
- } else {
+ if (!new_table_pa_valid) {
netdev_info(netdev,
"failed to copy mcast list, setting ALL_MULTI\n");
new_mode |= VMXNET3_RXM_ALL_MULTI;
@@ -2338,7 +2341,7 @@ vmxnet3_set_mc(struct net_device *netdev)
VMXNET3_CMD_UPDATE_MAC_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- if (new_table_pa)
+ if (new_table_pa_valid)
dma_unmap_single(&adapter->pdev->dev, new_table_pa,
rxConf->mfTableLen, PCI_DMA_TODEVICE);
kfree(new_table);
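The vmxnet3 fix is a textbook DMA-API point: dma_addr_t 0 can be a perfectly valid bus address on some platforms, so "if (new_table_pa)" is not a correct was-the-mapping-created test. The result of dma_map_single() may only be judged with dma_mapping_error(), and whether to unmap later has to be tracked in a separate flag:

	dma_addr_t pa;
	bool pa_valid = false;

	pa = dma_map_single(dev, buf, sz, DMA_TO_DEVICE);
	if (!dma_mapping_error(dev, pa))
		pa_valid = true;	/* the only reliable success test */

	/* ... */

	if (pa_valid)			/* never "if (pa)" */
		dma_unmap_single(dev, pa, sz, DMA_TO_DEVICE);

(The driver itself uses the older PCI_DMA_TODEVICE spelling; the sketch uses the generic DMA API equivalent.)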
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 85c271c70d42..820de6a9ddde 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -956,6 +956,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
if (skb->pkt_type == PACKET_LOOPBACK) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
+ IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
skb->pkt_type = PACKET_HOST;
goto out;
}
@@ -996,6 +997,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
{
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
+ IPCB(skb)->flags |= IPSKB_L3SLAVE;
/* loopback traffic; do not push through packet taps again.
* Reset pkt_type for upper layers to process skb
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index c0170b6956bb..cb5cc7c03160 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -583,7 +583,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
}
}
- pp = eth_gro_receive(head, skb);
+ pp = call_gro_receive(eth_gro_receive, head, skb);
flush = 0;
out:
@@ -943,17 +943,20 @@ static bool vxlan_snoop(struct net_device *dev,
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
struct vxlan_dev *vxlan;
+ struct vxlan_sock *sock4;
+ struct vxlan_sock *sock6 = NULL;
unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
+ sock4 = rtnl_dereference(dev->vn4_sock);
+
/* The vxlan_sock is only used by dev, so leaving the group has
* no effect on other vxlan devices.
*/
- if (family == AF_INET && dev->vn4_sock &&
- atomic_read(&dev->vn4_sock->refcnt) == 1)
+ if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1)
return false;
#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6 && dev->vn6_sock &&
- atomic_read(&dev->vn6_sock->refcnt) == 1)
+ sock6 = rtnl_dereference(dev->vn6_sock);
+ if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1)
return false;
#endif
@@ -961,10 +964,12 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
if (!netif_running(vxlan->dev) || vxlan == dev)
continue;
- if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock)
+ if (family == AF_INET &&
+ rtnl_dereference(vxlan->vn4_sock) != sock4)
continue;
#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock)
+ if (family == AF_INET6 &&
+ rtnl_dereference(vxlan->vn6_sock) != sock6)
continue;
#endif
@@ -1005,22 +1010,25 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
- bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock);
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
- bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock);
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ rcu_assign_pointer(vxlan->vn6_sock, NULL);
#endif
+ rcu_assign_pointer(vxlan->vn4_sock, NULL);
synchronize_net();
- if (ipv4) {
- udp_tunnel_sock_release(vxlan->vn4_sock->sock);
- kfree(vxlan->vn4_sock);
+ if (__vxlan_sock_release_prep(sock4)) {
+ udp_tunnel_sock_release(sock4->sock);
+ kfree(sock4);
}
#if IS_ENABLED(CONFIG_IPV6)
- if (ipv6) {
- udp_tunnel_sock_release(vxlan->vn6_sock->sock);
- kfree(vxlan->vn6_sock);
+ if (__vxlan_sock_release_prep(sock6)) {
+ udp_tunnel_sock_release(sock6->sock);
+ kfree(sock6);
}
#endif
}
@@ -1036,18 +1044,21 @@ static int vxlan_igmp_join(struct vxlan_dev *vxlan)
int ret = -EINVAL;
if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,
};
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
lock_sock(sk);
ret = ip_mc_join_group(sk, &mreq);
release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- sk = vxlan->vn6_sock->sock->sk;
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
lock_sock(sk);
ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
&ip->sin6.sin6_addr);
@@ -1067,18 +1078,21 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
int ret = -EINVAL;
if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,
};
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
lock_sock(sk);
ret = ip_mc_leave_group(sk, &mreq);
release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- sk = vxlan->vn6_sock->sock->sk;
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
lock_sock(sk);
ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
&ip->sin6.sin6_addr);
@@ -1828,11 +1842,15 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
+ struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct dst_entry *ndst;
struct flowi6 fl6;
int err;
+ if (!sock6)
+ return ERR_PTR(-EIO);
+
if (tos && !info)
use_cache = false;
if (use_cache) {
@@ -1850,7 +1868,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowi6_proto = IPPROTO_UDP;
err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
- vxlan->vn6_sock->sock->sk,
+ sock6->sock->sk,
&ndst, &fl6);
if (err < 0)
return ERR_PTR(err);
@@ -1995,9 +2013,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
if (dst->sa.sa_family == AF_INET) {
- if (!vxlan->vn4_sock)
+ struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
+
+ if (!sock4)
goto drop;
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
rt = vxlan_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
@@ -2050,12 +2070,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
} else {
+ struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
struct dst_entry *ndst;
u32 rt6i_flags;
- if (!vxlan->vn6_sock)
+ if (!sock6)
goto drop;
- sk = vxlan->vn6_sock->sock->sk;
+ sk = sock6->sock->sk;
ndst = vxlan6_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
@@ -2403,9 +2424,10 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
if (ip_tunnel_info_af(info) == AF_INET) {
+ struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
struct rtable *rt;
- if (!vxlan->vn4_sock)
+ if (!sock4)
return -EINVAL;
rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
@@ -2417,8 +2439,6 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
#if IS_ENABLED(CONFIG_IPV6)
struct dst_entry *ndst;
- if (!vxlan->vn6_sock)
- return -EINVAL;
ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
info->key.label, &info->key.u.ipv6.dst,
&info->key.u.ipv6.src, NULL, info);
@@ -2728,10 +2748,10 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
- vxlan->vn6_sock = vs;
+ rcu_assign_pointer(vxlan->vn6_sock, vs);
else
#endif
- vxlan->vn4_sock = vs;
+ rcu_assign_pointer(vxlan->vn4_sock, vs);
vxlan_vs_add_dev(vs, vxlan);
return 0;
}
@@ -2742,9 +2762,9 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan)
bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
int ret = 0;
- vxlan->vn4_sock = NULL;
+ RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
#if IS_ENABLED(CONFIG_IPV6)
- vxlan->vn6_sock = NULL;
+ RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
if (ipv6 || metadata)
ret = __vxlan_sock_add(vxlan, true);
#endif
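The vxlan conversion mirrors the geneve one above, and it shows the two dereference flavours side by side: rtnl_dereference() in paths that hold RTNL (socket release, IGMP join/leave, vxlan_group_used) both documents and lockdep-verifies that the pointer cannot change underneath, so no RCU read section is needed there; rcu_dereference() in the transmit and route-lookup paths requires an RCU read-side critical section plus a NULL check, because teardown can race with it:

	/* control path: RTNL held, pointer is stable */
	ASSERT_RTNL();
	sock4 = rtnl_dereference(vxlan->vn4_sock);

	/* datapath: only rcu_read_lock() held, pointer may vanish */
	sock4 = rcu_dereference(vxlan->vn4_sock);
	if (!sock4)
		goto drop;	/* device being torn down */

That asymmetry is also why vxlan_fill_metadata_dst() could drop its explicit vn6_sock NULL test: vxlan6_get_route() now performs it itself after taking its own rcu_dereference().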
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 33ab3345d333..4e9fe75d7067 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -294,7 +294,7 @@ config FSL_UCC_HDLC
config SLIC_DS26522
tristate "Slic Maxim ds26522 card support"
depends on SPI
- depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
help
This module initializes and configures the Maxim DS26522
SLIC card in T1 or E1 mode.
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index d06a887a2352..b776a0ab106c 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -223,12 +223,19 @@ static int slic_ds26522_probe(struct spi_device *spi)
return ret;
}
+static const struct spi_device_id slic_ds26522_id[] = {
+ { .name = "ds26522" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(spi, slic_ds26522_id);
+
static const struct of_device_id slic_ds26522_match[] = {
{
.compatible = "maxim,ds26522",
},
{},
};
+MODULE_DEVICE_TABLE(of, slic_ds26522_match);
static struct spi_driver slic_ds26522_driver = {
.driver = {
@@ -239,6 +246,7 @@ static struct spi_driver slic_ds26522_driver = {
},
.probe = slic_ds26522_probe,
.remove = slic_ds26522_remove,
+ .id_table = slic_ds26522_id,
};
static int __init slic_ds26522_init(void)
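The slic_ds26522 change addresses module autoloading: for SPI devices instantiated from the device tree, the SPI core reports a "spi:<name>" modalias derived from the compatible string with the vendor prefix stripped, so a driver that only exports an of_device_id table is never matched by udev and has to be modprobed by hand. Both tables are needed, in roughly this shape (foo/vendor names are generic placeholders):

static int foo_probe(struct spi_device *spi) { return 0; }	/* stub */

static const struct spi_device_id foo_spi_id[] = {
	{ .name = "foo" },	/* compatible minus vendor prefix */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, foo_spi_id);

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct spi_driver foo_driver = {
	.driver = {
		.name		= "foo",
		.of_match_table	= foo_of_match,
	},
	.probe		= foo_probe,
	.id_table	= foo_spi_id,	/* enables spi: alias matching */
};
module_spi_driver(foo_driver);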
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index dda49af1eb74..521f1c55c19e 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -450,6 +450,7 @@ struct ath10k_debug {
u32 pktlog_filter;
u32 reg_addr;
u32 nf_cal_period;
+ void *cal_data;
struct ath10k_fw_crash_data *fw_crash_data;
};
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 832da6ed9f13..82a4c67f3672 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -30,6 +30,8 @@
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+#define ATH10K_DEBUG_CAL_DATA_LEN 12064
+
#define ATH10K_FW_CRASH_DUMP_VERSION 1
/**
@@ -1451,56 +1453,51 @@ static const struct file_operations fops_fw_dbglog = {
.llseek = default_llseek,
};
-static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
{
- struct ath10k *ar = inode->i_private;
- void *buf;
u32 hi_addr;
__le32 addr;
int ret;
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH10K_STATE_ON &&
- ar->state != ATH10K_STATE_UTF) {
- ret = -ENETDOWN;
- goto err;
- }
+ lockdep_assert_held(&ar->conf_mutex);
- buf = vmalloc(ar->hw_params.cal_data_len);
- if (!buf) {
- ret = -ENOMEM;
- goto err;
- }
+ if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
+ return -EINVAL;
hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
if (ret) {
- ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
- goto err_vfree;
+ ath10k_warn(ar, "failed to read hi_board_data address: %d\n",
+ ret);
+ return ret;
}
- ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
+ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data,
ar->hw_params.cal_data_len);
if (ret) {
ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
- goto err_vfree;
+ return ret;
}
- file->private_data = buf;
+ return 0;
+}
- mutex_unlock(&ar->conf_mutex);
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
- return 0;
+ mutex_lock(&ar->conf_mutex);
-err_vfree:
- vfree(buf);
+ if (ar->state == ATH10K_STATE_ON ||
+ ar->state == ATH10K_STATE_UTF) {
+ ath10k_debug_cal_data_fetch(ar);
+ }
-err:
+ file->private_data = ar;
mutex_unlock(&ar->conf_mutex);
- return ret;
+ return 0;
}
static ssize_t ath10k_debug_cal_data_read(struct file *file,
@@ -1508,18 +1505,16 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- void *buf = file->private_data;
- return simple_read_from_buffer(user_buf, count, ppos,
- buf, ar->hw_params.cal_data_len);
-}
+ mutex_lock(&ar->conf_mutex);
-static int ath10k_debug_cal_data_release(struct inode *inode,
- struct file *file)
-{
- vfree(file->private_data);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ ar->debug.cal_data,
+ ar->hw_params.cal_data_len);
- return 0;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
}
static ssize_t ath10k_write_ani_enable(struct file *file,
@@ -1580,7 +1575,6 @@ static const struct file_operations fops_ani_enable = {
static const struct file_operations fops_cal_data = {
.open = ath10k_debug_cal_data_open,
.read = ath10k_debug_cal_data_read,
- .release = ath10k_debug_cal_data_release,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
@@ -1932,6 +1926,8 @@ void ath10k_debug_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_debug_cal_data_fetch(ar);
+
/* Must not use _sync to avoid deadlock; we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
* warning from del_timer(). */
@@ -2344,6 +2340,10 @@ int ath10k_debug_create(struct ath10k *ar)
if (!ar->debug.fw_crash_data)
return -ENOMEM;
+ ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
+ if (!ar->debug.cal_data)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
@@ -2357,6 +2357,9 @@ void ath10k_debug_destroy(struct ath10k *ar)
vfree(ar->debug.fw_crash_data);
ar->debug.fw_crash_data = NULL;
+ vfree(ar->debug.cal_data);
+ ar->debug.cal_data = NULL;
+
ath10k_debug_fw_stats_reset(ar);
kfree(ar->debug.tpc_stats);
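The ath10k rework trades per-open vmalloc() for one long-lived snapshot buffer: ath10k_debug_create() vzalloc()s cal_data once, open() merely refreshes it under conf_mutex (and only while the firmware is up), ath10k_debug_stop() grabs a final snapshot before the device goes down, and debug_destroy() frees it. That removes the release() hook entirely and keeps the file readable after the interface is brought down. The read side reduces to:

static ssize_t my_cal_data_read(struct file *file, char __user *ubuf,
				size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;

	/* serialize against the fetch path refreshing the snapshot */
	mutex_lock(&ar->conf_mutex);
	count = simple_read_from_buffer(ubuf, count, ppos,
					ar->debug.cal_data,
					ar->hw_params.cal_data_len);
	mutex_unlock(&ar->conf_mutex);

	return count;
}

The WARN_ON against ATH10K_DEBUG_CAL_DATA_LEN guards the fixed allocation against a future hw_params entry declaring a larger calibration blob.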
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index eab0ab976af2..76eb33679d4b 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1401,6 +1401,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))},
{},
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index b6f064a8d264..7e27a06e5df1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -33,7 +33,6 @@ struct coeff {
enum ar9003_cal_types {
IQ_MISMATCH_CAL = BIT(0),
- TEMP_COMP_CAL = BIT(1),
};
static void ar9003_hw_setup_calibration(struct ath_hw *ah,
@@ -59,12 +58,6 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
/* Kick-off cal */
REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
break;
- case TEMP_COMP_CAL:
- ath_dbg(common, CALIBRATE,
- "starting Temperature Compensation Calibration\n");
- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_LOCAL);
- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_START);
- break;
default:
ath_err(common, "Invalid calibration type\n");
break;
@@ -93,8 +86,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
/*
* Accumulate cal measures for active chains
*/
- if (cur_caldata->calCollect)
- cur_caldata->calCollect(ah);
+ cur_caldata->calCollect(ah);
ah->cal_samples++;
if (ah->cal_samples >= cur_caldata->calNumSamples) {
@@ -107,8 +99,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
/*
* Process accumulated data
*/
- if (cur_caldata->calPostProc)
- cur_caldata->calPostProc(ah, numChains);
+ cur_caldata->calPostProc(ah, numChains);
/* Calibration has finished. */
caldata->CalValid |= cur_caldata->calType;
@@ -323,16 +314,9 @@ static const struct ath9k_percal_data iq_cal_single_sample = {
ar9003_hw_iqcalibrate
};
-static const struct ath9k_percal_data temp_cal_single_sample = {
- TEMP_COMP_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
-};
-
static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
{
ah->iq_caldata.calData = &iq_cal_single_sample;
- ah->temp_caldata.calData = &temp_cal_single_sample;
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->enabled_cals |= TX_IQ_CAL;
@@ -340,7 +324,7 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
- ah->supp_cals = IQ_MISMATCH_CAL | TEMP_COMP_CAL;
+ ah->supp_cals = IQ_MISMATCH_CAL;
}
#define OFF_UPPER_LT 24
@@ -1399,9 +1383,6 @@ static void ar9003_hw_init_cal_common(struct ath_hw *ah)
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- INIT_CAL(&ah->temp_caldata);
- INSERT_CAL(ah, &ah->temp_caldata);
-
/* Initialize current pointer to first element in list */
ah->cal_list_curr = ah->cal_list;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 2a5d3ad1169c..9cbca1229bac 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -830,7 +830,6 @@ struct ath_hw {
/* Calibration */
u32 supp_cals;
struct ath9k_cal_list iq_caldata;
- struct ath9k_cal_list temp_caldata;
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_caldata;
struct ath9k_cal_list *cal_list;
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 94480123efa3..274dd5a1574a 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -45,7 +45,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
- priv->wdev.iftype, 0, false);
+ priv->wdev.iftype, 0, NULL, NULL);
while (!skb_queue_empty(&list)) {
struct rx_packet_hdr *rx_hdr;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 1016628926d2..08d587a342d3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -238,7 +238,7 @@ struct rtl8xxxu_rxdesc16 {
u32 pattern1match:1;
u32 pattern0match:1;
#endif
- __le32 tsfl;
+ u32 tsfl;
#if 0
u32 bassn:12;
u32 bavld:1;
@@ -368,7 +368,7 @@ struct rtl8xxxu_rxdesc24 {
u32 ldcp:1;
u32 splcp:1;
#endif
- __le32 tsfl;
+ u32 tsfl;
};
struct rtl8xxxu_txdesc32 {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index df54d27e7851..a793fedc3654 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1461,7 +1461,9 @@ static int rtl8192eu_active_to_emu(struct rtl8xxxu_priv *priv)
int count, ret = 0;
/* Turn off RF */
- rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+ val8 = rtl8xxxu_read8(priv, REG_RF_CTRL);
+ val8 &= ~RF_ENABLE;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
/* Switch DPDT_SEL_P output from register 0x65[2] */
val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
@@ -1593,6 +1595,10 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
u32 val32;
u8 val8;
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 |= (BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+
val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
val8 |= BIT(5);
rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 6c086b5657e9..02b8ddd98a95 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1498,6 +1498,10 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
u32 val32;
u8 val8;
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 |= (BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+
/*
* No indication anywhere as to what 0x0790 does. The 2 antenna
* vendor code preserves bits 6-7 here.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index b2d7f6e69667..a5e6ec2152bf 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -5197,7 +5197,12 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift +
sizeof(struct rtl8xxxu_rxdesc16), 128);
- if (pkt_cnt > 1)
+ /*
+ * Only clone the skb if there's enough data at the end to
+ * at least cover the rx descriptor
+ */
+ if (pkt_cnt > 1 &&
+ urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
next_skb = skb_clone(skb, GFP_ATOMIC);
rx_status = IEEE80211_SKB_RXCB(skb);
@@ -5215,7 +5220,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs);
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (!rx_desc->swdec)
@@ -5285,7 +5290,7 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs);
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (!rx_desc->swdec)
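The tsfl type change (__le32 to u32) is a sparse/endianness fix: rtl8xxxu converts the whole RX descriptor to host byte order before parsing it, so that the per-endian bitfield definitions work on big-endian CPUs too, and applying le32_to_cpu() to tsfl after that conversion would re-swap an already-converted value. Roughly the arrangement, as a sketch of the in-place conversion the driver performs early in the parse path:

	struct rtl8xxxu_rxdesc16 *rx_desc =
		(struct rtl8xxxu_rxdesc16 *)skb->data;
	__le32 *raw = (__le32 *)skb->data;
	u32 *words = (u32 *)rx_desc;	/* same memory, host order */
	int i;

	/* whole descriptor to host order; bitfields now parse on BE */
	for (i = 0; i < sizeof(*rx_desc) / sizeof(u32); i++)
		words[i] = le32_to_cpu(raw[i]);

	/* tsfl is therefore already cpu-endian; no second swap: */
	rx_status->mactime = rx_desc->tsfl;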
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index f95760c13c56..8e7f23c11680 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -111,7 +111,7 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
if (!err)
goto found_alt;
}
- pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
+ pr_err("Selected firmware is not available\n");
rtlpriv->max_fw_size = 0;
return;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index e7b11b40e68d..f361808def47 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -86,6 +86,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 tid;
+ char *fw_name;
rtl8188ee_bt_reg_init(hw);
rtlpriv->dm.dm_initialgain_enable = 1;
@@ -169,10 +170,10 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8188efw.bin";
+ fw_name = "rtlwifi/rtl8188efw.bin";
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -284,7 +285,6 @@ static const struct rtl_hal_cfg rtl88ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl88e_pci",
- .fw_name = "rtlwifi/rtl8188efw.bin",
.ops = &rtl8188ee_hal_ops,
.mod_params = &rtl88ee_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 87aa209ae325..8b6e37ce3f66 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -96,6 +96,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ char *fw_name = "rtlwifi/rtl8192cfwU.bin";
rtl8192ce_bt_reg_init(hw);
@@ -167,15 +168,12 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
}
/* request fw */
- if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
- !IS_92C_SERIAL(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin";
- else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+ if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
+ fw_name = "rtlwifi/rtl8192cfwU_B.bin";
rtlpriv->max_fw_size = 0x4000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -262,7 +260,6 @@ static const struct rtl_hal_cfg rtl92ce_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92c_pci",
- .fw_name = "rtlwifi/rtl8192cfw.bin",
.ops = &rtl8192ce_hal_ops,
.mod_params = &rtl92ce_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 7c6f7f0d18c6..f953320f0e23 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -59,6 +59,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int err;
+ char *fw_name;
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -77,18 +78,18 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
}
if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) &&
!IS_92C_SERIAL(rtlpriv->rtlhal.version)) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin";
+ fw_name = "rtlwifi/rtl8192cufw_A.bin";
} else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin";
+ fw_name = "rtlwifi/rtl8192cufw_B.bin";
} else {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
+ fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
}
/* provide name of alternative file */
rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin";
- pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name);
+ pr_info("Loading firmware %s\n", fw_name);
rtlpriv->max_fw_size = 0x4000;
err = request_firmware_nowait(THIS_MODULE, 1,
- rtlpriv->cfg->fw_name, rtlpriv->io.dev,
+ fw_name, rtlpriv->io.dev,
GFP_KERNEL, hw, rtl_fw_cb);
return err;
}
@@ -187,7 +188,6 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.name = "rtl92c_usb",
- .fw_name = "rtlwifi/rtl8192cufw.bin",
.ops = &rtl8192cu_hal_ops,
.mod_params = &rtl92cu_mod_params,
.usb_interface_cfg = &rtl92cu_interface_cfg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 0538a4d09568..1ebfee18882f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -92,6 +92,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
u8 tid;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ char *fw_name = "rtlwifi/rtl8192defw.bin";
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -181,10 +182,10 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->max_fw_size = 0x8000;
pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
- pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
+ pr_info("Loading firmware file %s\n", fw_name);
/* request fw */
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92de_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8192de",
- .fw_name = "rtlwifi/rtl8192defw.bin",
.ops = &rtl8192de_hal_ops,
.mod_params = &rtl92de_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index ac299cbe59b0..46b605de36e7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -91,6 +91,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
int err = 0;
+ char *fw_name;
rtl92ee_bt_reg_init(hw);
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
@@ -170,11 +171,11 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
}
/* request fw */
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192eefw.bin";
+ fw_name = "rtlwifi/rtl8192eefw.bin";
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92ee_pci",
- .fw_name = "rtlwifi/rtl8192eefw.bin",
.ops = &rtl8192ee_hal_ops,
.mod_params = &rtl92ee_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 5e8e02d5de8a..3e1eaeac4fdc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -89,12 +89,13 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
struct ieee80211_hw *hw = context;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rt_firmware *pfirmware = NULL;
+ char *fw_name = "rtlwifi/rtl8192sefw.bin";
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
- pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
+ pr_err("Firmware %s not available\n", fw_name);
rtlpriv->max_fw_size = 0;
return;
}
@@ -117,6 +118,7 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
int err = 0;
u16 earlyrxthreshold = 7;
+ char *fw_name = "rtlwifi/rtl8192sefw.bin";
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -214,9 +216,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 +
sizeof(struct fw_hdr);
pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n"
- "Loading firmware %s\n", rtlpriv->cfg->fw_name);
+ "Loading firmware %s\n", fw_name);
/* request fw */
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl92se_fw_cb);
if (err) {
@@ -310,7 +312,6 @@ static const struct rtl_hal_cfg rtl92se_hal_cfg = {
.bar_id = 1,
.write_readback = false,
.name = "rtl92s_pci",
- .fw_name = "rtlwifi/rtl8192sefw.bin",
.ops = &rtl8192se_hal_ops,
.mod_params = &rtl92se_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 89c828ad89f4..c51a9e8234e9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -94,6 +94,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
int err = 0;
+ char *fw_name = "rtlwifi/rtl8723fw.bin";
rtl8723e_bt_reg_init(hw);
@@ -176,14 +177,12 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
- if (IS_VENDOR_8723_A_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw.bin";
- else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw_B.bin";
+ if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
+ fw_name = "rtlwifi/rtl8723fw_B.bin";
rtlpriv->max_fw_size = 0x6000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -280,7 +279,6 @@ static const struct rtl_hal_cfg rtl8723e_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723e_pci",
- .fw_name = "rtlwifi/rtl8723efw.bin",
.ops = &rtl8723e_hal_ops,
.mod_params = &rtl8723e_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 20b53f035483..847644d1f5f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -91,6 +91,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ char *fw_name = "rtlwifi/rtl8723befw.bin";
rtl8723be_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -184,8 +185,8 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
}
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -280,7 +281,6 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723be_pci",
- .fw_name = "rtlwifi/rtl8723befw.bin",
.ops = &rtl8723be_hal_ops,
.mod_params = &rtl8723be_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 22f687b1f133..297938e0effd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -93,6 +93,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ char *fw_name, *wowlan_fw_name;
rtl8821ae_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -203,17 +204,17 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin";
- rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
+ fw_name = "rtlwifi/rtl8812aefw.bin";
+ wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
} else {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin";
- rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
+ fw_name = "rtlwifi/rtl8821aefw.bin";
+ wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
}
rtlpriv->max_fw_size = 0x8000;
/* load normal firmware */
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -222,9 +223,9 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
/* load wowlan firmware */
- pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name);
+ pr_info("Using firmware %s\n", wowlan_fw_name);
err = request_firmware_nowait(THIS_MODULE, 1,
- rtlpriv->cfg->wowlan_fw_name,
+ wowlan_fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_wowlan_fw_cb);
if (err) {
@@ -320,7 +321,6 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8821ae_pci",
- .fw_name = "rtlwifi/rtl8821aefw.bin",
.ops = &rtl8821ae_hal_ops,
.mod_params = &rtl8821ae_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 595f7d5d091a..dafe486f8448 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2278,9 +2278,7 @@ struct rtl_hal_cfg {
u8 bar_id;
bool write_readback;
char *name;
- char *fw_name;
char *alt_fw_name;
- char *wowlan_fw_name;
struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
struct rtl_hal_usbint_cfg *usb_interface_cfg;
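The whole rtlwifi series above exists to make this struct shrink possible: fw_name and wowlan_fw_name leave struct rtl_hal_cfg because the per-chip cfg instances are shared, mostly const objects, and the init paths were selecting the firmware by writing through rtlpriv->cfg->fw_name at runtime, i.e. mutating shared (and in several drivers const-qualified) configuration, which is also racy with two adapters of the same family. Each *_init_sw_vars() now keeps the choice in a local, along these lines (the default name and cut check are illustrative):

	char *fw_name = "rtlwifi/default_fw.bin";	/* chip default */

	if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))	/* per-cut pick */
		fw_name = "rtlwifi/fw_B.bin";

	pr_info("Using firmware %s\n", fw_name);
	err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
				      rtlpriv->io.dev, GFP_KERNEL, hw,
				      rtl_fw_cb);

rtl8192se is the one wrinkle: its completion callback also printed cfg->fw_name, so the name is duplicated into rtl92se_fw_cb() rather than threaded through the context pointer.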
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index a6e94b1a12cb..47fe7f96a242 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -391,7 +391,6 @@ static void wl1271_remove(struct sdio_func *func)
pm_runtime_get_noresume(&func->dev);
platform_device_unregister(glue->core);
- kfree(glue);
}
#ifdef CONFIG_PM
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 8b2b740d6679..124c2432ac9c 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -89,7 +89,7 @@ config NVDIMM_PFN
Select Y if unsure
config NVDIMM_DAX
- tristate "NVDIMM DAX: Raw access to persistent memory"
+ bool "NVDIMM DAX: Raw access to persistent memory"
default LIBNVDIMM
depends on NVDIMM_PFN
help
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 3509cff68ef9..abe5c6bc756c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -2176,12 +2176,14 @@ static struct device **scan_labels(struct nd_region *nd_region)
return devs;
err:
- for (i = 0; devs[i]; i++)
- if (is_nd_blk(&nd_region->dev))
- namespace_blk_release(devs[i]);
- else
- namespace_pmem_release(devs[i]);
- kfree(devs);
+ if (devs) {
+ for (i = 0; devs[i]; i++)
+ if (is_nd_blk(&nd_region->dev))
+ namespace_blk_release(devs[i]);
+ else
+ namespace_pmem_release(devs[i]);
+ kfree(devs);
+ }
return NULL;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 42b3a8217073..24618431a14b 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -47,7 +47,7 @@ static struct nd_region *to_region(struct pmem_device *pmem)
return to_nd_region(to_dev(pmem)->parent);
}
-static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
+static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
unsigned int len)
{
struct device *dev = to_dev(pmem);
@@ -62,8 +62,12 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
__func__, (unsigned long long) sector,
cleared / 512, cleared / 512 > 1 ? "s" : "");
badblocks_clear(&pmem->bb, sector, cleared / 512);
+ } else {
+ return -EIO;
}
+
invalidate_pmem(pmem->virt_addr + offset, len);
+ return 0;
}
static void write_pmem(void *pmem_addr, struct page *page,
@@ -123,7 +127,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
flush_dcache_page(page);
write_pmem(pmem_addr, page, off, len);
if (unlikely(bad_pmem)) {
- pmem_clear_poison(pmem, pmem_off, len);
+ rc = pmem_clear_poison(pmem, pmem_off, len);
write_pmem(pmem_addr, page, off, len);
}
}
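pmem_clear_poison() now reports whether the badblocks clearing actually happened. The write path deliberately writes twice around it: the first write_pmem() may land on poisoned media, the clear then goes through the bus's clear-error mechanism (which may rewrite the affected sectors), and the second write_pmem() re-deposits the caller's data on the now-clean media. Propagating rc means a failed clear leaves the bio erroring with -EIO instead of pretending the block is healthy:

	if (unlikely(bad_pmem)) {
		rc = pmem_clear_poison(pmem, pmem_off, len); /* may -EIO */
		write_pmem(pmem_addr, page, off, len);	/* rewrite data */
	}
	/* rc reaches the bio; a still-poisoned block keeps failing */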
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 329381a28edf..79e679d12f3b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -554,7 +554,7 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
c.identify.opcode = nvme_admin_identify;
- c.identify.cns = cpu_to_le32(1);
+ c.identify.cns = cpu_to_le32(NVME_ID_CNS_CTRL);
*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
if (!*id)
@@ -572,7 +572,7 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n
struct nvme_command c = { };
c.identify.opcode = nvme_admin_identify;
- c.identify.cns = cpu_to_le32(2);
+ c.identify.cns = cpu_to_le32(NVME_ID_CNS_NS_ACTIVE_LIST);
c.identify.nsid = cpu_to_le32(nsid);
return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}
@@ -900,9 +900,9 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
return -ENODEV;
}
- if (ns->ctrl->vs >= NVME_VS(1, 1))
+ if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
- if (ns->ctrl->vs >= NVME_VS(1, 2))
+ if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
return 0;
@@ -1086,6 +1086,8 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
int ret;
while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
+ if (csts == ~0)
+ return -ENODEV;
if ((csts & NVME_CSTS_RDY) == bit)
break;
@@ -1240,7 +1242,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
page_shift = NVME_CAP_MPSMIN(cap) + 12;
- if (ctrl->vs >= NVME_VS(1, 1))
+ if (ctrl->vs >= NVME_VS(1, 1, 0))
ctrl->subsystem = NVME_CAP_NSSRC(cap);
ret = nvme_identify_ctrl(ctrl, &id);
@@ -1840,7 +1842,7 @@ static void nvme_scan_work(struct work_struct *work)
return;
nn = le32_to_cpu(id->nn);
- if (ctrl->vs >= NVME_VS(1, 1) &&
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
if (!nvme_scan_ns_list(ctrl, nn))
goto done;
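Two separate hardenings in the core hunk. First, NVME_VS() gains the tertiary version number so it matches the NVMe 1.2.1 layout of the VS register (major in bits 31:16, minor in 15:8, tertiary in 7:0); every ">= NVME_VS(1, 1)" style comparison becomes ">= NVME_VS(1, 1, 0)" with unchanged meaning. The macro is simply:

/* VS register layout per NVMe 1.2.1: MJR[31:16] MNR[15:8] TER[7:0] */
#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

Second, nvme_wait_ready() bails out with -ENODEV when CSTS reads back as all ones: a surprise-removed PCIe device returns ~0 for every MMIO read, so without the check the ready-wait would spin until its timeout on hardware that is already gone. The named CNS constants (NVME_ID_CNS_CTRL, NVME_ID_CNS_NS_ACTIVE_LIST) replace the bare 1 and 2 with no behavioural change.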
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0fc99f0f2571..0248d0e21fee 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -99,6 +99,7 @@ struct nvme_dev {
dma_addr_t cmb_dma_addr;
u64 cmb_size;
u32 cmbsz;
+ u32 cmbloc;
struct nvme_ctrl ctrl;
struct completion ioq_wait;
};
@@ -893,7 +894,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
- queue_work(nvme_workq, &dev->reset_work);
+ nvme_reset(dev);
/*
* Mark the request as handled, since the inline shutdown
@@ -1214,7 +1215,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
struct nvme_queue *nvmeq;
- dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
+ dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
NVME_CAP_NSSRC(cap) : 0;
if (dev->subsystem &&
@@ -1291,7 +1292,7 @@ static void nvme_watchdog_timer(unsigned long data)
/* Skip controllers under certain specific conditions. */
if (nvme_should_reset(dev, csts)) {
- if (queue_work(nvme_workq, &dev->reset_work))
+ if (!nvme_reset(dev))
dev_warn(dev->dev,
"Failed status: 0x%x, reset controller.\n",
csts);
@@ -1331,28 +1332,37 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
return ret >= 0 ? 0 : ret;
}
+static ssize_t nvme_cmb_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+ return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
+ ndev->cmbloc, ndev->cmbsz);
+}
+static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
+
static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
{
u64 szu, size, offset;
- u32 cmbloc;
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
void __iomem *cmb;
dma_addr_t dma_addr;
- if (!use_cmb_sqes)
- return NULL;
-
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!(NVME_CMB_SZ(dev->cmbsz)))
return NULL;
+ dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
- cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
+ if (!use_cmb_sqes)
+ return NULL;
szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
size = szu * NVME_CMB_SZ(dev->cmbsz);
- offset = szu * NVME_CMB_OFST(cmbloc);
- bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+ offset = szu * NVME_CMB_OFST(dev->cmbloc);
+ bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
if (offset > bar_size)
return NULL;
@@ -1365,7 +1375,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+ dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
cmb = ioremap_wc(dma_addr, size);
if (!cmb)
return NULL;
@@ -1511,9 +1521,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
return 0;
}
-static void nvme_disable_io_queues(struct nvme_dev *dev)
+static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
{
- int pass, queues = dev->online_queues - 1;
+ int pass;
unsigned long timeout;
u8 opcode = nvme_admin_delete_sq;
@@ -1616,9 +1626,25 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev->q_depth);
}
- if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
+ /*
+ * CMBs can currently only exist on >=1.2 PCIe devices. We only
+ * populate sysfs if a CMB is implemented. Note that we add the
+ * CMB attribute to the nvme_ctrl kobj, so it does not need to be
+ * removed explicitly on exit. Since nvme_dev_attrs_group has no name,
+ * we can pass NULL as the final argument to sysfs_add_file_to_group.
+ */
+
+ if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
dev->cmb = nvme_map_cmb(dev);
+ if (dev->cmbsz) {
+ if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+ &dev_attr_cmb.attr, NULL))
+ dev_warn(dev->dev,
+ "failed to add sysfs attribute for CMB\n");
+ }
+ }
+
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
return 0;
@@ -1649,7 +1675,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
- int i;
+ int i, queues;
u32 csts = -1;
del_timer_sync(&dev->watchdog_timer);
@@ -1660,6 +1686,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
csts = readl(dev->bar + NVME_REG_CSTS);
}
+ queues = dev->online_queues - 1;
for (i = dev->queue_count - 1; i > 0; i--)
nvme_suspend_queue(dev->queues[i]);
@@ -1671,7 +1698,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
if (dev->queue_count)
nvme_suspend_queue(dev->queues[0]);
} else {
- nvme_disable_io_queues(dev);
+ nvme_disable_io_queues(dev, queues);
nvme_disable_admin_queue(dev, shutdown);
}
nvme_pci_disable(dev);
@@ -1818,11 +1845,10 @@ static int nvme_reset(struct nvme_dev *dev)
{
if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
return -ENODEV;
-
+ if (work_busy(&dev->reset_work))
+ return -ENODEV;
if (!queue_work(nvme_workq, &dev->reset_work))
return -EBUSY;
-
- flush_work(&dev->reset_work);
return 0;
}
@@ -1846,7 +1872,12 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
{
- return nvme_reset(to_nvme_dev(ctrl));
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+ int ret = nvme_reset(dev);
+
+ if (!ret)
+ flush_work(&dev->reset_work);
+ return ret;
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
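
Taken together, the rework above makes nvme_reset() the single non-blocking entry point: work_busy() turns a second reset request into -ENODEV instead of stalling in flush_work(), and only the core-facing nvme_pci_reset_ctrl() still waits for completion. A minimal sketch of the two calling conventions, assuming the functions as modified in this patch:

	/* Fire and forget, as in the timeout, watchdog, resume and
	 * error-recovery paths above. */
	if (nvme_reset(dev))
		dev_warn(dev->dev, "failed to schedule reset\n");

	/* Synchronous, as in nvme_pci_reset_ctrl(): wait for the work. */
	ret = nvme_reset(dev);
	if (!ret)
		flush_work(&dev->reset_work);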
@@ -1940,7 +1971,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
if (prepare)
nvme_dev_disable(dev, false);
else
- queue_work(nvme_workq, &dev->reset_work);
+ nvme_reset(dev);
}
static void nvme_shutdown(struct pci_dev *pdev)
@@ -2009,7 +2040,7 @@ static int nvme_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
- queue_work(nvme_workq, &ndev->reset_work);
+ nvme_reset(ndev);
return 0;
}
#endif
@@ -2048,7 +2079,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
dev_info(dev->ctrl.device, "restart after slot reset\n");
pci_restore_state(pdev);
- queue_work(nvme_workq, &dev->reset_work);
+ nvme_reset(dev);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index c2a0a1c7d05d..3eaa4d27801e 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -606,7 +606,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
eui = id_ns->eui64;
len = sizeof(id_ns->eui64);
- if (ns->ctrl->vs >= NVME_VS(1, 2)) {
+ if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) {
if (bitmap_empty(eui, len * 8)) {
eui = id_ns->nguid;
len = sizeof(id_ns->nguid);
@@ -679,7 +679,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{
int res;
- if (ns->ctrl->vs >= NVME_VS(1, 1)) {
+ if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) {
res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
if (res != -EOPNOTSUPP)
return res;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 7ab9c9381b98..6fe4c48a21e4 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -199,7 +199,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
*/
	/* we support multiple ports and multiple hosts: */
- id->mic = (1 << 0) | (1 << 1);
+ id->cmic = (1 << 0) | (1 << 1);
/* no limit on data transfer sizes for now */
id->mdts = 0;
@@ -511,13 +511,13 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req)
case nvme_admin_identify:
req->data_len = 4096;
switch (le32_to_cpu(cmd->identify.cns)) {
- case 0x00:
+ case NVME_ID_CNS_NS:
req->execute = nvmet_execute_identify_ns;
return 0;
- case 0x01:
+ case NVME_ID_CNS_CTRL:
req->execute = nvmet_execute_identify_ctrl;
return 0;
- case 0x02:
+ case NVME_ID_CNS_NS_ACTIVE_LIST:
req->execute = nvmet_execute_identify_nslist;
return 0;
}
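
The raw CNS case labels are replaced with named constants; a sketch of the definitions this relies on, matching the Identify CNS values the old literals encoded (assumed to live in include/linux/nvme.h):

	enum {
		NVME_ID_CNS_NS             = 0x00,
		NVME_ID_CNS_CTRL           = 0x01,
		NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
	};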
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6559d5afa7bf..b4cacb6f0258 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -882,7 +882,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
if (!subsys)
return NULL;
- subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */
+ subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
switch (type) {
case NVME_NQN_NVME:
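
Every NVME_VS() call site in this patch gains a third argument because the macro now encodes the tertiary version number as well, matching the layout of the Version register (major in bits 31:16, minor in 15:8, tertiary in 7:0). A sketch of the updated macro, assumed from include/linux/nvme.h:

	#define NVME_VS(major, minor, tertiary) \
		(((major) << 16) | ((minor) << 8) | (tertiary))

	/* NVME_VS(1, 2, 1) == 0x010201, the open-coded value removed above. */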
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 6f65646e89cf..12f39eea569f 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -54,7 +54,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
/* we support only dynamic controllers */
e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
- e->nqntype = type;
+ e->subtype = type;
memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
@@ -187,7 +187,7 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req)
case nvme_admin_identify:
req->data_len = 4096;
switch (le32_to_cpu(cmd->identify.cns)) {
- case 0x01:
+ case NVME_ID_CNS_CTRL:
req->execute =
nvmet_execute_identify_disc_ctrl;
return 0;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index f811d2796437..e4bf07d20f9b 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -29,6 +29,7 @@
const struct of_device_id of_default_bus_match_table[] = {
{ .compatible = "simple-bus", },
{ .compatible = "simple-mfd", },
+ { .compatible = "isa", },
#ifdef CONFIG_ARM_AMBA
{ .compatible = "arm,amba-bus", },
#endif /* CONFIG_ARM_AMBA */
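
Adding "isa" to the default bus match table means the generic populate helper now descends into ISA nodes and registers their children as platform devices. A minimal sketch, with a hypothetical tree fragment shown in the comment:

	/*
	 * Hypothetical device-tree fragment:
	 *
	 *   isa {
	 *           compatible = "isa";
	 *           uart@2f8 { ... };
	 *   };
	 *
	 * With "isa" in of_default_bus_match_table, this call now creates
	 * a platform device for uart@2f8 too.
	 */
	of_platform_default_populate(NULL, NULL, NULL);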
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 2cb7315e26d0..653707996342 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -247,6 +247,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
pp = &pcie->pp;
pp->dev = dev;
+ pcie->drvdata = match->data;
pp->ops = pcie->drvdata->ops;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
@@ -256,7 +257,6 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->pp.dbi_base);
}
- pcie->drvdata = match->data;
pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
if (!ls_pcie_is_bridge(pcie))
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index 537f58a664fa..8df6312ed300 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
- * Authors: Joao Pinto <jpinto@synopsys.com>
+ * Authors: Joao Pinto <jpmpinto@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index bfdd0744b686..ad70507cfb56 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -610,6 +610,7 @@ static int msi_verify_entries(struct pci_dev *dev)
* msi_capability_init - configure device's MSI capability structure
* @dev: pointer to the pci_dev data structure of MSI device function
* @nvec: number of interrupts to allocate
+ * @affinity: flag indicating whether the cpu irq affinity mask should be set
*
* Setup the MSI capability structure of the device with the requested
* number of interrupts. A return value of zero indicates the successful
@@ -752,6 +753,7 @@ static void msix_program_entries(struct pci_dev *dev,
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of struct msix_entry entries
* @nvec: number of @entries
+ * @affinity: flag indicating whether the cpu irq affinity mask should be set
*
* Setup the MSI-X capability structure of device function with a
* single MSI-X irq. A return of zero indicates the successful setup of
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index c2ac7646b99f..a8ac4bcef2c0 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1011,7 +1011,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
rc = acpi_dev_get_resources(adev, &resource_list,
acpi_pmu_dev_add_resource, &res);
acpi_dev_free_resource_list(&resource_list);
- if (rc < 0 || IS_ERR(&res)) {
+ if (rc < 0) {
dev_err(dev, "PMU type %d: No resource address found\n", type);
goto err;
}
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index e1ab864e1a7f..c8c72e8259d3 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -151,21 +151,21 @@ FUNC_GROUP_DECL(GPID0, F19, E21);
#define GPID2_DESC SIG_DESC_SET(SCU8C, 9)
-#define D20 26
+#define F20 26
SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC);
SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC);
SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID);
-MS_PIN_DECL(D20, GPIOD2, SD2DAT0, GPID2IN);
+MS_PIN_DECL(F20, GPIOD2, SD2DAT0, GPID2IN);
-#define D21 27
+#define D20 27
SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC);
SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC);
SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID);
-MS_PIN_DECL(D21, GPIOD3, SD2DAT1, GPID2OUT);
+MS_PIN_DECL(D20, GPIOD3, SD2DAT1, GPID2OUT);
-FUNC_GROUP_DECL(GPID2, D20, D21);
+FUNC_GROUP_DECL(GPID2, F20, D20);
#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 21)
#define GPIE0_DESC SIG_DESC_SET(SCU8C, 12)
@@ -182,28 +182,88 @@ SIG_EXPR_LIST_DECL_SINGLE(NDCD3, NDCD3, SIG_DESC_SET(SCU80, 17));
SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC);
SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE);
-MS_PIN_DECL(C20, GPIE0, NDCD3, GPIE0OUT);
+MS_PIN_DECL(C20, GPIOE1, NDCD3, GPIE0OUT);
FUNC_GROUP_DECL(GPIE0, B20, C20);
-#define SPI1_DESC SIG_DESC_SET(HW_STRAP1, 13)
+#define SPI1_DESC { HW_STRAP1, GENMASK(13, 12), 1, 0 }
+#define SPI1DEBUG_DESC { HW_STRAP1, GENMASK(13, 12), 2, 0 }
+#define SPI1PASSTHRU_DESC { HW_STRAP1, GENMASK(13, 12), 3, 0 }
+
#define C18 64
-SIG_EXPR_LIST_DECL_SINGLE(SYSCS, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCS, SPI1DEBUG, SPI1PASSTHRU);
SS_PIN_DECL(C18, GPIOI0, SYSCS);
#define E15 65
-SIG_EXPR_LIST_DECL_SINGLE(SYSCK, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCK, SPI1DEBUG, SPI1PASSTHRU);
SS_PIN_DECL(E15, GPIOI1, SYSCK);
-#define A14 66
-SIG_EXPR_LIST_DECL_SINGLE(SYSMOSI, SPI1, COND1, SPI1_DESC);
-SS_PIN_DECL(A14, GPIOI2, SYSMOSI);
+#define B16 66
+SIG_EXPR_DECL(SYSMOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSMOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSMOSI, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(B16, GPIOI2, SYSMOSI);
#define C16 67
-SIG_EXPR_LIST_DECL_SINGLE(SYSMISO, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSMISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSMISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSMISO, SPI1DEBUG, SPI1PASSTHRU);
SS_PIN_DECL(C16, GPIOI3, SYSMISO);
-FUNC_GROUP_DECL(SPI1, C18, E15, A14, C16);
+#define VB_DESC SIG_DESC_SET(HW_STRAP1, 5)
+
+#define B15 68
+SIG_EXPR_DECL(SPI1CS0, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CS0, SIG_EXPR_PTR(SPI1CS0, SPI1),
+ SIG_EXPR_PTR(SPI1CS0, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1CS0, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCS, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(B15, GPIOI4, SPI1CS0, VBCS);
+
+#define C15 69
+SIG_EXPR_DECL(SPI1CK, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CK, SIG_EXPR_PTR(SPI1CK, SPI1),
+ SIG_EXPR_PTR(SPI1CK, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1CK, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCK, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(C15, GPIOI5, SPI1CK, VBCK);
+
+#define A14 70
+SIG_EXPR_DECL(SPI1MOSI, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1MOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1MOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1MOSI, SIG_EXPR_PTR(SPI1MOSI, SPI1),
+ SIG_EXPR_PTR(SPI1MOSI, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1MOSI, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBMOSI, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(A14, GPIOI6, SPI1MOSI, VBMOSI);
+
+#define A15 71
+SIG_EXPR_DECL(SPI1MISO, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1MISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1MISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1MISO, SIG_EXPR_PTR(SPI1MISO, SPI1),
+ SIG_EXPR_PTR(SPI1MISO, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1MISO, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBMISO, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(A15, GPIOI7, SPI1MISO, VBMISO);
+
+FUNC_GROUP_DECL(SPI1, B15, C15, A14, A15);
+FUNC_GROUP_DECL(SPI1DEBUG, C18, E15, B16, C16, B15, C15, A14, A15);
+FUNC_GROUP_DECL(SPI1PASSTHRU, C18, E15, B16, C16, B15, C15, A14, A15);
+FUNC_GROUP_DECL(VGABIOSROM, B15, C15, A14, A15);
+
+#define R2 72
+SIG_EXPR_LIST_DECL_SINGLE(SGPMCK, SGPM, SIG_DESC_SET(SCU84, 8));
+SS_PIN_DECL(R2, GPIOJ0, SGPMCK);
#define L2 73
SIG_EXPR_LIST_DECL_SINGLE(SGPMLD, SGPM, SIG_DESC_SET(SCU84, 9));
@@ -580,6 +640,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(A12),
ASPEED_PINCTRL_PIN(A13),
ASPEED_PINCTRL_PIN(A14),
+ ASPEED_PINCTRL_PIN(A15),
ASPEED_PINCTRL_PIN(A2),
ASPEED_PINCTRL_PIN(A3),
ASPEED_PINCTRL_PIN(A4),
@@ -592,6 +653,8 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(B12),
ASPEED_PINCTRL_PIN(B13),
ASPEED_PINCTRL_PIN(B14),
+ ASPEED_PINCTRL_PIN(B15),
+ ASPEED_PINCTRL_PIN(B16),
ASPEED_PINCTRL_PIN(B2),
ASPEED_PINCTRL_PIN(B20),
ASPEED_PINCTRL_PIN(B3),
@@ -603,6 +666,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(C12),
ASPEED_PINCTRL_PIN(C13),
ASPEED_PINCTRL_PIN(C14),
+ ASPEED_PINCTRL_PIN(C15),
ASPEED_PINCTRL_PIN(C16),
ASPEED_PINCTRL_PIN(C18),
ASPEED_PINCTRL_PIN(C2),
@@ -614,7 +678,6 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(D10),
ASPEED_PINCTRL_PIN(D2),
ASPEED_PINCTRL_PIN(D20),
- ASPEED_PINCTRL_PIN(D21),
ASPEED_PINCTRL_PIN(D4),
ASPEED_PINCTRL_PIN(D5),
ASPEED_PINCTRL_PIN(D6),
@@ -630,6 +693,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(E7),
ASPEED_PINCTRL_PIN(E9),
ASPEED_PINCTRL_PIN(F19),
+ ASPEED_PINCTRL_PIN(F20),
ASPEED_PINCTRL_PIN(F9),
ASPEED_PINCTRL_PIN(H20),
ASPEED_PINCTRL_PIN(L1),
@@ -691,11 +755,14 @@ static const struct aspeed_pin_group aspeed_g5_groups[] = {
ASPEED_PINCTRL_GROUP(RMII2),
ASPEED_PINCTRL_GROUP(SD1),
ASPEED_PINCTRL_GROUP(SPI1),
+ ASPEED_PINCTRL_GROUP(SPI1DEBUG),
+ ASPEED_PINCTRL_GROUP(SPI1PASSTHRU),
ASPEED_PINCTRL_GROUP(TIMER4),
ASPEED_PINCTRL_GROUP(TIMER5),
ASPEED_PINCTRL_GROUP(TIMER6),
ASPEED_PINCTRL_GROUP(TIMER7),
ASPEED_PINCTRL_GROUP(TIMER8),
+ ASPEED_PINCTRL_GROUP(VGABIOSROM),
};
static const struct aspeed_pin_function aspeed_g5_functions[] = {
@@ -733,11 +800,14 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
ASPEED_PINCTRL_FUNC(RMII2),
ASPEED_PINCTRL_FUNC(SD1),
ASPEED_PINCTRL_FUNC(SPI1),
+ ASPEED_PINCTRL_FUNC(SPI1DEBUG),
+ ASPEED_PINCTRL_FUNC(SPI1PASSTHRU),
ASPEED_PINCTRL_FUNC(TIMER4),
ASPEED_PINCTRL_FUNC(TIMER5),
ASPEED_PINCTRL_FUNC(TIMER6),
ASPEED_PINCTRL_FUNC(TIMER7),
ASPEED_PINCTRL_FUNC(TIMER8),
+ ASPEED_PINCTRL_FUNC(VGABIOSROM),
};
static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 0391f9f13f3e..49aeba912531 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -166,13 +166,9 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
bool enable, struct regmap *map)
{
int i;
- bool ret;
-
- ret = aspeed_sig_expr_eval(expr, enable, map);
- if (ret)
- return ret;
for (i = 0; i < expr->ndescs; i++) {
+ bool ret;
const struct aspeed_sig_desc *desc = &expr->descs[i];
u32 pattern = enable ? desc->enable : desc->disable;
@@ -199,12 +195,18 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr,
struct regmap *map)
{
+ if (aspeed_sig_expr_eval(expr, true, map))
+ return true;
+
return aspeed_sig_expr_set(expr, true, map);
}
static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr,
struct regmap *map)
{
+ if (!aspeed_sig_expr_eval(expr, true, map))
+ return true;
+
return aspeed_sig_expr_set(expr, false, map);
}
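
The refactor above takes the "is the mux already in the requested state?" short-circuit out of aspeed_sig_expr_set() and moves it into the two wrappers, where the polarity can be stated explicitly: enabling is a no-op when the expression already evaluates true, disabling when it already evaluates false. A condensed sketch of the resulting control flow (not the literal source):

	/* Condensed: both wrappers reduce to this pattern. */
	static bool sig_expr_apply(const struct aspeed_sig_expr *expr,
				   bool enable, struct regmap *map)
	{
		bool enabled = aspeed_sig_expr_eval(expr, true, map);

		if (enabled == enable)
			return true;    /* already in the target state */

		return aspeed_sig_expr_set(expr, enable, map);
	}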
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index d22a9fe2e6df..71bbeb9321ba 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1808,6 +1808,8 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(vg->pctl_dev);
}
+ raw_spin_lock_init(&vg->lock);
+
ret = byt_gpio_probe(vg);
if (ret) {
pinctrl_unregister(vg->pctl_dev);
@@ -1815,7 +1817,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, vg);
- raw_spin_lock_init(&vg->lock);
pm_runtime_enable(&pdev->dev);
return 0;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 63387a40b973..01443762e570 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -19,6 +19,7 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include "../core.h"
#include "pinctrl-intel.h"
/* Offset from regs */
@@ -1056,6 +1057,26 @@ int intel_pinctrl_remove(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(intel_pinctrl_remove);
#ifdef CONFIG_PM_SLEEP
+static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin)
+{
+ const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
+
+ if (!pd || !intel_pad_usable(pctrl, pin))
+ return false;
+
+ /*
+ * Only restore the pin if it is actually in use by the kernel (or
+ * by userspace). It is possible that some pins are used by the
+ * BIOS during resume and those are not always locked down so leave
+ * them alone.
+ */
+ if (pd->mux_owner || pd->gpio_owner ||
+ gpiochip_line_is_irq(&pctrl->chip, pin))
+ return true;
+
+ return false;
+}
+
int intel_pinctrl_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1069,7 +1090,7 @@ int intel_pinctrl_suspend(struct device *dev)
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
u32 val;
- if (!intel_pad_usable(pctrl, desc->number))
+ if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0));
@@ -1130,7 +1151,7 @@ int intel_pinctrl_resume(struct device *dev)
void __iomem *padcfg;
u32 val;
- if (!intel_pad_usable(pctrl, desc->number))
+ if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 07462d79d040..1aba2c74160e 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -309,7 +309,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
* much memory to the process.
*/
down_read(&current->mm->mmap_sem);
- ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
+ ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
+ &page, NULL);
up_read(&current->mm->mmap_sem);
if (ret < 0)
break;
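
This hunk (and the rio_mport one below) is fallout from the get_user_pages() interface change that folded the old write/force integer pair into a single gup_flags argument. FOLL_WRITE means the kernel side may write into the pinned pages, which is why a pipe read (is_write == 0) is the case that needs it. The mapping, side by side:

	/* old: get_user_pages(start, nr_pages, write, force, pages, vmas) */
	ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);

	/* new: write=1/force=0 becomes FOLL_WRITE; write=0 becomes 0 */
	ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
			     &page, NULL);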
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 81b8dcca8891..b8a21d7b25d4 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -576,6 +576,7 @@ config ASUS_WMI
config ASUS_NB_WMI
tristate "Asus Notebook WMI Driver"
depends on ASUS_WMI
+ depends on SERIO_I8042 || SERIO_I8042 = n
---help---
This is a driver for newer Asus notebooks. It adds extra features
like wireless radio and bluetooth control, leds, hotkeys, backlight...
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index d1a091b93192..a2323941e677 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -933,6 +933,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"),
},
},
+ {
+ .ident = "Lenovo YOGA 910-13IKB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"),
+ },
+ },
{}
};
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 436dfe871d32..9013a585507e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -892,7 +892,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
down_read(&current->mm->mmap_sem);
pinned = get_user_pages(
(unsigned long)xfer->loc_addr & PAGE_MASK,
- nr_pages, dir == DMA_FROM_DEVICE, 0,
+ nr_pages,
+ dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
page_list, NULL);
up_read(&current->mm->mmap_sem);
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index 8b2558e7363e..968c3ae4535c 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -154,7 +154,7 @@ const struct uniphier_reset_data uniphier_sld3_mio_reset_data[] = {
UNIPHIER_RESET_END,
};
-const struct uniphier_reset_data uniphier_pro5_mio_reset_data[] = {
+const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = {
UNIPHIER_MIO_RESET_SD(0, 0),
UNIPHIER_MIO_RESET_SD(1, 1),
UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1),
@@ -360,7 +360,7 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-ld20-reset",
.data = uniphier_ld20_sys_reset_data,
},
- /* Media I/O reset */
+ /* Media I/O reset, SD reset */
{
.compatible = "socionext,uniphier-sld3-mio-reset",
.data = uniphier_sld3_mio_reset_data,
@@ -378,20 +378,20 @@ static const struct of_device_id uniphier_reset_match[] = {
.data = uniphier_sld3_mio_reset_data,
},
{
- .compatible = "socionext,uniphier-pro5-mio-reset",
- .data = uniphier_pro5_mio_reset_data,
+ .compatible = "socionext,uniphier-pro5-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
},
{
- .compatible = "socionext,uniphier-pxs2-mio-reset",
- .data = uniphier_pro5_mio_reset_data,
+ .compatible = "socionext,uniphier-pxs2-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
},
{
.compatible = "socionext,uniphier-ld11-mio-reset",
.data = uniphier_sld3_mio_reset_data,
},
{
- .compatible = "socionext,uniphier-ld20-mio-reset",
- .data = uniphier_pro5_mio_reset_data,
+ .compatible = "socionext,uniphier-ld20-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
},
/* Peripheral reset */
{
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index d1e080701264..e859d148aba9 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -208,14 +208,14 @@ config RTC_DRV_AS3722
will be called rtc-as3722.
config RTC_DRV_DS1307
- tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025"
+ tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025, ISL12057"
help
If you say yes here you get support for various compatible RTC
chips (often with battery backup) connected with I2C. This driver
should handle DS1307, DS1337, DS1338, DS1339, DS1340, ST M41T00,
- EPSON RX-8025 and probably other chips. In some cases the RTC
- must already have been initialized (by manufacturing or a
- bootloader).
+ EPSON RX-8025, Intersil ISL12057 and probably other chips. In some
+ cases the RTC must already have been initialized (by manufacturing or
+ a bootloader).
The first seven registers on these chips hold an RTC, and other
registers may add features such as NVRAM, a trickle charger for
@@ -234,6 +234,20 @@ config RTC_DRV_DS1307_HWMON
Say Y here if you want to expose temperature sensor data on
rtc-ds1307 (only DS3231)
+config RTC_DRV_DS1307_CENTURY
+ bool "Century bit support for rtc-ds1307"
+ depends on RTC_DRV_DS1307
+ default n
+ help
+	  The DS1307 driver suffered from a bug where it was enabling the
+	  century bit unconditionally but never used it when reading the time.
+	  This made the driver unable to support dates beyond 2099.
+	  Setting this option will add proper support for the century bit, but
+	  if the time was previously set using a kernel predating this option,
+	  reading the date will return a date in the next century.
+	  To solve that, you could boot a kernel without this option set, set
+	  the RTC date and then boot a kernel with this option set.
+
config RTC_DRV_DS1374
tristate "Dallas/Maxim DS1374"
help
@@ -374,16 +388,6 @@ config RTC_DRV_ISL12022
This driver can also be built as a module. If so, the module
will be called rtc-isl12022.
-config RTC_DRV_ISL12057
- select REGMAP_I2C
- tristate "Intersil ISL12057"
- help
- If you say yes here you get support for the Intersil ISL12057
- I2C RTC chip.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-isl12057.
-
config RTC_DRV_X1205
tristate "Xicor/Intersil X1205"
help
@@ -661,6 +665,7 @@ config RTC_DRV_DS1343
will be called rtc-ds1343.
config RTC_DRV_DS1347
+ select REGMAP_SPI
tristate "Dallas/Maxim DS1347"
help
If you say yes here you get support for the
@@ -1201,7 +1206,7 @@ comment "on-CPU RTC drivers"
config RTC_DRV_ASM9260
tristate "Alphascale asm9260 RTC"
- depends on MACH_ASM9260
+ depends on MACH_ASM9260 || COMPILE_TEST
help
If you say yes here you get support for the RTC on the
Alphascale asm9260 SoC.
@@ -1241,6 +1246,9 @@ config RTC_DRV_IMXDI
config RTC_DRV_OMAP
tristate "TI OMAP Real Time Clock"
depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST
+ depends on OF
+ depends on PINCTRL
+ select GENERIC_PINCONF
help
Say "yes" here to support the on chip real time clock
present on TI OMAP1, AM33xx, DA8xx/OMAP-L13x, AM43xx and DRA7xx.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 8fb994bacdf7..1ac694a330c8 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -72,7 +72,6 @@ obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o
obj-$(CONFIG_RTC_DRV_HYM8563) += rtc-hym8563.o
obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o
obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
-obj-$(CONFIG_RTC_DRV_ISL12057) += rtc-isl12057.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
obj-$(CONFIG_RTC_DRV_LP8788) += rtc-lp8788.o
diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c
index 70b4fd0f6122..9e336184491c 100644
--- a/drivers/rtc/rtc-ac100.c
+++ b/drivers/rtc/rtc-ac100.c
@@ -327,6 +327,8 @@ static int ac100_rtc_register_clks(struct ac100_rtc_dev *chip)
.flags = 0,
};
+ of_property_read_string_index(np, "clock-output-names",
+ i, &init.name);
clk->regmap = chip->regmap;
clk->offset = AC100_CLKOUT_CTRL1 + i;
clk->hw.init = &init;
@@ -552,6 +554,9 @@ static int ac100_rtc_probe(struct platform_device *pdev)
int ret;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
platform_set_drvdata(pdev, chip);
chip->dev = &pdev->dev;
chip->regmap = ac100->regmap;
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 5219916ce11d..18a93d3e3f93 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -112,8 +112,6 @@ struct asm9260_rtc_priv {
void __iomem *iobase;
struct rtc_device *rtc;
struct clk *clk;
- /* io lock */
- spinlock_t lock;
};
static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id)
@@ -122,11 +120,15 @@ static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id)
u32 isr;
unsigned long events = 0;
+ mutex_lock(&priv->rtc->ops_lock);
isr = ioread32(priv->iobase + HW_CIIR);
- if (!isr)
+ if (!isr) {
+ mutex_unlock(&priv->rtc->ops_lock);
return IRQ_NONE;
+ }
iowrite32(0, priv->iobase + HW_CIIR);
+ mutex_unlock(&priv->rtc->ops_lock);
events |= RTC_AF | RTC_IRQF;
@@ -139,9 +141,7 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
u32 ctime0, ctime1, ctime2;
- unsigned long irq_flags;
- spin_lock_irqsave(&priv->lock, irq_flags);
ctime0 = ioread32(priv->iobase + HW_CTIME0);
ctime1 = ioread32(priv->iobase + HW_CTIME1);
ctime2 = ioread32(priv->iobase + HW_CTIME2);
@@ -155,7 +155,6 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm)
ctime1 = ioread32(priv->iobase + HW_CTIME1);
ctime2 = ioread32(priv->iobase + HW_CTIME2);
}
- spin_unlock_irqrestore(&priv->lock, irq_flags);
tm->tm_sec = (ctime0 >> BM_CTIME0_SEC_S) & BM_CTIME0_SEC_M;
tm->tm_min = (ctime0 >> BM_CTIME0_MIN_S) & BM_CTIME0_MIN_M;
@@ -174,9 +173,7 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
- unsigned long irq_flags;
- spin_lock_irqsave(&priv->lock, irq_flags);
	/*
	 * make sure the SEC counter will not flip the other counters while
	 * the time is written; the real value is written at the end of the
	 * sequence.
@@ -191,7 +188,6 @@ static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm)
iowrite32(tm->tm_hour, priv->iobase + HW_HOUR);
iowrite32(tm->tm_min, priv->iobase + HW_MIN);
iowrite32(tm->tm_sec, priv->iobase + HW_SEC);
- spin_unlock_irqrestore(&priv->lock, irq_flags);
return 0;
}
@@ -199,9 +195,7 @@ static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm)
static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
- unsigned long irq_flags;
- spin_lock_irqsave(&priv->lock, irq_flags);
alrm->time.tm_year = ioread32(priv->iobase + HW_ALYEAR);
alrm->time.tm_mon = ioread32(priv->iobase + HW_ALMON);
alrm->time.tm_mday = ioread32(priv->iobase + HW_ALDOM);
@@ -213,7 +207,6 @@ static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->enabled = ioread32(priv->iobase + HW_AMR) ? 1 : 0;
alrm->pending = ioread32(priv->iobase + HW_CIIR) ? 1 : 0;
- spin_unlock_irqrestore(&priv->lock, irq_flags);
return rtc_valid_tm(&alrm->time);
}
@@ -221,9 +214,7 @@ static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int asm9260_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
- unsigned long irq_flags;
- spin_lock_irqsave(&priv->lock, irq_flags);
iowrite32(alrm->time.tm_year, priv->iobase + HW_ALYEAR);
iowrite32(alrm->time.tm_mon, priv->iobase + HW_ALMON);
iowrite32(alrm->time.tm_mday, priv->iobase + HW_ALDOM);
@@ -234,7 +225,6 @@ static int asm9260_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
iowrite32(alrm->time.tm_sec, priv->iobase + HW_ALSEC);
iowrite32(alrm->enabled ? 0 : BM_AMR_OFF, priv->iobase + HW_AMR);
- spin_unlock_irqrestore(&priv->lock, irq_flags);
return 0;
}
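
The driver-private spinlock can go because every rtc_class_ops callback is already serialized by the RTC core's per-device ops_lock; the interrupt handler was the one path outside that umbrella, so it now takes ops_lock itself. A paraphrased sketch of the core-side serialization this relies on (see drivers/rtc/interface.c for the real thing):

	int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
	{
		int err = mutex_lock_interruptible(&rtc->ops_lock);

		if (err)
			return err;
		err = rtc->ops->read_time(rtc->dev.parent, tm);
		mutex_unlock(&rtc->ops_lock);
		return err;
	}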
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index 83ac2337c0f7..de8bf56a41e7 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -187,7 +187,7 @@ static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
return ret;
}
-static struct rtc_class_ops at32_rtc_ops = {
+static const struct rtc_class_ops at32_rtc_ops = {
.read_time = at32_rtc_readtime,
.set_time = at32_rtc_settime,
.read_alarm = at32_rtc_readalarm,
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 0299988b4f13..397742446007 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -93,8 +93,15 @@ static int bq32k_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (error)
return error;
+ /*
+ * In case of oscillator failure, the register contents should be
+ * considered invalid. The flag is cleared the next time the RTC is set.
+ */
+ if (regs.minutes & BQ32K_OF)
+ return -EINVAL;
+
tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK);
- tm->tm_min = bcd2bin(regs.minutes & BQ32K_SECONDS_MASK);
+ tm->tm_min = bcd2bin(regs.minutes & BQ32K_MINUTES_MASK);
tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK);
tm->tm_mday = bcd2bin(regs.date);
tm->tm_wday = bcd2bin(regs.day) - 1;
@@ -204,13 +211,10 @@ static int bq32k_probe(struct i2c_client *client,
/* Check Oscillator Failure flag */
error = bq32k_read(dev, &reg, BQ32K_MINUTES, 1);
- if (!error && (reg & BQ32K_OF)) {
- dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
- reg &= ~BQ32K_OF;
- error = bq32k_write(dev, &reg, BQ32K_MINUTES, 1);
- }
if (error)
return error;
+ if (reg & BQ32K_OF)
+ dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
if (client->dev.of_node)
trickle_charger_of_init(dev, client->dev.of_node);
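
Probe now only warns about an oscillator failure instead of silently clearing the flag, and read_time refuses to hand out a time while the flag is set. The flag goes away on the next set_time because writing the time rewrites the minutes register, whose bit 7 is BQ32K_OF; a sketch of that implicit clearing (helper usage assumed from the rest of the driver):

	/* bin2bcd(tm_min) is at most 0x59, so bit 7 (BQ32K_OF) ends up 0. */
	regs.minutes = bin2bcd(tm->tm_min);
	error = bq32k_write(dev, &regs, 0, sizeof(regs));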
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 43745cac0141..dd3d59806ffa 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -62,6 +62,8 @@ struct cmos_rtc {
u8 day_alrm;
u8 mon_alrm;
u8 century;
+
+ struct rtc_wkalrm saved_wkalrm;
};
/* both platform and pnp busses use negative numbers for invalid irqs */
@@ -707,6 +709,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup1;
}
+ hpet_rtc_timer_init();
+
if (is_valid_irq(rtc_irq)) {
irq_handler_t rtc_cmos_int_handler;
@@ -714,6 +718,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
rtc_cmos_int_handler = hpet_rtc_interrupt;
retval = hpet_register_irq_handler(cmos_interrupt);
if (retval) {
+ hpet_mask_rtc_irq_bit(RTC_IRQMASK);
dev_warn(dev, "hpet_register_irq_handler "
" failed in rtc_init().");
goto cleanup1;
@@ -729,7 +734,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup1;
}
}
- hpet_rtc_timer_init();
/* export at least the first block of NVRAM */
nvram.size = address_space - NVRAM_OFFSET;
@@ -844,8 +848,6 @@ static int cmos_aie_poweroff(struct device *dev)
return retval;
}
-#ifdef CONFIG_PM
-
static int cmos_suspend(struct device *dev)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
@@ -877,6 +879,8 @@ static int cmos_suspend(struct device *dev)
enable_irq_wake(cmos->irq);
}
+ cmos_read_alarm(dev, &cmos->saved_wkalrm);
+
dev_dbg(dev, "suspend%s, ctrl %02x\n",
(tmp & RTC_AIE) ? ", alarm may wake" : "",
tmp);
@@ -892,12 +896,32 @@ static int cmos_suspend(struct device *dev)
*/
static inline int cmos_poweroff(struct device *dev)
{
+ if (!IS_ENABLED(CONFIG_PM))
+ return -ENOSYS;
+
return cmos_suspend(dev);
}
-#ifdef CONFIG_PM_SLEEP
+static void cmos_check_wkalrm(struct device *dev)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ struct rtc_wkalrm current_alarm;
+ time64_t t_current_expires;
+ time64_t t_saved_expires;
+
+ cmos_read_alarm(dev, &current_alarm);
+ t_current_expires = rtc_tm_to_time64(&current_alarm.time);
+ t_saved_expires = rtc_tm_to_time64(&cmos->saved_wkalrm.time);
+ if (t_current_expires != t_saved_expires ||
+ cmos->saved_wkalrm.enabled != current_alarm.enabled) {
+ cmos_set_alarm(dev, &cmos->saved_wkalrm);
+ }
+}
+
+static void cmos_check_acpi_rtc_status(struct device *dev,
+ unsigned char *rtc_control);
-static int cmos_resume(struct device *dev)
+static int __maybe_unused cmos_resume(struct device *dev)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char tmp;
@@ -910,6 +934,9 @@ static int cmos_resume(struct device *dev)
cmos->enabled_wake = 0;
}
+ /* The BIOS might have changed the alarm, restore it */
+ cmos_check_wkalrm(dev);
+
spin_lock_irq(&rtc_lock);
tmp = cmos->suspend_ctrl;
cmos->suspend_ctrl = 0;
@@ -936,6 +963,9 @@ static int cmos_resume(struct device *dev)
tmp &= ~RTC_AIE;
hpet_mask_rtc_irq_bit(RTC_AIE);
} while (mask & RTC_AIE);
+
+ if (tmp & RTC_AIE)
+ cmos_check_acpi_rtc_status(dev, &tmp);
}
spin_unlock_irq(&rtc_lock);
@@ -944,16 +974,6 @@ static int cmos_resume(struct device *dev)
return 0;
}
-#endif
-#else
-
-static inline int cmos_poweroff(struct device *dev)
-{
- return -ENOSYS;
-}
-
-#endif
-
static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
/*----------------------------------------------------------------*/
@@ -973,6 +993,20 @@ static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
static u32 rtc_handler(void *context)
{
struct device *dev = context;
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ unsigned char rtc_control = 0;
+ unsigned char rtc_intr;
+
+ spin_lock_irq(&rtc_lock);
+ if (cmos_rtc.suspend_ctrl)
+ rtc_control = CMOS_READ(RTC_CONTROL);
+ if (rtc_control & RTC_AIE) {
+ cmos_rtc.suspend_ctrl &= ~RTC_AIE;
+ CMOS_WRITE(rtc_control, RTC_CONTROL);
+ rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
+ rtc_update_irq(cmos->rtc, 1, rtc_intr);
+ }
+ spin_unlock_irq(&rtc_lock);
pm_wakeup_event(dev, 0);
acpi_clear_event(ACPI_EVENT_RTC);
@@ -1039,12 +1073,39 @@ static void cmos_wake_setup(struct device *dev)
device_init_wakeup(dev, 1);
}
+static void cmos_check_acpi_rtc_status(struct device *dev,
+ unsigned char *rtc_control)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ acpi_event_status rtc_status;
+ acpi_status status;
+
+ if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC)
+ return;
+
+ status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Could not get RTC status\n");
+ } else if (rtc_status & ACPI_EVENT_FLAG_SET) {
+ unsigned char mask;
+ *rtc_control &= ~RTC_AIE;
+ CMOS_WRITE(*rtc_control, RTC_CONTROL);
+ mask = CMOS_READ(RTC_INTR_FLAGS);
+ rtc_update_irq(cmos->rtc, 1, mask);
+ }
+}
+
#else
static void cmos_wake_setup(struct device *dev)
{
}
+static void cmos_check_acpi_rtc_status(struct device *dev,
+ unsigned char *rtc_control)
+{
+}
+
#endif
#ifdef CONFIG_PNP
@@ -1206,9 +1267,7 @@ static struct platform_driver cmos_platform_driver = {
.shutdown = cmos_platform_shutdown,
.driver = {
.name = driver_name,
-#ifdef CONFIG_PM
.pm = &cmos_pm_ops,
-#endif
.of_match_table = of_match_ptr(of_cmos_match),
}
};
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 101b7a240e0f..cfc4141d99cd 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -140,7 +140,7 @@ static int coh901331_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static struct rtc_class_ops coh901331_ops = {
+static const struct rtc_class_ops coh901331_ops = {
.read_time = coh901331_read_time,
.set_mmss = coh901331_set_mmss,
.read_alarm = coh901331_read_alarm,
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index dba60c1dfce2..caf35567e14c 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -469,7 +469,7 @@ static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
return 0;
}
-static struct rtc_class_ops davinci_rtc_ops = {
+static const struct rtc_class_ops davinci_rtc_ops = {
.ioctl = davinci_rtc_ioctl,
.read_time = davinci_rtc_read_time,
.set_time = davinci_rtc_set_time,
diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c
index 8d05596a6765..b253bf1b3531 100644
--- a/drivers/rtc/rtc-digicolor.c
+++ b/drivers/rtc/rtc-digicolor.c
@@ -159,7 +159,7 @@ static int dc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static struct rtc_class_ops dc_rtc_ops = {
+static const struct rtc_class_ops dc_rtc_ops = {
.read_time = dc_rtc_read_time,
.set_mmss = dc_rtc_set_mmss,
.read_alarm = dc_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index f5dd09fe5add..0ec4be62322b 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -102,7 +102,7 @@ static int ds1302_rtc_get_time(struct device *dev, struct rtc_time *time)
return rtc_valid_tm(time);
}
-static struct rtc_class_ops ds1302_rtc_ops = {
+static const struct rtc_class_ops ds1302_rtc_ops = {
.read_time = ds1302_rtc_get_time,
.set_time = ds1302_rtc_set_time,
};
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 8e1c5cb6ece6..4e31036ee259 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -186,6 +186,7 @@ static const struct i2c_device_id ds1307_id[] = {
{ "mcp7941x", mcp794xx },
{ "pt7c4338", ds_1307 },
{ "rx8025", rx_8025 },
+ { "isl12057", ds_1337 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds1307_id);
@@ -382,10 +383,25 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
t->tm_mday = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
tmp = ds1307->regs[DS1307_REG_MONTH] & 0x1f;
t->tm_mon = bcd2bin(tmp) - 1;
-
- /* assume 20YY not 19YY, and ignore DS1337_BIT_CENTURY */
t->tm_year = bcd2bin(ds1307->regs[DS1307_REG_YEAR]) + 100;
+#ifdef CONFIG_RTC_DRV_DS1307_CENTURY
+ switch (ds1307->type) {
+ case ds_1337:
+ case ds_1339:
+ case ds_3231:
+ if (ds1307->regs[DS1307_REG_MONTH] & DS1337_BIT_CENTURY)
+ t->tm_year += 100;
+ break;
+ case ds_1340:
+ if (ds1307->regs[DS1307_REG_HOUR] & DS1340_BIT_CENTURY)
+ t->tm_year += 100;
+ break;
+ default:
+ break;
+ }
+#endif
+
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
"read", t->tm_sec, t->tm_min,
@@ -409,6 +425,27 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
t->tm_hour, t->tm_mday,
t->tm_mon, t->tm_year, t->tm_wday);
+#ifdef CONFIG_RTC_DRV_DS1307_CENTURY
+ if (t->tm_year < 100)
+ return -EINVAL;
+
+ switch (ds1307->type) {
+ case ds_1337:
+ case ds_1339:
+ case ds_3231:
+ case ds_1340:
+ if (t->tm_year > 299)
+ return -EINVAL;
+ default:
+ if (t->tm_year > 199)
+ return -EINVAL;
+ break;
+ }
+#else
+ if (t->tm_year < 100 || t->tm_year > 199)
+ return -EINVAL;
+#endif
+
buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
buf[DS1307_REG_MIN] = bin2bcd(t->tm_min);
buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
@@ -424,11 +461,13 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
case ds_1337:
case ds_1339:
case ds_3231:
- buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
+ if (t->tm_year > 199)
+ buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
break;
case ds_1340:
- buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN
- | DS1340_BIT_CENTURY;
+ buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN;
+ if (t->tm_year > 199)
+ buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY;
break;
case mcp794xx:
/*
@@ -1295,6 +1334,11 @@ static int ds1307_probe(struct i2c_client *client,
if (of_property_read_bool(client->dev.of_node, "wakeup-source")) {
ds1307_can_wakeup_device = true;
}
+ /* Intersil ISL12057 DT backward compatibility */
+ if (of_property_read_bool(client->dev.of_node,
+ "isil,irq2-can-wakeup-machine")) {
+ ds1307_can_wakeup_device = true;
+ }
#endif
switch (ds1307->type) {
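
To make the century handling above concrete: tm_year counts from 1900, so 100..199 is 2000-2099 (century bit clear) and 200..299 is 2100-2199 (century bit set). A worked round trip for the hypothetical date 2116 (tm_year == 216), using the register names from the driver:

	/* write */
	buf[DS1307_REG_YEAR]   = bin2bcd(216 % 100);        /* 0x16 */
	buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;        /* 216 > 199 */

	/* read back */
	t->tm_year = bcd2bin(regs[DS1307_REG_YEAR]) + 100;  /* 116 */
	if (regs[DS1307_REG_MONTH] & DS1337_BIT_CENTURY)
		t->tm_year += 100;                          /* 216 -> 2116 */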
diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c
index 641e8e8a0dd7..ccfc9d43eb1e 100644
--- a/drivers/rtc/rtc-ds1347.c
+++ b/drivers/rtc/rtc-ds1347.c
@@ -18,6 +18,7 @@
#include <linux/rtc.h>
#include <linux/spi/spi.h>
#include <linux/bcd.h>
+#include <linux/regmap.h>
/* Registers in ds1347 rtc */
@@ -32,37 +33,28 @@
#define DS1347_STATUS_REG 0x17
#define DS1347_CLOCK_BURST 0x3F
-static int ds1347_read_reg(struct device *dev, unsigned char address,
- unsigned char *data)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- *data = address | 0x80;
-
- return spi_write_then_read(spi, data, 1, data, 1);
-}
-
-static int ds1347_write_reg(struct device *dev, unsigned char address,
- unsigned char data)
-{
- struct spi_device *spi = to_spi_device(dev);
- unsigned char buf[2];
-
- buf[0] = address & 0x7F;
- buf[1] = data;
+static const struct regmap_range ds1347_ranges[] = {
+ {
+ .range_min = DS1347_SECONDS_REG,
+ .range_max = DS1347_STATUS_REG,
+ },
+};
- return spi_write_then_read(spi, buf, 2, NULL, 0);
-}
+static const struct regmap_access_table ds1347_access_table = {
+ .yes_ranges = ds1347_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ds1347_ranges),
+};
static int ds1347_read_time(struct device *dev, struct rtc_time *dt)
{
struct spi_device *spi = to_spi_device(dev);
+ struct regmap *map;
int err;
unsigned char buf[8];
- buf[0] = DS1347_CLOCK_BURST | 0x80;
+ map = spi_get_drvdata(spi);
- err = spi_write_then_read(spi, buf, 1, buf, 8);
+ err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8);
if (err)
return err;
@@ -80,25 +72,27 @@ static int ds1347_read_time(struct device *dev, struct rtc_time *dt)
static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
{
struct spi_device *spi = to_spi_device(dev);
- unsigned char buf[9];
+ struct regmap *map;
+ unsigned char buf[8];
+
+ map = spi_get_drvdata(spi);
- buf[0] = DS1347_CLOCK_BURST & 0x7F;
- buf[1] = bin2bcd(dt->tm_sec);
- buf[2] = bin2bcd(dt->tm_min);
- buf[3] = (bin2bcd(dt->tm_hour) & 0x3F);
- buf[4] = bin2bcd(dt->tm_mday);
- buf[5] = bin2bcd(dt->tm_mon + 1);
- buf[6] = bin2bcd(dt->tm_wday + 1);
+ buf[0] = bin2bcd(dt->tm_sec);
+ buf[1] = bin2bcd(dt->tm_min);
+ buf[2] = (bin2bcd(dt->tm_hour) & 0x3F);
+ buf[3] = bin2bcd(dt->tm_mday);
+ buf[4] = bin2bcd(dt->tm_mon + 1);
+ buf[5] = bin2bcd(dt->tm_wday + 1);
	/* The year in Linux counts from 1900 (i.e. values around 100 here);
	   the RTC only stores 00 to 99 */
dt->tm_year = dt->tm_year % 100;
- buf[7] = bin2bcd(dt->tm_year);
- buf[8] = bin2bcd(0x00);
+ buf[6] = bin2bcd(dt->tm_year);
+ buf[7] = bin2bcd(0x00);
/* write the rtc settings */
- return spi_write_then_read(spi, buf, 9, NULL, 0);
+ return regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8);
}
static const struct rtc_class_ops ds1347_rtc_ops = {
@@ -109,35 +103,53 @@ static const struct rtc_class_ops ds1347_rtc_ops = {
static int ds1347_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
- unsigned char data;
+ struct regmap_config config;
+ struct regmap *map;
+ unsigned int data;
int res;
+ memset(&config, 0, sizeof(config));
+ config.reg_bits = 8;
+ config.val_bits = 8;
+ config.read_flag_mask = 0x80;
+ config.max_register = 0x3F;
+ config.wr_table = &ds1347_access_table;
+
/* spi setup with ds1347 in mode 3 and bits per word as 8 */
spi->mode = SPI_MODE_3;
spi->bits_per_word = 8;
spi_setup(spi);
+ map = devm_regmap_init_spi(spi, &config);
+
+ if (IS_ERR(map)) {
+ dev_err(&spi->dev, "ds1347 regmap init spi failed\n");
+ return PTR_ERR(map);
+ }
+
+ spi_set_drvdata(spi, map);
+
/* RTC Settings */
- res = ds1347_read_reg(&spi->dev, DS1347_SECONDS_REG, &data);
+ res = regmap_read(map, DS1347_SECONDS_REG, &data);
if (res)
return res;
/* Disable the write protect of rtc */
- ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data);
+ regmap_read(map, DS1347_CONTROL_REG, &data);
data = data & ~(1<<7);
- ds1347_write_reg(&spi->dev, DS1347_CONTROL_REG, data);
+ regmap_write(map, DS1347_CONTROL_REG, data);
	/* Enable the oscillator, disable the oscillator stop flag and the
	   glitch filter to reduce current consumption */
- ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data);
+ regmap_read(map, DS1347_STATUS_REG, &data);
data = data & 0x1B;
- ds1347_write_reg(&spi->dev, DS1347_STATUS_REG, data);
+ regmap_write(map, DS1347_STATUS_REG, data);
/* display the settings */
- ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data);
+ regmap_read(map, DS1347_CONTROL_REG, &data);
dev_info(&spi->dev, "DS1347 RTC CTRL Reg = 0x%02x\n", data);
- ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data);
+ regmap_read(map, DS1347_STATUS_REG, &data);
dev_info(&spi->dev, "DS1347 RTC Status Reg = 0x%02x\n", data);
rtc = devm_rtc_device_register(&spi->dev, "ds1347",
@@ -146,8 +158,6 @@ static int ds1347_probe(struct spi_device *spi)
if (IS_ERR(rtc))
return PTR_ERR(rtc);
- spi_set_drvdata(spi, rtc);
-
return 0;
}
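
The regmap conversion keeps the old wire format: the removed helpers OR'd 0x80 into the register address for reads and masked it off for writes, which is exactly what read_flag_mask = 0x80 now does on every access. The equivalence, side by side:

	/* before: the read bit was set by hand */
	buf = DS1347_SECONDS_REG | 0x80;
	spi_write_then_read(spi, &buf, 1, &buf, 1);

	/* after: regmap applies read_flag_mask (0x80) automatically */
	regmap_read(map, DS1347_SECONDS_REG, &data);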
diff --git a/drivers/rtc/rtc-gemini.c b/drivers/rtc/rtc-gemini.c
index b57505efadbc..688debc14348 100644
--- a/drivers/rtc/rtc-gemini.c
+++ b/drivers/rtc/rtc-gemini.c
@@ -110,7 +110,7 @@ static int gemini_rtc_set_time(struct device *dev, struct rtc_time *tm)
return 0;
}
-static struct rtc_class_ops gemini_rtc_ops = {
+static const struct rtc_class_ops gemini_rtc_ops = {
.read_time = gemini_rtc_read_time,
.set_time = gemini_rtc_set_time,
};
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
deleted file mode 100644
index 0e7f0f52bfe4..000000000000
--- a/drivers/rtc/rtc-isl12057.c
+++ /dev/null
@@ -1,643 +0,0 @@
-/*
- * rtc-isl12057 - Driver for Intersil ISL12057 I2C Real Time Clock
- *
- * Copyright (C) 2013, Arnaud EBALARD <arno@natisbad.org>
- *
- * This work is largely based on Intersil ISL1208 driver developed by
- * Hebert Valerio Riedel <hvr@gnu.org>.
- *
- * Detailed datasheet on which this development is based is available here:
- *
- * http://natisbad.org/NAS2/refs/ISL12057.pdf
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/rtc.h>
-#include <linux/i2c.h>
-#include <linux/bcd.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/regmap.h>
-
-#define DRV_NAME "rtc-isl12057"
-
-/* RTC section */
-#define ISL12057_REG_RTC_SC 0x00 /* Seconds */
-#define ISL12057_REG_RTC_MN 0x01 /* Minutes */
-#define ISL12057_REG_RTC_HR 0x02 /* Hours */
-#define ISL12057_REG_RTC_HR_PM BIT(5) /* AM/PM bit in 12h format */
-#define ISL12057_REG_RTC_HR_MIL BIT(6) /* 24h/12h format */
-#define ISL12057_REG_RTC_DW 0x03 /* Day of the Week */
-#define ISL12057_REG_RTC_DT 0x04 /* Date */
-#define ISL12057_REG_RTC_MO 0x05 /* Month */
-#define ISL12057_REG_RTC_MO_CEN BIT(7) /* Century bit */
-#define ISL12057_REG_RTC_YR 0x06 /* Year */
-#define ISL12057_RTC_SEC_LEN 7
-
-/* Alarm 1 section */
-#define ISL12057_REG_A1_SC 0x07 /* Alarm 1 Seconds */
-#define ISL12057_REG_A1_MN 0x08 /* Alarm 1 Minutes */
-#define ISL12057_REG_A1_HR 0x09 /* Alarm 1 Hours */
-#define ISL12057_REG_A1_HR_PM BIT(5) /* AM/PM bit in 12h format */
-#define ISL12057_REG_A1_HR_MIL BIT(6) /* 24h/12h format */
-#define ISL12057_REG_A1_DWDT 0x0A /* Alarm 1 Date / Day of the week */
-#define ISL12057_REG_A1_DWDT_B BIT(6) /* DW / DT selection bit */
-#define ISL12057_A1_SEC_LEN 4
-
-/* Alarm 2 section */
-#define ISL12057_REG_A2_MN 0x0B /* Alarm 2 Minutes */
-#define ISL12057_REG_A2_HR 0x0C /* Alarm 2 Hours */
-#define ISL12057_REG_A2_DWDT 0x0D /* Alarm 2 Date / Day of the week */
-#define ISL12057_A2_SEC_LEN 3
-
-/* Control/Status registers */
-#define ISL12057_REG_INT 0x0E
-#define ISL12057_REG_INT_A1IE BIT(0) /* Alarm 1 interrupt enable bit */
-#define ISL12057_REG_INT_A2IE BIT(1) /* Alarm 2 interrupt enable bit */
-#define ISL12057_REG_INT_INTCN BIT(2) /* Interrupt control enable bit */
-#define ISL12057_REG_INT_RS1 BIT(3) /* Freq out control bit 1 */
-#define ISL12057_REG_INT_RS2 BIT(4) /* Freq out control bit 2 */
-#define ISL12057_REG_INT_EOSC BIT(7) /* Oscillator enable bit */
-
-#define ISL12057_REG_SR 0x0F
-#define ISL12057_REG_SR_A1F BIT(0) /* Alarm 1 interrupt bit */
-#define ISL12057_REG_SR_A2F BIT(1) /* Alarm 2 interrupt bit */
-#define ISL12057_REG_SR_OSF BIT(7) /* Oscillator failure bit */
-
-/* Register memory map length */
-#define ISL12057_MEM_MAP_LEN 0x10
-
-struct isl12057_rtc_data {
- struct rtc_device *rtc;
- struct regmap *regmap;
- struct mutex lock;
- int irq;
-};
-
-static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs)
-{
- tm->tm_sec = bcd2bin(regs[ISL12057_REG_RTC_SC]);
- tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]);
-
- if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */
- tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x1f);
- if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM)
- tm->tm_hour += 12;
- } else { /* 24 hour mode */
- tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x3f);
- }
-
- tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]);
- tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* starts at 1 */
- tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO] & 0x1f) - 1; /* ditto */
- tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100;
-
- /* Check if years register has overflown from 99 to 00 */
- if (regs[ISL12057_REG_RTC_MO] & ISL12057_REG_RTC_MO_CEN)
- tm->tm_year += 100;
-}
-
-static int isl12057_rtc_tm_to_regs(u8 *regs, struct rtc_time *tm)
-{
- u8 century_bit;
-
- /*
- * The clock has an 8 bit wide bcd-coded register for the year.
- * It also has a century bit encoded in MO flag which provides
- * information about overflow of year register from 99 to 00.
- * tm_year is an offset from 1900 and we are interested in the
- * 2000-2199 range, so any value less than 100 or larger than
- * 299 is invalid.
- */
- if (tm->tm_year < 100 || tm->tm_year > 299)
- return -EINVAL;
-
- century_bit = (tm->tm_year > 199) ? ISL12057_REG_RTC_MO_CEN : 0;
-
- regs[ISL12057_REG_RTC_SC] = bin2bcd(tm->tm_sec);
- regs[ISL12057_REG_RTC_MN] = bin2bcd(tm->tm_min);
- regs[ISL12057_REG_RTC_HR] = bin2bcd(tm->tm_hour); /* 24-hour format */
- regs[ISL12057_REG_RTC_DT] = bin2bcd(tm->tm_mday);
- regs[ISL12057_REG_RTC_MO] = bin2bcd(tm->tm_mon + 1) | century_bit;
- regs[ISL12057_REG_RTC_YR] = bin2bcd(tm->tm_year % 100);
- regs[ISL12057_REG_RTC_DW] = bin2bcd(tm->tm_wday + 1);
-
- return 0;
-}
-
-/*
- * Try and match register bits w/ fixed null values to see whether we
- * are dealing with an ISL12057. Note: this function is called early
- * during init and hence does not need mutex protection.
- */
-static int isl12057_i2c_validate_chip(struct regmap *regmap)
-{
- u8 regs[ISL12057_MEM_MAP_LEN];
- static const u8 mask[ISL12057_MEM_MAP_LEN] = { 0x80, 0x80, 0x80, 0xf8,
- 0xc0, 0x60, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x60, 0x7c };
- int ret, i;
-
- ret = regmap_bulk_read(regmap, 0, regs, ISL12057_MEM_MAP_LEN);
- if (ret)
- return ret;
-
- for (i = 0; i < ISL12057_MEM_MAP_LEN; ++i) {
- if (regs[i] & mask[i]) /* check if bits are cleared */
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int _isl12057_rtc_clear_alarm(struct device *dev)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- int ret;
-
- ret = regmap_update_bits(data->regmap, ISL12057_REG_SR,
- ISL12057_REG_SR_A1F, 0);
- if (ret)
- dev_err(dev, "%s: clearing alarm failed (%d)\n", __func__, ret);
-
- return ret;
-}
-
-static int _isl12057_rtc_update_alarm(struct device *dev, int enable)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- int ret;
-
- ret = regmap_update_bits(data->regmap, ISL12057_REG_INT,
- ISL12057_REG_INT_A1IE,
- enable ? ISL12057_REG_INT_A1IE : 0);
- if (ret)
- dev_err(dev, "%s: changing alarm interrupt flag failed (%d)\n",
- __func__, ret);
-
- return ret;
-}
-
-/*
- * Note: as we only read from device and do not perform any update, there is
- * no need for an equivalent function which would try and get driver's main
- * lock. Here, it is safe for everyone if we just use regmap internal lock
- * on the device when reading.
- */
-static int _isl12057_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- u8 regs[ISL12057_RTC_SEC_LEN];
- unsigned int sr;
- int ret;
-
- ret = regmap_read(data->regmap, ISL12057_REG_SR, &sr);
- if (ret) {
- dev_err(dev, "%s: unable to read oscillator status flag (%d)\n",
- __func__, ret);
- goto out;
- } else {
- if (sr & ISL12057_REG_SR_OSF) {
- ret = -ENODATA;
- goto out;
- }
- }
-
- ret = regmap_bulk_read(data->regmap, ISL12057_REG_RTC_SC, regs,
- ISL12057_RTC_SEC_LEN);
- if (ret)
- dev_err(dev, "%s: unable to read RTC time section (%d)\n",
- __func__, ret);
-
-out:
- if (ret)
- return ret;
-
- isl12057_rtc_regs_to_tm(tm, regs);
-
- return rtc_valid_tm(tm);
-}
-
-static int isl12057_rtc_update_alarm(struct device *dev, int enable)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- int ret;
-
- mutex_lock(&data->lock);
- ret = _isl12057_rtc_update_alarm(dev, enable);
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static int isl12057_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- struct rtc_time *alarm_tm = &alarm->time;
- u8 regs[ISL12057_A1_SEC_LEN];
- unsigned int ir;
- int ret;
-
- mutex_lock(&data->lock);
- ret = regmap_bulk_read(data->regmap, ISL12057_REG_A1_SC, regs,
- ISL12057_A1_SEC_LEN);
- if (ret) {
- dev_err(dev, "%s: reading alarm section failed (%d)\n",
- __func__, ret);
- goto err_unlock;
- }
-
- alarm_tm->tm_sec = bcd2bin(regs[0] & 0x7f);
- alarm_tm->tm_min = bcd2bin(regs[1] & 0x7f);
- alarm_tm->tm_hour = bcd2bin(regs[2] & 0x3f);
- alarm_tm->tm_mday = bcd2bin(regs[3] & 0x3f);
-
- ret = regmap_read(data->regmap, ISL12057_REG_INT, &ir);
- if (ret) {
- dev_err(dev, "%s: reading alarm interrupt flag failed (%d)\n",
- __func__, ret);
- goto err_unlock;
- }
-
- alarm->enabled = !!(ir & ISL12057_REG_INT_A1IE);
-
-err_unlock:
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static int isl12057_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- struct rtc_time *alarm_tm = &alarm->time;
- unsigned long rtc_secs, alarm_secs;
- u8 regs[ISL12057_A1_SEC_LEN];
- struct rtc_time rtc_tm;
- int ret, enable = 1;
-
- mutex_lock(&data->lock);
- ret = _isl12057_rtc_read_time(dev, &rtc_tm);
- if (ret)
- goto err_unlock;
-
- ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
- if (ret)
- goto err_unlock;
-
- ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
- if (ret)
- goto err_unlock;
-
- /* If alarm time is before current time, disable the alarm */
- if (!alarm->enabled || alarm_secs <= rtc_secs) {
- enable = 0;
- } else {
- /*
-		 * The chip only supports alarms up to one month in the future.
-		 * Return an error if we get something beyond that limit. The
-		 * comparison is done by incrementing the rtc_tm month field by
-		 * one and checking that the alarm value is still below.
- */
- if (rtc_tm.tm_mon == 11) { /* handle year wrapping */
- rtc_tm.tm_mon = 0;
- rtc_tm.tm_year += 1;
- } else {
- rtc_tm.tm_mon += 1;
- }
-
- ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
- if (ret)
- goto err_unlock;
-
- if (alarm_secs > rtc_secs) {
-			dev_err(dev, "%s: max for alarm is one month\n",
-				__func__);
- ret = -EINVAL;
- goto err_unlock;
- }
- }
-
- /* Disable the alarm before modifying it */
- ret = _isl12057_rtc_update_alarm(dev, 0);
- if (ret < 0) {
- dev_err(dev, "%s: unable to disable the alarm (%d)\n",
- __func__, ret);
- goto err_unlock;
- }
-
- /* Program alarm registers */
- regs[0] = bin2bcd(alarm_tm->tm_sec) & 0x7f;
- regs[1] = bin2bcd(alarm_tm->tm_min) & 0x7f;
- regs[2] = bin2bcd(alarm_tm->tm_hour) & 0x3f;
- regs[3] = bin2bcd(alarm_tm->tm_mday) & 0x3f;
-
- ret = regmap_bulk_write(data->regmap, ISL12057_REG_A1_SC, regs,
- ISL12057_A1_SEC_LEN);
- if (ret < 0) {
- dev_err(dev, "%s: writing alarm section failed (%d)\n",
- __func__, ret);
- goto err_unlock;
- }
-
- /* Enable or disable alarm */
- ret = _isl12057_rtc_update_alarm(dev, enable);
-
-err_unlock:
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static int isl12057_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- u8 regs[ISL12057_RTC_SEC_LEN];
- int ret;
-
- ret = isl12057_rtc_tm_to_regs(regs, tm);
- if (ret)
- return ret;
-
- mutex_lock(&data->lock);
- ret = regmap_bulk_write(data->regmap, ISL12057_REG_RTC_SC, regs,
- ISL12057_RTC_SEC_LEN);
- if (ret) {
- dev_err(dev, "%s: unable to write RTC time section (%d)\n",
- __func__, ret);
- goto out;
- }
-
- /*
- * Now that RTC time has been updated, let's clear oscillator
- * failure flag, if needed.
- */
- ret = regmap_update_bits(data->regmap, ISL12057_REG_SR,
- ISL12057_REG_SR_OSF, 0);
- if (ret < 0)
- dev_err(dev, "%s: unable to clear osc. failure bit (%d)\n",
- __func__, ret);
-
-out:
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-/*
- * Check current RTC status and enable/disable what needs to be. Return 0 if
- * everything went OK and a negative value upon error. Note: this function
- * is called early during init and hence does not need mutex protection.
- */
-static int isl12057_check_rtc_status(struct device *dev, struct regmap *regmap)
-{
- int ret;
-
- /* Enable oscillator if not already running */
- ret = regmap_update_bits(regmap, ISL12057_REG_INT,
- ISL12057_REG_INT_EOSC, 0);
- if (ret < 0) {
- dev_err(dev, "%s: unable to enable oscillator (%d)\n",
- __func__, ret);
- return ret;
- }
-
- /* Clear alarm bit if needed */
- ret = regmap_update_bits(regmap, ISL12057_REG_SR,
- ISL12057_REG_SR_A1F, 0);
- if (ret < 0) {
- dev_err(dev, "%s: unable to clear alarm bit (%d)\n",
- __func__, ret);
- return ret;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-/*
- * One would expect the device to be marked as a wakeup source only
- * when an IRQ pin of the RTC is routed to an interrupt line of the
- * CPU. In practice, such an IRQ pin can be connected to a PMIC and
- * this allows the device to be powered up when the RTC alarm rings. This
- * is for instance the case on ReadyNAS 102, 104 and 2120. On those
- * devices with no IRQ directly connected to the SoC, the RTC chip
- * can be forced as a wakeup source by stating that explicitly in
- * the device's .dts file using the "wakeup-source" boolean property.
- * This guarantees the 'wakealarm' sysfs entry is available on the device.
- *
- * The function below returns true when the chip can wake up the
- * machine, i.e. when an IRQ is available or when the boolean property
- * has been set in the .dts file. Otherwise, it returns false.
- */
-
-static bool isl12057_can_wakeup_machine(struct device *dev)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-
- return data->irq || of_property_read_bool(dev->of_node, "wakeup-source")
- || of_property_read_bool(dev->of_node, /* legacy */
- "isil,irq2-can-wakeup-machine");
-}
-#else
-static bool isl12057_can_wakeup_machine(struct device *dev)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-
- return !!data->irq;
-}
-#endif
-
-static int isl12057_rtc_alarm_irq_enable(struct device *dev,
- unsigned int enable)
-{
- struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
- int ret = -ENOTTY;
-
- if (rtc_data->irq)
- ret = isl12057_rtc_update_alarm(dev, enable);
-
- return ret;
-}
-
-static irqreturn_t isl12057_rtc_interrupt(int irq, void *data)
-{
- struct i2c_client *client = data;
- struct isl12057_rtc_data *rtc_data = dev_get_drvdata(&client->dev);
- struct rtc_device *rtc = rtc_data->rtc;
- int ret, handled = IRQ_NONE;
- unsigned int sr;
-
- ret = regmap_read(rtc_data->regmap, ISL12057_REG_SR, &sr);
- if (!ret && (sr & ISL12057_REG_SR_A1F)) {
- dev_dbg(&client->dev, "RTC alarm!\n");
-
- rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
-
- /* Acknowledge and disable the alarm */
- _isl12057_rtc_clear_alarm(&client->dev);
- _isl12057_rtc_update_alarm(&client->dev, 0);
-
- handled = IRQ_HANDLED;
- }
-
- return handled;
-}
-
-static const struct rtc_class_ops rtc_ops = {
- .read_time = _isl12057_rtc_read_time,
- .set_time = isl12057_rtc_set_time,
- .read_alarm = isl12057_rtc_read_alarm,
- .set_alarm = isl12057_rtc_set_alarm,
- .alarm_irq_enable = isl12057_rtc_alarm_irq_enable,
-};
-
-static const struct regmap_config isl12057_rtc_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
-static int isl12057_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct device *dev = &client->dev;
- struct isl12057_rtc_data *data;
- struct regmap *regmap;
- int ret;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
- I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK))
- return -ENODEV;
-
- regmap = devm_regmap_init_i2c(client, &isl12057_rtc_regmap_config);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- dev_err(dev, "%s: regmap allocation failed (%d)\n",
- __func__, ret);
- return ret;
- }
-
- ret = isl12057_i2c_validate_chip(regmap);
- if (ret)
- return ret;
-
- ret = isl12057_check_rtc_status(dev, regmap);
- if (ret)
- return ret;
-
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- mutex_init(&data->lock);
- data->regmap = regmap;
- dev_set_drvdata(dev, data);
-
- if (client->irq > 0) {
- ret = devm_request_threaded_irq(dev, client->irq, NULL,
- isl12057_rtc_interrupt,
- IRQF_SHARED|IRQF_ONESHOT,
- DRV_NAME, client);
- if (!ret)
- data->irq = client->irq;
- else
- dev_err(dev, "%s: irq %d unavailable (%d)\n", __func__,
- client->irq, ret);
- }
-
- if (isl12057_can_wakeup_machine(dev))
- device_init_wakeup(dev, true);
-
- data->rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops,
- THIS_MODULE);
- ret = PTR_ERR_OR_ZERO(data->rtc);
- if (ret) {
- dev_err(dev, "%s: unable to register RTC device (%d)\n",
- __func__, ret);
- goto err;
- }
-
- /* We cannot support UIE mode if we do not have an IRQ line */
- if (!data->irq)
- data->rtc->uie_unsupported = 1;
-
-err:
- return ret;
-}
-
-static int isl12057_remove(struct i2c_client *client)
-{
- if (isl12057_can_wakeup_machine(&client->dev))
- device_init_wakeup(&client->dev, false);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int isl12057_rtc_suspend(struct device *dev)
-{
- struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
-
- if (rtc_data->irq && device_may_wakeup(dev))
- return enable_irq_wake(rtc_data->irq);
-
- return 0;
-}
-
-static int isl12057_rtc_resume(struct device *dev)
-{
- struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
-
- if (rtc_data->irq && device_may_wakeup(dev))
- return disable_irq_wake(rtc_data->irq);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(isl12057_rtc_pm_ops, isl12057_rtc_suspend,
- isl12057_rtc_resume);
-
-#ifdef CONFIG_OF
-static const struct of_device_id isl12057_dt_match[] = {
- { .compatible = "isl,isl12057" }, /* for backward compat., don't use */
- { .compatible = "isil,isl12057" },
- { },
-};
-MODULE_DEVICE_TABLE(of, isl12057_dt_match);
-#endif
-
-static const struct i2c_device_id isl12057_id[] = {
- { "isl12057", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, isl12057_id);
-
-static struct i2c_driver isl12057_driver = {
- .driver = {
- .name = DRV_NAME,
- .pm = &isl12057_rtc_pm_ops,
- .of_match_table = of_match_ptr(isl12057_dt_match),
- },
- .probe = isl12057_probe,
- .remove = isl12057_remove,
- .id_table = isl12057_id,
-};
-module_i2c_driver(isl12057_driver);
-
-MODULE_AUTHOR("Arnaud EBALARD <arno@natisbad.org>");
-MODULE_DESCRIPTION("Intersil ISL12057 RTC driver");
-MODULE_LICENSE("GPL");
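The removal above drops the whole rtc-isl12057 driver. Its probe path illustrates a reusable regmap idiom worth noting: identify a chip by bulk-reading its register map once and rejecting the device if any bit that must read as zero is set. A minimal, hedged sketch of that idiom follows; the mask-per-register idea comes from the removed isl12057_i2c_validate_chip(), while the function name and the 16-byte buffer are illustrative.

#include <linux/errno.h>
#include <linux/regmap.h>
#include <linux/types.h>

static int validate_fixed_zero_bits(struct regmap *map, const u8 *mask,
				    size_t len)
{
	u8 regs[16];
	size_t i;
	int ret;

	if (len > sizeof(regs))
		return -EINVAL;

	/* one bulk read instead of one transfer per register */
	ret = regmap_bulk_read(map, 0, regs, len);
	if (ret)
		return ret;

	for (i = 0; i < len; i++)
		if (regs[i] & mask[i])	/* a must-be-zero bit is set */
			return -ENODEV;

	return 0;
}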
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index b2bcfc0bf2e5..5e14651b71a8 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -174,7 +174,7 @@ static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AF_IRQ, enable);
}
-static struct rtc_class_ops jz4740_rtc_ops = {
+static const struct rtc_class_ops jz4740_rtc_ops = {
.read_time = jz4740_rtc_read_time,
.set_mmss = jz4740_rtc_set_mmss,
.read_alarm = jz4740_rtc_read_alarm,
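This one-line constification is the first of a series below (rtc-mcp795, rtc-mt6397, rtc-nuc900, rtc-omap, rtc-palmas, rtc-pcf50633, rtc-rx6110, rtc-rx8025, rtc-spear, rtc-stmp3xxx, rtc-tegra, rtc-twl). The RTC core only reads the ops table and already takes a const pointer, so the structure can live in read-only data. A hedged sketch of the pattern, with illustrative demo_* names:

#include <linux/rtc.h>

/* Illustrative stub; a real driver reads the hardware here. */
static int demo_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	return 0;
}

/*
 * The core never writes the table and devm_rtc_device_register()
 * accepts a const pointer, so the table can land in .rodata.
 */
static const struct rtc_class_ops demo_rtc_ops = {
	.read_time = demo_rtc_read_time,
};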
diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c
index 025bb33b9cd2..4021fd04cb0a 100644
--- a/drivers/rtc/rtc-mcp795.c
+++ b/drivers/rtc/rtc-mcp795.c
@@ -151,7 +151,7 @@ static int mcp795_read_time(struct device *dev, struct rtc_time *tim)
return rtc_valid_tm(tim);
}
-static struct rtc_class_ops mcp795_rtc_ops = {
+static const struct rtc_class_ops mcp795_rtc_ops = {
.read_time = mcp795_read_time,
.set_time = mcp795_set_time
};
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 44f622c3e048..1a61fa56f3ad 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -301,7 +301,7 @@ exit:
return ret;
}
-static struct rtc_class_ops mtk_rtc_ops = {
+static const struct rtc_class_ops mtk_rtc_ops = {
.read_time = mtk_rtc_read_time,
.set_time = mtk_rtc_set_time,
.read_alarm = mtk_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index 09fc1c19f0df..b1b6b3041bfb 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -214,7 +214,7 @@ static int nuc900_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return 0;
}
-static struct rtc_class_ops nuc900_rtc_ops = {
+static const struct rtc_class_ops nuc900_rtc_ops = {
.read_time = nuc900_rtc_read_time,
.set_time = nuc900_rtc_set_time,
.read_alarm = nuc900_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index ec2e9c5fb993..b04ea9b5ae67 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -13,19 +13,23 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/kernel.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <linux/bcd.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/io.h>
#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/rtc.h>
-#include <linux/bcd.h>
-#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/io.h>
-#include <linux/clk.h>
+#include <linux/rtc.h>
/*
* The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock
@@ -115,6 +119,8 @@
/* OMAP_RTC_PMIC bit fields: */
#define OMAP_RTC_PMIC_POWER_EN_EN BIT(16)
+#define OMAP_RTC_PMIC_EXT_WKUP_EN(x) BIT(x)
+#define OMAP_RTC_PMIC_EXT_WKUP_POL(x) BIT(4 + x)
/* OMAP_RTC_KICKER values */
#define KICK0_VALUE 0x83e70b13
@@ -141,6 +147,7 @@ struct omap_rtc {
bool is_pmic_controller;
bool has_ext_clk;
const struct omap_rtc_device_type *type;
+ struct pinctrl_dev *pctldev;
};
static inline u8 rtc_read(struct omap_rtc *rtc, unsigned int reg)
@@ -469,7 +476,7 @@ static void omap_rtc_power_off(void)
mdelay(2500);
}
-static struct rtc_class_ops omap_rtc_ops = {
+static const struct rtc_class_ops omap_rtc_ops = {
.read_time = omap_rtc_read_time,
.set_time = omap_rtc_set_time,
.read_alarm = omap_rtc_read_alarm,
@@ -525,6 +532,139 @@ static const struct of_device_id omap_rtc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, omap_rtc_of_match);
+static const struct pinctrl_pin_desc rtc_pins_desc[] = {
+ PINCTRL_PIN(0, "ext_wakeup0"),
+ PINCTRL_PIN(1, "ext_wakeup1"),
+ PINCTRL_PIN(2, "ext_wakeup2"),
+ PINCTRL_PIN(3, "ext_wakeup3"),
+};
+
+static int rtc_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return 0;
+}
+
+static const char *rtc_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ return NULL;
+}
+
+static const struct pinctrl_ops rtc_pinctrl_ops = {
+ .get_groups_count = rtc_pinctrl_get_groups_count,
+ .get_group_name = rtc_pinctrl_get_group_name,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+enum rtc_pin_config_param {
+ PIN_CONFIG_ACTIVE_HIGH = PIN_CONFIG_END + 1,
+};
+
+static const struct pinconf_generic_params rtc_params[] = {
+ {"ti,active-high", PIN_CONFIG_ACTIVE_HIGH, 0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item rtc_conf_items[ARRAY_SIZE(rtc_params)] = {
+ PCONFDUMP(PIN_CONFIG_ACTIVE_HIGH, "input active high", NULL, false),
+};
+#endif
+
+static int rtc_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int param = pinconf_to_config_param(*config);
+ u32 val;
+ u16 arg = 0;
+
+ rtc->type->unlock(rtc);
+ val = rtc_readl(rtc, OMAP_RTC_PMIC_REG);
+ rtc->type->lock(rtc);
+
+ switch (param) {
+ case PIN_CONFIG_INPUT_ENABLE:
+ if (!(val & OMAP_RTC_PMIC_EXT_WKUP_EN(pin)))
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_ACTIVE_HIGH:
+ if (val & OMAP_RTC_PMIC_EXT_WKUP_POL(pin))
+ return -EINVAL;
+ break;
+ default:
+ return -ENOTSUPP;
+	}
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int rtc_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ u32 val;
+ unsigned int param;
+ u16 param_val;
+ int i;
+
+ rtc->type->unlock(rtc);
+ val = rtc_readl(rtc, OMAP_RTC_PMIC_REG);
+ rtc->type->lock(rtc);
+
+ /* active low by default */
+ val |= OMAP_RTC_PMIC_EXT_WKUP_POL(pin);
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ param_val = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_INPUT_ENABLE:
+ if (param_val)
+ val |= OMAP_RTC_PMIC_EXT_WKUP_EN(pin);
+ else
+ val &= ~OMAP_RTC_PMIC_EXT_WKUP_EN(pin);
+ break;
+ case PIN_CONFIG_ACTIVE_HIGH:
+ val &= ~OMAP_RTC_PMIC_EXT_WKUP_POL(pin);
+ break;
+ default:
+ dev_err(&rtc->rtc->dev, "Property %u not supported\n",
+ param);
+ return -ENOTSUPP;
+ }
+ }
+
+ rtc->type->unlock(rtc);
+ rtc_writel(rtc, OMAP_RTC_PMIC_REG, val);
+ rtc->type->lock(rtc);
+
+ return 0;
+}
+
+static const struct pinconf_ops rtc_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = rtc_pinconf_get,
+ .pin_config_set = rtc_pinconf_set,
+};
+
+static struct pinctrl_desc rtc_pinctrl_desc = {
+ .pins = rtc_pins_desc,
+ .npins = ARRAY_SIZE(rtc_pins_desc),
+ .pctlops = &rtc_pinctrl_ops,
+ .confops = &rtc_pinconf_ops,
+ .custom_params = rtc_params,
+ .num_custom_params = ARRAY_SIZE(rtc_params),
+#ifdef CONFIG_DEBUG_FS
+ .custom_conf_items = rtc_conf_items,
+#endif
+ .owner = THIS_MODULE,
+};
+
static int omap_rtc_probe(struct platform_device *pdev)
{
struct omap_rtc *rtc;
@@ -681,6 +821,15 @@ static int omap_rtc_probe(struct platform_device *pdev)
}
}
+ /* Support ext_wakeup pinconf */
+ rtc_pinctrl_desc.name = dev_name(&pdev->dev);
+
+ rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc);
+ if (IS_ERR(rtc->pctldev)) {
+ dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
+ return PTR_ERR(rtc->pctldev);
+ }
+
return 0;
err:
@@ -724,6 +873,9 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ /* Remove ext_wakeup pinconf */
+ pinctrl_unregister(rtc->pctldev);
+
return 0;
}
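The rtc-omap hunks above expose the ext_wakeup pins through pinctrl. Below is a minimal, hedged sketch of the registration skeleton they follow; the demo_* names are illustrative, and a real descriptor also needs confops, custom params and a matching pinctrl_unregister() on remove, as in the hunk.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>

static const struct pinctrl_pin_desc demo_pins[] = {
	PINCTRL_PIN(0, "ext_wakeup0"),
};

/* The core requires pctlops even when there are no pin groups. */
static int demo_get_groups_count(struct pinctrl_dev *pctldev)
{
	return 0;
}

static const char *demo_get_group_name(struct pinctrl_dev *pctldev,
				       unsigned int group)
{
	return NULL;
}

static const struct pinctrl_ops demo_pctlops = {
	.get_groups_count = demo_get_groups_count,
	.get_group_name = demo_get_group_name,
};

static struct pinctrl_desc demo_pinctrl_desc = {
	.pins = demo_pins,
	.npins = ARRAY_SIZE(demo_pins),
	.pctlops = &demo_pctlops,
	.owner = THIS_MODULE,
};

static int demo_probe(struct platform_device *pdev)
{
	struct pinctrl_dev *pctldev;

	/* name the controller after the device, as rtc-omap does */
	demo_pinctrl_desc.name = dev_name(&pdev->dev);

	pctldev = pinctrl_register(&demo_pinctrl_desc, &pdev->dev, NULL);
	if (IS_ERR(pctldev))
		return PTR_ERR(pctldev);

	return 0;
}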
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index 6080e0edef63..4bcfb88674d3 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -225,7 +225,7 @@ static irqreturn_t palmas_rtc_interrupt(int irq, void *context)
return IRQ_HANDLED;
}
-static struct rtc_class_ops palmas_rtc_ops = {
+static const struct rtc_class_ops palmas_rtc_ops = {
.read_time = palmas_rtc_read_time,
.set_time = palmas_rtc_set_time,
.read_alarm = palmas_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index b4478cc92b55..8895f77726e8 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -182,7 +182,8 @@ static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr,
}
static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
- const char *buffer, size_t count) {
+ const char *buffer, size_t count)
+{
struct pcf2123_sysfs_reg *r;
unsigned long reg;
unsigned long val;
@@ -199,7 +200,7 @@ static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- pcf2123_write_reg(dev, reg, val);
+ ret = pcf2123_write_reg(dev, reg, val);
if (ret < 0)
return -EIO;
return count;
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index e6b6911c8e05..00c31c91b245 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -232,7 +232,7 @@ static int pcf50633_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
}
-static struct rtc_class_ops pcf50633_rtc_ops = {
+static const struct rtc_class_ops pcf50633_rtc_ops = {
.read_time = pcf50633_rtc_read_time,
.set_time = pcf50633_rtc_set_time,
.read_alarm = pcf50633_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
index 64e1e4578492..5cfb6df5c430 100644
--- a/drivers/rtc/rtc-pic32.c
+++ b/drivers/rtc/rtc-pic32.c
@@ -400,7 +400,6 @@ static struct platform_driver pic32_rtc_driver = {
.remove = pic32_rtc_remove,
.driver = {
.name = "pic32-rtc",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pic32_rtc_dt_ids),
},
};
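Dropping .owner here is safe because platform_driver_register() is a macro that already supplies the owning module:

	#define platform_driver_register(drv) \
		__platform_driver_register(drv, THIS_MODULE)

spi_register_driver() does the same, which is why the rtc-rx6110 hunk further below can drop its .owner line as well.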
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 9a2f6a95d5a7..f9277e536f7e 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -52,11 +52,21 @@
#define RV8803_CTRL_TIE BIT(4)
#define RV8803_CTRL_UIE BIT(5)
+#define RX8900_BACKUP_CTRL 0x18
+#define RX8900_FLAG_SWOFF BIT(2)
+#define RX8900_FLAG_VDETOFF BIT(3)
+
+enum rv8803_type {
+ rv_8803,
+ rx_8900
+};
+
struct rv8803_data {
struct i2c_client *client;
struct rtc_device *rtc;
struct mutex flags_lock;
u8 ctrl;
+ enum rv8803_type type;
};
static int rv8803_read_reg(const struct i2c_client *client, u8 reg)
@@ -497,6 +507,35 @@ static struct rtc_class_ops rv8803_rtc_ops = {
.ioctl = rv8803_ioctl,
};
+static int rx8900_trickle_charger_init(struct rv8803_data *rv8803)
+{
+ struct i2c_client *client = rv8803->client;
+ struct device_node *node = client->dev.of_node;
+ int err;
+ u8 flags;
+
+ if (!node)
+ return 0;
+
+ if (rv8803->type != rx_8900)
+ return 0;
+
+ err = i2c_smbus_read_byte_data(rv8803->client, RX8900_BACKUP_CTRL);
+ if (err < 0)
+ return err;
+
+ flags = ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF) & (u8)err;
+
+ if (of_property_read_bool(node, "epson,vdet-disable"))
+ flags |= RX8900_FLAG_VDETOFF;
+
+ if (of_property_read_bool(node, "trickle-diode-disable"))
+ flags |= RX8900_FLAG_SWOFF;
+
+ return i2c_smbus_write_byte_data(rv8803->client, RX8900_BACKUP_CTRL,
+ flags);
+}
+
static int rv8803_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -517,6 +556,7 @@ static int rv8803_probe(struct i2c_client *client,
mutex_init(&rv8803->flags_lock);
rv8803->client = client;
+ rv8803->type = id->driver_data;
i2c_set_clientdata(client, rv8803);
flags = rv8803_read_reg(client, RV8803_FLAG);
@@ -558,6 +598,12 @@ static int rv8803_probe(struct i2c_client *client,
if (err)
return err;
+ err = rx8900_trickle_charger_init(rv8803);
+ if (err) {
+ dev_err(&client->dev, "failed to init charger\n");
+ return err;
+ }
+
err = device_create_bin_file(&client->dev, &rv8803_nvram_attr);
if (err)
return err;
@@ -575,8 +621,8 @@ static int rv8803_remove(struct i2c_client *client)
}
static const struct i2c_device_id rv8803_id[] = {
- { "rv8803", 0 },
- { "rx8900", 0 },
+ { "rv8803", rv_8803 },
+ { "rx8900", rx_8900 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rv8803_id);
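The rv8803 hunks above select chip variants through i2c_device_id.driver_data rather than duplicated probe logic. A hedged sketch of the idiom with illustrative names:

#include <linux/i2c.h>

enum demo_type {
	demo_plain,
	demo_extended,
};

static const struct i2c_device_id demo_id[] = {
	{ "demo-plain", demo_plain },
	{ "demo-extended", demo_extended },
	{ }
};

static int demo_probe(struct i2c_client *client,
		      const struct i2c_device_id *id)
{
	enum demo_type type = id->driver_data;

	if (type == demo_extended) {
		/* variant-specific setup, e.g. trickle charger init */
	}

	return 0;
}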
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index bbad00b233bc..7c9c08eab5e5 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -317,7 +317,7 @@ static int rx6110_init(struct rx6110_data *rx6110)
return ret;
}
-static struct rtc_class_ops rx6110_rtc_ops = {
+static const struct rtc_class_ops rx6110_rtc_ops = {
.read_time = rx6110_get_time,
.set_time = rx6110_set_time,
};
@@ -388,7 +388,6 @@ MODULE_DEVICE_TABLE(spi, rx6110_id);
static struct spi_driver rx6110_driver = {
.driver = {
.name = RX6110_DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = rx6110_probe,
.remove = rx6110_remove,
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index 2b85cc7a24e7..91857d8d2df8 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -403,7 +403,7 @@ static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static struct rtc_class_ops rx8025_rtc_ops = {
+static const struct rtc_class_ops rx8025_rtc_ops = {
.read_time = rx8025_get_time,
.set_time = rx8025_set_time,
.read_alarm = rx8025_read_alarm,
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index f05ef8568480..e377f42abae7 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -343,7 +343,7 @@ static int spear_alarm_irq_enable(struct device *dev, unsigned int enabled)
return ret;
}
-static struct rtc_class_ops spear_rtc_ops = {
+static const struct rtc_class_ops spear_rtc_ops = {
.read_time = spear_rtc_read_time,
.set_time = spear_rtc_set_time,
.read_alarm = spear_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index e6aaaa52e7fe..d578e40d5a50 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -231,7 +231,7 @@ static int stmp3xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
return 0;
}
-static struct rtc_class_ops stmp3xxx_rtc_ops = {
+static const struct rtc_class_ops stmp3xxx_rtc_ops = {
.alarm_irq_enable =
stmp3xxx_alarm_irq_enable,
.read_time = stmp3xxx_rtc_gettime,
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 63b9fb1318c2..1218d5d4224d 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -160,7 +160,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
unsigned long push = 0;
struct rtc_wkalrm alm;
struct rtc_device *rtc = to_rtc_device(dev);
- char *buf_ptr;
+ const char *buf_ptr;
int adjust = 0;
/* Only request alarms that trigger in the future. Disable them
@@ -171,7 +171,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
return retval;
rtc_tm_to_time(&alm.time, &now);
- buf_ptr = (char *)buf;
+ buf_ptr = buf;
if (*buf_ptr == '+') {
buf_ptr++;
if (*buf_ptr == '=') {
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 15ac597d54da..3853ba963bb5 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -291,7 +291,7 @@ static irqreturn_t tegra_rtc_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static struct rtc_class_ops tegra_rtc_ops = {
+static const struct rtc_class_ops tegra_rtc_ops = {
.read_time = tegra_rtc_read_time,
.set_time = tegra_rtc_set_time,
.read_alarm = tegra_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 2dc787dc06c1..176720b7b9e5 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -462,7 +462,7 @@ out:
return ret;
}
-static struct rtc_class_ops twl_rtc_ops = {
+static const struct rtc_class_ops twl_rtc_ops = {
.read_time = twl_rtc_read_time,
.set_time = twl_rtc_set_time,
.read_alarm = twl_rtc_read_alarm,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 831935af7389..a7a88476e215 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1205,7 +1205,7 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
mdc, lpm);
return mdc;
}
- fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
+ fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
if (fcx_max_data < private->fcx_max_data) {
dev_warn(&device->cdev->dev,
"The maximum data size for zHPF requests %u "
@@ -1675,7 +1675,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
" data size for zHPF requests failed\n");
return 0;
} else
- return mdc * FCX_MAX_DATA_FACTOR;
+ return (u32)mdc * FCX_MAX_DATA_FACTOR;
}
/*
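Both dasd_eckd hunks above force the multiplication into unsigned arithmetic. A hedged illustration of why the cast matters:

#include <linux/types.h>

/*
 * mdc arrives as a signed int; with plain int arithmetic the product
 * can exceed INT_MAX, and signed overflow is undefined behaviour.
 * Casting one operand makes the multiplication unsigned 32-bit, which
 * is well defined (it wraps modulo 2^32).
 */
static u32 demo_fcx_max_data(int mdc, u32 factor)
{
	return (u32)mdc * factor;
}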
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 46be25c7461e..876c7e6e3a99 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -780,7 +780,7 @@ static int cfg_wait_idle(void)
static int __init chp_init(void)
{
struct chp_id chpid;
- int ret;
+ int state, ret;
ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
if (ret)
@@ -791,7 +791,9 @@ static int __init chp_init(void)
return 0;
/* Register available channel-paths. */
chp_id_for_each(&chpid) {
- if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
+ state = chp_info_get_status(chpid);
+ if (state == CHP_STATUS_CONFIGURED ||
+ state == CHP_STATUS_STANDBY)
chp_new(chpid);
}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 637cf8973c9e..581001989937 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -384,7 +384,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
/* if (len > rec_len):
* dump data up to cap_len ignoring small duplicate in rec->payload
*/
- spin_lock_irqsave(&dbf->pay_lock, flags);
+ spin_lock(&dbf->pay_lock);
memset(payload, 0, sizeof(*payload));
memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
payload->fsf_req_id = req_id;
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index db2739079cbb..790babc5ef66 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -353,7 +353,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
#endif
-static int probe_irq __initdata;
+static int probe_irq;
/**
* probe_intr - helper for IRQ autoprobe
@@ -365,7 +365,7 @@ static int probe_irq __initdata;
* used by the IRQ probe code.
*/
-static irqreturn_t __init probe_intr(int irq, void *dev_id)
+static irqreturn_t probe_intr(int irq, void *dev_id)
{
probe_irq = irq;
return IRQ_HANDLED;
@@ -380,7 +380,7 @@ static irqreturn_t __init probe_intr(int irq, void *dev_id)
* and then looking to see what interrupt actually turned up.
*/
-static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
+static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
int possible)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 6a6906f847db..d9239c2d49b1 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -61,7 +61,7 @@ MODULE_PARM_DESC(be_max_phys_size,
"memory that can be allocated. Range is 16 - 128");
#define beiscsi_disp_param(_name)\
-ssize_t \
+static ssize_t \
beiscsi_##_name##_disp(struct device *dev,\
struct device_attribute *attrib, char *buf) \
{ \
@@ -74,7 +74,7 @@ beiscsi_##_name##_disp(struct device *dev,\
}
#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
-int \
+static int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
if (val >= _minval && val <= _maxval) {\
@@ -93,7 +93,7 @@ beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
}
#define beiscsi_store_param(_name) \
-ssize_t \
+static ssize_t \
beiscsi_##_name##_store(struct device *dev,\
struct device_attribute *attr, const char *buf,\
size_t count) \
@@ -112,7 +112,7 @@ beiscsi_##_name##_store(struct device *dev,\
}
#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
-int \
+static int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
if (val >= _minval && val <= _maxval) {\
@@ -900,8 +900,9 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
struct sgl_handle *psgl_handle;
+ unsigned long flags;
- spin_lock_bh(&phba->io_sgl_lock);
+ spin_lock_irqsave(&phba->io_sgl_lock, flags);
if (phba->io_sgl_hndl_avbl) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
"BM_%d : In alloc_io_sgl_handle,"
@@ -919,14 +920,16 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
phba->io_sgl_alloc_index++;
} else
psgl_handle = NULL;
- spin_unlock_bh(&phba->io_sgl_lock);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
return psgl_handle;
}
static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
- spin_lock_bh(&phba->io_sgl_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->io_sgl_lock, flags);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
"BM_%d : In free_,io_sgl_free_index=%d\n",
phba->io_sgl_free_index);
@@ -941,7 +944,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
"value there=%p\n", phba->io_sgl_free_index,
phba->io_sgl_hndl_base
[phba->io_sgl_free_index]);
- spin_unlock_bh(&phba->io_sgl_lock);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
return;
}
phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -950,7 +953,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
phba->io_sgl_free_index = 0;
else
phba->io_sgl_free_index++;
- spin_unlock_bh(&phba->io_sgl_lock);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
}
static inline struct wrb_handle *
@@ -958,15 +961,16 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
unsigned int wrbs_per_cxn)
{
struct wrb_handle *pwrb_handle;
+ unsigned long flags;
- spin_lock_bh(&pwrb_context->wrb_lock);
+ spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
pwrb_context->wrb_handles_available--;
if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
pwrb_context->alloc_index = 0;
else
pwrb_context->alloc_index++;
- spin_unlock_bh(&pwrb_context->wrb_lock);
+ spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
if (pwrb_handle)
memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));
@@ -1001,14 +1005,16 @@ beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
struct wrb_handle *pwrb_handle,
unsigned int wrbs_per_cxn)
{
- spin_lock_bh(&pwrb_context->wrb_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
pwrb_context->wrb_handles_available++;
if (pwrb_context->free_index == (wrbs_per_cxn - 1))
pwrb_context->free_index = 0;
else
pwrb_context->free_index++;
- spin_unlock_bh(&pwrb_context->wrb_lock);
+ spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
}
/**
@@ -1037,8 +1043,9 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
struct sgl_handle *psgl_handle;
+ unsigned long flags;
- spin_lock_bh(&phba->mgmt_sgl_lock);
+ spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
if (phba->eh_sgl_hndl_avbl) {
psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
@@ -1056,14 +1063,16 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
phba->eh_sgl_alloc_index++;
} else
psgl_handle = NULL;
- spin_unlock_bh(&phba->mgmt_sgl_lock);
+ spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
return psgl_handle;
}
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
- spin_lock_bh(&phba->mgmt_sgl_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BM_%d : In free_mgmt_sgl_handle,"
"eh_sgl_free_index=%d\n",
@@ -1078,7 +1087,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
"BM_%d : Double Free in eh SGL ,"
"eh_sgl_free_index=%d\n",
phba->eh_sgl_free_index);
- spin_unlock_bh(&phba->mgmt_sgl_lock);
+ spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
return;
}
phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
@@ -1088,7 +1097,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
phba->eh_sgl_free_index = 0;
else
phba->eh_sgl_free_index++;
- spin_unlock_bh(&phba->mgmt_sgl_lock);
+ spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
}
static void
@@ -4584,7 +4593,7 @@ free_hndls:
io_task->cmd_bhs = NULL;
return -ENOMEM;
}
-int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
+static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
unsigned int num_sg, unsigned int xferlen,
unsigned int writedir)
{
@@ -4973,7 +4982,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
return rc;
}
-void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
+static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
{
/* Set the logging parameter */
beiscsi_log_enable_init(phba, beiscsi_log_enable);
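The recurring change in the be2iscsi hunks above swaps spin_lock_bh() for spin_lock_irqsave() on the SGL and WRB locks. The _bh variants only mask softirqs; a lock that can also be taken from hard-irq context needs the irqsave form. A minimal sketch with illustrative names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/*
 * spin_lock_bh() only masks softirqs; once the same lock is also taken
 * from hard-irq context, the irqsave form is needed to avoid deadlock
 * against the interrupt handler.
 */
static void demo_touch_shared_state(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... manipulate state shared with the IRQ path ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}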
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 7c0d7af0d3b7..0039bebaa9e2 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -685,6 +685,11 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
req_completion);
csk->snd_nxt += len;
cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
+ } else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
+ (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
+ struct cpl_close_con_req *req =
+ (struct cpl_close_con_req *)skb->data;
+ req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
}
total_size += skb->truesize;
t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 516bd6c4f442..cbf010324c18 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -30,161 +30,41 @@
#include "NCR5380.h"
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/isapnp.h>
+#include <linux/isa.h>
+#include <linux/pnp.h>
#include <linux/interrupt.h>
+#define MAX_CARDS 8
+
+/* old-style parameters for compatibility */
static int ncr_irq;
-static int ncr_dma;
static int ncr_addr;
static int ncr_5380;
static int ncr_53c400;
static int ncr_53c400a;
static int dtc_3181e;
static int hp_c2502;
+module_param(ncr_irq, int, 0);
+module_param(ncr_addr, int, 0);
+module_param(ncr_5380, int, 0);
+module_param(ncr_53c400, int, 0);
+module_param(ncr_53c400a, int, 0);
+module_param(dtc_3181e, int, 0);
+module_param(hp_c2502, int, 0);
-static struct override {
- NCR5380_map_type NCR5380_map_name;
- int irq;
- int dma;
- int board; /* Use NCR53c400, Ricoh, etc. extensions ? */
-} overrides
-#ifdef GENERIC_NCR5380_OVERRIDE
-[] __initdata = GENERIC_NCR5380_OVERRIDE;
-#else
-[1] __initdata = { { 0,},};
-#endif
-
-#define NO_OVERRIDES ARRAY_SIZE(overrides)
-
-#ifndef MODULE
-
-/**
- * internal_setup - handle lilo command string override
- * @board: BOARD_* identifier for the board
- * @str: unused
- * @ints: numeric parameters
- *
- * Do LILO command line initialization of the overrides array. Display
- * errors when needed
- *
- * Locks: none
- */
-
-static void __init internal_setup(int board, char *str, int *ints)
-{
- static int commandline_current;
- switch (board) {
- case BOARD_NCR5380:
- if (ints[0] != 2 && ints[0] != 3) {
- printk(KERN_ERR "generic_NCR5380_setup : usage ncr5380=" STRVAL(NCR5380_map_name) ",irq,dma\n");
- return;
- }
- break;
- case BOARD_NCR53C400:
- if (ints[0] != 2) {
- printk(KERN_ERR "generic_NCR53C400_setup : usage ncr53c400=" STRVAL(NCR5380_map_name) ",irq\n");
- return;
- }
- break;
- case BOARD_NCR53C400A:
- if (ints[0] != 2) {
- printk(KERN_ERR "generic_NCR53C400A_setup : usage ncr53c400a=" STRVAL(NCR5380_map_name) ",irq\n");
- return;
- }
- break;
- case BOARD_DTC3181E:
- if (ints[0] != 2) {
- printk("generic_DTC3181E_setup : usage dtc3181e=" STRVAL(NCR5380_map_name) ",irq\n");
- return;
- }
- break;
- }
-
- if (commandline_current < NO_OVERRIDES) {
- overrides[commandline_current].NCR5380_map_name = (NCR5380_map_type) ints[1];
- overrides[commandline_current].irq = ints[2];
- if (ints[0] == 3)
- overrides[commandline_current].dma = ints[3];
- else
- overrides[commandline_current].dma = DMA_NONE;
- overrides[commandline_current].board = board;
- ++commandline_current;
- }
-}
-
-
-/**
- * do_NCR53C80_setup - set up entry point
- * @str: unused
- *
- * Setup function invoked at boot to parse the ncr5380= command
- * line.
- */
-
-static int __init do_NCR5380_setup(char *str)
-{
- int ints[10];
-
- get_options(str, ARRAY_SIZE(ints), ints);
- internal_setup(BOARD_NCR5380, str, ints);
- return 1;
-}
-
-/**
- * do_NCR53C400_setup - set up entry point
- * @str: unused
- * @ints: integer parameters from kernel setup code
- *
- * Setup function invoked at boot to parse the ncr53c400= command
- * line.
- */
-
-static int __init do_NCR53C400_setup(char *str)
-{
- int ints[10];
-
- get_options(str, ARRAY_SIZE(ints), ints);
- internal_setup(BOARD_NCR53C400, str, ints);
- return 1;
-}
-
-/**
- * do_NCR53C400A_setup - set up entry point
- * @str: unused
- * @ints: integer parameters from kernel setup code
- *
- * Setup function invoked at boot to parse the ncr53c400a= command
- * line.
- */
-
-static int __init do_NCR53C400A_setup(char *str)
-{
- int ints[10];
-
- get_options(str, ARRAY_SIZE(ints), ints);
- internal_setup(BOARD_NCR53C400A, str, ints);
- return 1;
-}
-
-/**
- * do_DTC3181E_setup - set up entry point
- * @str: unused
- * @ints: integer parameters from kernel setup code
- *
- * Setup function invoked at boot to parse the dtc3181e= command
- * line.
- */
+static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ number(s)");
-static int __init do_DTC3181E_setup(char *str)
-{
- int ints[10];
+static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+module_param_array(base, int, NULL, 0);
+MODULE_PARM_DESC(base, "base address(es)");
- get_options(str, ARRAY_SIZE(ints), ints);
- internal_setup(BOARD_DTC3181E, str, ints);
- return 1;
-}
+static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+module_param_array(card, int, NULL, 0);
+MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)");
-#endif
+MODULE_LICENSE("GPL");
#ifndef SCSI_G_NCR5380_MEM
/*
@@ -210,21 +90,9 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
}
#endif
-/**
- * generic_NCR5380_detect - look for NCR5380 controllers
- * @tpnt: the scsi template
- *
- * Scan for the present of NCR5380, NCR53C400, NCR53C400A, DTC3181E
- * and DTC436(ISAPnP) controllers. If overrides have been set we use
- * them.
- *
- * Locks: none
- */
-
-static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
+static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
+ struct device *pdev, int base, int irq, int board)
{
- static int current_override;
- int count;
unsigned int *ports;
u8 *magic = NULL;
#ifndef SCSI_G_NCR5380_MEM
@@ -232,272 +100,222 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
int port_idx = -1;
unsigned long region_size;
#endif
- static unsigned int __initdata ncr_53c400a_ports[] = {
+ static unsigned int ncr_53c400a_ports[] = {
0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
};
- static unsigned int __initdata dtc_3181e_ports[] = {
+ static unsigned int dtc_3181e_ports[] = {
0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
};
- static u8 ncr_53c400a_magic[] __initdata = { /* 53C400A & DTC436 */
+ static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
0x59, 0xb9, 0xc5, 0xae, 0xa6
};
- static u8 hp_c2502_magic[] __initdata = { /* HP C2502 */
+ static u8 hp_c2502_magic[] = { /* HP C2502 */
0x0f, 0x22, 0xf0, 0x20, 0x80
};
- int flags;
+ int flags, ret;
struct Scsi_Host *instance;
struct NCR5380_hostdata *hostdata;
#ifdef SCSI_G_NCR5380_MEM
- unsigned long base;
void __iomem *iomem;
resource_size_t iomem_size;
#endif
- if (ncr_irq)
- overrides[0].irq = ncr_irq;
- if (ncr_dma)
- overrides[0].dma = ncr_dma;
- if (ncr_addr)
- overrides[0].NCR5380_map_name = (NCR5380_map_type) ncr_addr;
- if (ncr_5380)
- overrides[0].board = BOARD_NCR5380;
- else if (ncr_53c400)
- overrides[0].board = BOARD_NCR53C400;
- else if (ncr_53c400a)
- overrides[0].board = BOARD_NCR53C400A;
- else if (dtc_3181e)
- overrides[0].board = BOARD_DTC3181E;
- else if (hp_c2502)
- overrides[0].board = BOARD_HP_C2502;
-#ifndef SCSI_G_NCR5380_MEM
- if (!current_override && isapnp_present()) {
- struct pnp_dev *dev = NULL;
- count = 0;
- while ((dev = pnp_find_dev(NULL, ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e), dev))) {
- if (count >= NO_OVERRIDES)
- break;
- if (pnp_device_attach(dev) < 0)
- continue;
- if (pnp_activate_dev(dev) < 0) {
- printk(KERN_ERR "dtc436e probe: activate failed\n");
- pnp_device_detach(dev);
- continue;
- }
- if (!pnp_port_valid(dev, 0)) {
- printk(KERN_ERR "dtc436e probe: no valid port\n");
- pnp_device_detach(dev);
- continue;
- }
- if (pnp_irq_valid(dev, 0))
- overrides[count].irq = pnp_irq(dev, 0);
- else
- overrides[count].irq = NO_IRQ;
- if (pnp_dma_valid(dev, 0))
- overrides[count].dma = pnp_dma(dev, 0);
- else
- overrides[count].dma = DMA_NONE;
- overrides[count].NCR5380_map_name = (NCR5380_map_type) pnp_port_start(dev, 0);
- overrides[count].board = BOARD_DTC3181E;
- count++;
- }
+ ports = NULL;
+ flags = 0;
+ switch (board) {
+ case BOARD_NCR5380:
+ flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
+ break;
+ case BOARD_NCR53C400A:
+ ports = ncr_53c400a_ports;
+ magic = ncr_53c400a_magic;
+ break;
+ case BOARD_HP_C2502:
+ ports = ncr_53c400a_ports;
+ magic = hp_c2502_magic;
+ break;
+ case BOARD_DTC3181E:
+ ports = dtc_3181e_ports;
+ magic = ncr_53c400a_magic;
+ break;
}
-#endif
-
- for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
- if (!(overrides[current_override].NCR5380_map_name))
- continue;
-
- ports = NULL;
- flags = 0;
- switch (overrides[current_override].board) {
- case BOARD_NCR5380:
- flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
- break;
- case BOARD_NCR53C400A:
- ports = ncr_53c400a_ports;
- magic = ncr_53c400a_magic;
- break;
- case BOARD_HP_C2502:
- ports = ncr_53c400a_ports;
- magic = hp_c2502_magic;
- break;
- case BOARD_DTC3181E:
- ports = dtc_3181e_ports;
- magic = ncr_53c400a_magic;
- break;
- }
#ifndef SCSI_G_NCR5380_MEM
- if (ports && magic) {
- /* wakeup sequence for the NCR53C400A and DTC3181E */
-
- /* Disable the adapter and look for a free io port */
- magic_configure(-1, 0, magic);
-
- region_size = 16;
-
- if (overrides[current_override].NCR5380_map_name != PORT_AUTO)
- for (i = 0; ports[i]; i++) {
- if (!request_region(ports[i], region_size, "ncr53c80"))
- continue;
- if (overrides[current_override].NCR5380_map_name == ports[i])
- break;
- release_region(ports[i], region_size);
- } else
- for (i = 0; ports[i]; i++) {
- if (!request_region(ports[i], region_size, "ncr53c80"))
- continue;
- if (inb(ports[i]) == 0xff)
- break;
- release_region(ports[i], region_size);
+ if (ports && magic) {
+ /* wakeup sequence for the NCR53C400A and DTC3181E */
+
+ /* Disable the adapter and look for a free io port */
+ magic_configure(-1, 0, magic);
+
+ region_size = 16;
+ if (base)
+ for (i = 0; ports[i]; i++) {
+ if (base == ports[i]) { /* index found */
+ if (!request_region(ports[i],
+ region_size,
+ "ncr53c80"))
+ return -EBUSY;
+ break;
}
- if (ports[i]) {
- /* At this point we have our region reserved */
- magic_configure(i, 0, magic); /* no IRQ yet */
- outb(0xc0, ports[i] + 9);
- if (inb(ports[i] + 9) != 0x80)
- continue;
- overrides[current_override].NCR5380_map_name = ports[i];
- port_idx = i;
- } else
- continue;
- }
+ }
else
- {
- /* Not a 53C400A style setup - just grab */
- region_size = 8;
- if (!request_region(overrides[current_override].NCR5380_map_name,
- region_size, "ncr5380"))
- continue;
- }
+ for (i = 0; ports[i]; i++) {
+ if (!request_region(ports[i], region_size,
+ "ncr53c80"))
+ continue;
+ if (inb(ports[i]) == 0xff)
+ break;
+ release_region(ports[i], region_size);
+ }
+ if (ports[i]) {
+ /* At this point we have our region reserved */
+ magic_configure(i, 0, magic); /* no IRQ yet */
+ outb(0xc0, ports[i] + 9);
+ if (inb(ports[i] + 9) != 0x80) {
+ ret = -ENODEV;
+ goto out_release;
+ }
+ base = ports[i];
+ port_idx = i;
+ } else
+ return -EINVAL;
+ }
+ else
+ {
+ /* NCR5380 - no configuration, just grab */
+ region_size = 8;
+ if (!base || !request_region(base, region_size, "ncr5380"))
+ return -EBUSY;
+ }
#else
- base = overrides[current_override].NCR5380_map_name;
- iomem_size = NCR53C400_region_size;
- if (!request_mem_region(base, iomem_size, "ncr5380"))
- continue;
- iomem = ioremap(base, iomem_size);
- if (!iomem) {
- release_mem_region(base, iomem_size);
- continue;
- }
+ iomem_size = NCR53C400_region_size;
+ if (!request_mem_region(base, iomem_size, "ncr5380"))
+ return -EBUSY;
+ iomem = ioremap(base, iomem_size);
+ if (!iomem) {
+ release_mem_region(base, iomem_size);
+ return -ENOMEM;
+ }
#endif
- instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata));
- if (instance == NULL)
- goto out_release;
- hostdata = shost_priv(instance);
+ instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
+ if (instance == NULL) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
+ hostdata = shost_priv(instance);
#ifndef SCSI_G_NCR5380_MEM
- instance->io_port = overrides[current_override].NCR5380_map_name;
- instance->n_io_port = region_size;
- hostdata->io_width = 1; /* 8-bit PDMA by default */
-
- /*
- * On NCR53C400 boards, NCR5380 registers are mapped 8 past
- * the base address.
- */
- switch (overrides[current_override].board) {
- case BOARD_NCR53C400:
- instance->io_port += 8;
- hostdata->c400_ctl_status = 0;
- hostdata->c400_blk_cnt = 1;
- hostdata->c400_host_buf = 4;
- break;
- case BOARD_DTC3181E:
- hostdata->io_width = 2; /* 16-bit PDMA */
- /* fall through */
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- hostdata->c400_ctl_status = 9;
- hostdata->c400_blk_cnt = 10;
- hostdata->c400_host_buf = 8;
- break;
- }
+ instance->io_port = base;
+ instance->n_io_port = region_size;
+ hostdata->io_width = 1; /* 8-bit PDMA by default */
+
+ /*
+ * On NCR53C400 boards, NCR5380 registers are mapped 8 past
+ * the base address.
+ */
+ switch (board) {
+ case BOARD_NCR53C400:
+ instance->io_port += 8;
+ hostdata->c400_ctl_status = 0;
+ hostdata->c400_blk_cnt = 1;
+ hostdata->c400_host_buf = 4;
+ break;
+ case BOARD_DTC3181E:
+ hostdata->io_width = 2; /* 16-bit PDMA */
+ /* fall through */
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ hostdata->c400_ctl_status = 9;
+ hostdata->c400_blk_cnt = 10;
+ hostdata->c400_host_buf = 8;
+ break;
+ }
#else
- instance->base = overrides[current_override].NCR5380_map_name;
- hostdata->iomem = iomem;
- hostdata->iomem_size = iomem_size;
- switch (overrides[current_override].board) {
- case BOARD_NCR53C400:
- hostdata->c400_ctl_status = 0x100;
- hostdata->c400_blk_cnt = 0x101;
- hostdata->c400_host_buf = 0x104;
- break;
- case BOARD_DTC3181E:
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
- goto out_unregister;
- }
+ instance->base = base;
+ hostdata->iomem = iomem;
+ hostdata->iomem_size = iomem_size;
+ switch (board) {
+ case BOARD_NCR53C400:
+ hostdata->c400_ctl_status = 0x100;
+ hostdata->c400_blk_cnt = 0x101;
+ hostdata->c400_host_buf = 0x104;
+ break;
+ case BOARD_DTC3181E:
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
+ ret = -EINVAL;
+ goto out_unregister;
+ }
#endif
- if (NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP))
- goto out_unregister;
+ ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
+ if (ret)
+ goto out_unregister;
- switch (overrides[current_override].board) {
- case BOARD_NCR53C400:
- case BOARD_DTC3181E:
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
- }
+ switch (board) {
+ case BOARD_NCR53C400:
+ case BOARD_DTC3181E:
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
+ }
- NCR5380_maybe_reset_bus(instance);
+ NCR5380_maybe_reset_bus(instance);
- if (overrides[current_override].irq != IRQ_AUTO)
- instance->irq = overrides[current_override].irq;
- else
- instance->irq = NCR5380_probe_irq(instance, 0xffff);
+ if (irq != IRQ_AUTO)
+ instance->irq = irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, 0xffff);
- /* Compatibility with documented NCR5380 kernel parameters */
- if (instance->irq == 255)
- instance->irq = NO_IRQ;
+ /* Compatibility with documented NCR5380 kernel parameters */
+ if (instance->irq == 255)
+ instance->irq = NO_IRQ;
- if (instance->irq != NO_IRQ) {
+ if (instance->irq != NO_IRQ) {
#ifndef SCSI_G_NCR5380_MEM
- /* set IRQ for HP C2502 */
- if (overrides[current_override].board == BOARD_HP_C2502)
- magic_configure(port_idx, instance->irq, magic);
+ /* set IRQ for HP C2502 */
+ if (board == BOARD_HP_C2502)
+ magic_configure(port_idx, instance->irq, magic);
#endif
- if (request_irq(instance->irq, generic_NCR5380_intr,
- 0, "NCR5380", instance)) {
- printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
- instance->irq = NO_IRQ;
- }
- }
-
- if (instance->irq == NO_IRQ) {
- printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
- printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ if (request_irq(instance->irq, generic_NCR5380_intr,
+ 0, "NCR5380", instance)) {
+ printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
+ instance->irq = NO_IRQ;
}
+ }
- ++current_override;
- ++count;
+ if (instance->irq == NO_IRQ) {
+ printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
}
- return count;
+ ret = scsi_add_host(instance, pdev);
+ if (ret)
+ goto out_free_irq;
+ scsi_scan_host(instance);
+ dev_set_drvdata(pdev, instance);
+ return 0;
+
+out_free_irq:
+ if (instance->irq != NO_IRQ)
+ free_irq(instance->irq, instance);
+ NCR5380_exit(instance);
out_unregister:
- scsi_unregister(instance);
+ scsi_host_put(instance);
out_release:
#ifndef SCSI_G_NCR5380_MEM
- release_region(overrides[current_override].NCR5380_map_name, region_size);
+ release_region(base, region_size);
#else
iounmap(iomem);
release_mem_region(base, iomem_size);
#endif
- return count;
+ return ret;
}
-/**
- * generic_NCR5380_release_resources - free resources
- * @instance: host adapter to clean up
- *
- * Free the generic interface resources from this adapter.
- *
- * Locks: none
- */
-
-static int generic_NCR5380_release_resources(struct Scsi_Host *instance)
+static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
{
+ scsi_remove_host(instance);
if (instance->irq != NO_IRQ)
free_irq(instance->irq, instance);
NCR5380_exit(instance);
@@ -511,7 +329,7 @@ static int generic_NCR5380_release_resources(struct Scsi_Host *instance)
release_mem_region(instance->base, hostdata->iomem_size);
}
#endif
- return 0;
+ scsi_host_put(instance);
}
/**
@@ -701,10 +519,9 @@ static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance,
#include "NCR5380.c"
static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
.proc_name = DRV_MODULE_NAME,
.name = "Generic NCR5380/NCR53C400 SCSI",
- .detect = generic_NCR5380_detect,
- .release = generic_NCR5380_release_resources,
.info = generic_NCR5380_info,
.queuecommand = generic_NCR5380_queue_command,
.eh_abort_handler = generic_NCR5380_abort,
@@ -718,31 +535,115 @@ static struct scsi_host_template driver_template = {
.max_sectors = 128,
};
-#include "scsi_module.c"
-module_param(ncr_irq, int, 0);
-module_param(ncr_dma, int, 0);
-module_param(ncr_addr, int, 0);
-module_param(ncr_5380, int, 0);
-module_param(ncr_53c400, int, 0);
-module_param(ncr_53c400a, int, 0);
-module_param(dtc_3181e, int, 0);
-module_param(hp_c2502, int, 0);
-MODULE_LICENSE("GPL");
+static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev)
+{
+ int ret = generic_NCR5380_init_one(&driver_template, pdev, base[ndev],
+ irq[ndev], card[ndev]);
+ if (ret) {
+ if (base[ndev])
+ printk(KERN_WARNING "Card not found at address 0x%03x\n",
+ base[ndev]);
+ return 0;
+ }
-#if !defined(SCSI_G_NCR5380_MEM) && defined(MODULE)
-static struct isapnp_device_id id_table[] = {
- {
- ISAPNP_ANY_ID, ISAPNP_ANY_ID,
- ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e),
- 0},
- {0}
+ return 1;
+}
+
+static int generic_NCR5380_isa_remove(struct device *pdev,
+ unsigned int ndev)
+{
+ generic_NCR5380_release_resources(dev_get_drvdata(pdev));
+ dev_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct isa_driver generic_NCR5380_isa_driver = {
+ .match = generic_NCR5380_isa_match,
+ .remove = generic_NCR5380_isa_remove,
+ .driver = {
+ .name = DRV_MODULE_NAME
+ },
+};
+
+#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+static struct pnp_device_id generic_NCR5380_pnp_ids[] = {
+ { .id = "DTC436e", .driver_data = BOARD_DTC3181E },
+ { .id = "" }
+};
+MODULE_DEVICE_TABLE(pnp, generic_NCR5380_pnp_ids);
+
+static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev,
+ const struct pnp_device_id *id)
+{
+ int base, irq;
+
+ if (pnp_activate_dev(pdev) < 0)
+ return -EBUSY;
+
+ base = pnp_port_start(pdev, 0);
+ irq = pnp_irq(pdev, 0);
+
+ return generic_NCR5380_init_one(&driver_template, &pdev->dev, base, irq,
+ id->driver_data);
+}
+
+static void generic_NCR5380_pnp_remove(struct pnp_dev *pdev)
+{
+ generic_NCR5380_release_resources(pnp_get_drvdata(pdev));
+ pnp_set_drvdata(pdev, NULL);
+}
+
+static struct pnp_driver generic_NCR5380_pnp_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = generic_NCR5380_pnp_ids,
+ .probe = generic_NCR5380_pnp_probe,
+ .remove = generic_NCR5380_pnp_remove,
};
+#endif /* !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) */
+
+static int pnp_registered, isa_registered;
+
+static int __init generic_NCR5380_init(void)
+{
+ int ret = 0;
+
+ /* compatibility with old-style parameters */
+ if (irq[0] == 0 && base[0] == 0 && card[0] == -1) {
+ irq[0] = ncr_irq;
+ base[0] = ncr_addr;
+ if (ncr_5380)
+ card[0] = BOARD_NCR5380;
+ if (ncr_53c400)
+ card[0] = BOARD_NCR53C400;
+ if (ncr_53c400a)
+ card[0] = BOARD_NCR53C400A;
+ if (dtc_3181e)
+ card[0] = BOARD_DTC3181E;
+ if (hp_c2502)
+ card[0] = BOARD_HP_C2502;
+ }
-MODULE_DEVICE_TABLE(isapnp, id_table);
+#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+ if (!pnp_register_driver(&generic_NCR5380_pnp_driver))
+ pnp_registered = 1;
#endif
+ ret = isa_register_driver(&generic_NCR5380_isa_driver, MAX_CARDS);
+ if (!ret)
+ isa_registered = 1;
+
+ return (pnp_registered || isa_registered) ? 0 : ret;
+}
+
+static void __exit generic_NCR5380_exit(void)
+{
+#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+ if (pnp_registered)
+ pnp_unregister_driver(&generic_NCR5380_pnp_driver);
+#endif
+ if (isa_registered)
+ isa_unregister_driver(&generic_NCR5380_isa_driver);
+}
-__setup("ncr5380=", do_NCR5380_setup);
-__setup("ncr53c400=", do_NCR53C400_setup);
-__setup("ncr53c400a=", do_NCR53C400A_setup);
-__setup("dtc3181e=", do_DTC3181E_setup);
+module_init(generic_NCR5380_init);
+module_exit(generic_NCR5380_exit);
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 595177428d76..b175b9234458 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -14,15 +14,9 @@
#ifndef GENERIC_NCR5380_H
#define GENERIC_NCR5380_H
-#define __STRVAL(x) #x
-#define STRVAL(x) __STRVAL(x)
-
#ifndef SCSI_G_NCR5380_MEM
#define DRV_MODULE_NAME "g_NCR5380"
-#define NCR5380_map_type int
-#define NCR5380_map_name port
-
#define NCR5380_read(reg) \
inb(instance->io_port + (reg))
#define NCR5380_write(reg, value) \
@@ -38,8 +32,6 @@
/* therefore SCSI_G_NCR5380_MEM */
#define DRV_MODULE_NAME "g_NCR5380_mmio"
-#define NCR5380_map_type unsigned long
-#define NCR5380_map_name base
#define NCR53C400_mem_base 0x3880
#define NCR53C400_host_buffer 0x3900
#define NCR53C400_region_size 0x3a00
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index a8762a3efeef..532474109624 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2586,7 +2586,6 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
u32 fd_ioasc;
- char *envp[] = { "ASYNC_ERR_LOG=1", NULL };
if (ioa_cfg->sis64)
fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
@@ -2607,8 +2606,8 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
}
list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
+ schedule_work(&ioa_cfg->work_q);
hostrcb = ipr_get_free_hostrcb(ioa_cfg);
- kobject_uevent_env(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE, envp);
ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c051694bfcb0..f9b6fba689ff 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -791,9 +791,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
free_task:
/* regular RX path uses back_lock */
- spin_lock_bh(&session->back_lock);
+ spin_lock(&session->back_lock);
__iscsi_put_task(task);
- spin_unlock_bh(&session->back_lock);
+ spin_unlock(&session->back_lock);
return NULL;
}
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 54d446c9f56e..b8d3b97b217a 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -36,9 +36,9 @@ struct scsi_dh_blist {
};
static const struct scsi_dh_blist scsi_dh_blist[] = {
- {"DGC", "RAID", "clariion" },
- {"DGC", "DISK", "clariion" },
- {"DGC", "VRAID", "clariion" },
+ {"DGC", "RAID", "emc" },
+ {"DGC", "DISK", "emc" },
+ {"DGC", "VRAID", "emc" },
{"COMPAQ", "MSA1000 VOLUME", "hp_sw" },
{"COMPAQ", "HSV110", "hp_sw" },
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 212e98d940bc..6f7128f49c30 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1307,7 +1307,6 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
enum scsi_scan_mode rescan)
{
- char devname[64];
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
unsigned int length;
u64 lun;
@@ -1349,9 +1348,6 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
}
}
- sprintf(devname, "host %d channel %d id %d",
- shost->host_no, sdev->channel, sdev->id);
-
/*
* Allocate enough to hold the header (the same size as one scsi_lun)
* plus the number of luns we are requesting. 511 was the default
@@ -1470,12 +1466,12 @@ retry:
out_err:
kfree(lun_data);
out:
- scsi_device_put(sdev);
if (scsi_device_created(sdev))
/*
* the sdev we used didn't appear in the report luns scan
*/
__scsi_remove_device(sdev);
+ scsi_device_put(sdev);
return ret;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 7af5226aa55b..618422ea3a41 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4922,9 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
res = get_user_pages_unlocked(
uaddr,
nr_pages,
- rw == READ,
- 0, /* don't force */
- pages);
+ pages,
+			rw == READ ? FOLL_WRITE : 0); /* no FOLL_FORCE */
/* Errors and no page mapped should return here */
if (res < nr_pages)
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 47966909286d..e27b4d4e6ae2 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -63,7 +63,7 @@ config SCSI_UFSHCD_PCI
config SCSI_UFS_DWC_TC_PCI
tristate "DesignWare pci support using a G210 Test Chip"
- depends on SCSI_UFSHCD && PCI
+ depends on SCSI_UFSHCD_PCI
---help---
Synopsys Test Chip is a PHY for prototyping purposes.
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index ee4ab85e2801..22f881e9253a 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -25,6 +25,7 @@
#define UFS_VENDOR_TOSHIBA 0x198
#define UFS_VENDOR_SAMSUNG 0x1CE
+#define UFS_VENDOR_SKHYNIX 0x1AD
/**
* ufs_device_info - ufs device details
@@ -145,6 +146,7 @@ static struct ufs_dev_fix ufs_fixups[] = {
UFS_DEVICE_QUIRK_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
END_FIX
};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 37f3c51e9d92..05c745663c10 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1266,9 +1266,12 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
0, query->request.query_func, 0, 0);
- /* Data segment length */
- ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
- 0, 0, len >> 8, (u8)len);
+	/* Data segment length is only needed for WRITE_DESC */
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ ucd_req_ptr->header.dword_2 =
+ UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
+ else
+ ucd_req_ptr->header.dword_2 = 0;
/* Copy the Query Request buffer as is */
memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
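
The WRITE_DESC branch above packs the data segment length into the two low
byte lanes of dword_2. A minimal standalone sketch of that packing, using a
hypothetical upiu_header_dword() helper that mirrors the kernel's
UPIU_HEADER_DWORD() byte-lane layout (the real macro also applies a
cpu_to_be32() conversion, omitted here):

#include <assert.h>
#include <stdint.h>

/* Simplified stand-in for UPIU_HEADER_DWORD(): pack four byte lanes. */
static uint32_t upiu_header_dword(uint8_t b3, uint8_t b2, uint8_t b1, uint8_t b0)
{
	return ((uint32_t)b3 << 24) | ((uint32_t)b2 << 16) |
	       ((uint32_t)b1 << 8) | b0;
}

int main(void)
{
	uint16_t len = 0x0123;	/* example descriptor length */

	/* high byte of len lands in lane 1, low byte in lane 0 */
	assert(upiu_header_dword(0, 0, len >> 8, (uint8_t)len) == 0x0123);
	return 0;
}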
@@ -6500,6 +6503,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
if (IS_ERR(hba->devfreq)) {
dev_err(hba->dev, "Unable to register with devfreq %ld\n",
PTR_ERR(hba->devfreq));
+ err = PTR_ERR(hba->devfreq);
goto out_remove_scsi_host;
}
/* Suspend devfreq until the UFS device is detected */
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index fe42a2fdf351..e6e90e80519a 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,6 +1,7 @@
menu "SOC (System On Chip) specific Drivers"
source "drivers/soc/bcm/Kconfig"
+source "drivers/soc/fsl/qbman/Kconfig"
source "drivers/soc/fsl/qe/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 203307fd92c1..75e1f5334821 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -2,5 +2,6 @@
# Makefile for the Linux Kernel SOC fsl specific device drivers
#
+obj-$(CONFIG_FSL_DPAA) += qbman/
obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
new file mode 100644
index 000000000000..757033c0586c
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -0,0 +1,67 @@
+menuconfig FSL_DPAA
+ bool "Freescale DPAA 1.x support"
+ depends on FSL_SOC_BOOKE
+ select GENERIC_ALLOCATOR
+ help
+ The Freescale Data Path Acceleration Architecture (DPAA) is a set of
+ hardware components on specific QorIQ multicore processors.
+ This architecture provides the infrastructure to support simplified
+ sharing of networking interfaces and accelerators by multiple CPUs.
+ The major h/w blocks composing DPAA are BMan and QMan.
+
+ The Buffer Manager (BMan) is a hardware buffer pool management block
+ that allows software and accelerators on the datapath to acquire and
+ release buffers in order to build frames.
+
+ The Queue Manager (QMan) is a hardware queue management block
+ that allows software and accelerators on the datapath to enqueue and
+ dequeue frames in order to communicate.
+
+if FSL_DPAA
+
+config FSL_DPAA_CHECKING
+ bool "Additional driver checking"
+ help
+ Compiles in additional checks, to sanity-check the drivers and
+ any use of the exported API. Not recommended for performance.
+
+config FSL_BMAN_TEST
+ tristate "BMan self-tests"
+ help
+ Compile the BMan self-test code. These tests will
+ exercise the BMan APIs to confirm functionality
+ of both the software drivers and hardware device.
+
+config FSL_BMAN_TEST_API
+ bool "High-level API self-test"
+ depends on FSL_BMAN_TEST
+ default y
+ help
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine
+ to the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST
+ tristate "QMan self-tests"
+ help
+ Compile self-test code for QMan.
+
+config FSL_QMAN_TEST_API
+ bool "QMan high-level self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ help
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine to
+ the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST_STASH
+ bool "QMan 'hot potato' data-stashing self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ help
+ This performs a "hot potato" style test enqueuing/dequeuing a frame
+ across a series of FQs scheduled to different portals (and cpus), with
+ DQRR, data and context stashing always on.
+
+endif # FSL_DPAA
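
For orientation, a minimal kernel-side sketch of the buffer lifecycle the
help text describes, using the driver API added later in this series
(bman_new_pool(), bman_release(), bman_acquire(), bman_free_pool()); the
function name and the caller-supplied DMA address are hypothetical, and
error handling is trimmed:

#include <linux/errno.h>
#include <soc/fsl/bman.h>

static int bman_lifecycle_demo(dma_addr_t buf_addr)
{
	struct bman_pool *pool;
	struct bm_buffer buf;
	int ret;

	pool = bman_new_pool();			/* allocate a free BPID */
	if (!pool)
		return -ENOMEM;

	bm_buffer_set64(&buf, buf_addr);	/* hand one buffer to BMan */
	ret = bman_release(pool, &buf, 1);
	if (!ret)
		ret = bman_acquire(pool, &buf, 1);	/* take it back */

	bman_free_pool(pool);
	return ret < 0 ? ret : 0;
}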
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
new file mode 100644
index 000000000000..7ae199f1664e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \
+ bman_portal.o qman_portal.o \
+ bman.o qman.o
+
+obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o
+bman-test-y = bman_test.o
+bman-test-$(CONFIG_FSL_BMAN_TEST_API) += bman_test_api.o
+
+obj-$(CONFIG_FSL_QMAN_TEST) += qman-test.o
+qman-test-y = qman_test.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_API) += qman_test_api.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_STASH) += qman_test_stash.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
new file mode 100644
index 000000000000..ffa48fdbb1a9
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -0,0 +1,797 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x0000
+#define BM_REG_RCR_CI_CINH 0x0004
+#define BM_REG_RCR_ITR 0x0008
+#define BM_REG_CFG 0x0100
+#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
+#define BM_REG_ISR 0x0e00
+#define BM_REG_IER 0x0e04
+#define BM_REG_ISDR 0x0e08
+#define BM_REG_IIR 0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+/*
+ * Portal modes.
+ * Enum types:
+ *   pmode == production mode
+ *   cmode == consumption mode
+ * Enum values use 3-letter codes. The first letter matches the portal mode,
+ * the remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
+};
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
+};
+
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+/* Release Command */
+struct bm_rcr_entry {
+ union {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+ u8 __reserved1[62];
+ };
+ struct bm_buffer bufs[8];
+ };
+};
+#define BM_RCR_VERB_VBIT 0x80
+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+struct bm_mc_command {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used by acquire command */
+ u8 __reserved[62];
+};
+#define BM_MCC_VERB_VBIT 0x80
+#define BM_MCC_VERB_CMD_MASK	0x70	/* the verb contains one of: */
+#define BM_MCC_VERB_CMD_ACQUIRE 0x10
+#define BM_MCC_VERB_CMD_QUERY 0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
+
+/* MC result, Acquire and Query Response */
+union bm_mc_result {
+ struct {
+ u8 verb;
+ u8 bpid;
+ u8 __reserved[62];
+ };
+ struct bm_buffer bufs[8];
+};
+#define BM_MCR_VERB_VBIT 0x80
+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
+#define BM_MCR_VERB_CMD_ERR_ECC 0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+#define BM_MCR_TIMEOUT 10000 /* us */
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ union bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
+};
+
+struct bm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 bm_in(struct bm_portal *p, u32 offset)
+{
+ return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
+{
+ __raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
+{
+ return __raw_readl(p->addr.ce + offset);
+}
+
+struct bman_portal {
+ struct bm_portal p;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ /* probing time config params for cpu-affine portals */
+ const struct bm_portal_config *config;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(bman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool; it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, e.g. if different users
+ * of the pool are operating via different portals.
+ */
+struct bman_pool {
+ /* index of the buffer pool to encapsulate (0-63) */
+ u32 bpid;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+};
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+ struct bman_portal *p = ptr;
+ struct bm_portal *portal = &p->p;
+ u32 clear = p->irq_sources;
+ u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ clear |= poll_portal_slow(p, is);
+ bm_out(portal, BM_REG_ISR, clear);
+ return IRQ_HANDLED;
+}
+
+/* --- RCR API --- */
+
+#define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry))
+#define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~RCR_CARRY;
+
+ return (struct bm_rcr_entry *)addr;
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int rcr_ptr2idx(struct bm_rcr_entry *e)
+{
+ return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
+}
+#endif
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void rcr_inc(struct bm_rcr *rcr)
+{
+ /* increment to the next RCR pointer and handle overflow and 'vbit' */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+ rcr->cursor = rcr_carryclear(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
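
A standalone sketch of the carry-clear wrap implemented by rcr_carryclear()
and rcr_inc() above, using hypothetical numbers: with 8 entries of 64 bytes
each and a ring base aligned to 512 bytes, incrementing one past the last
entry sets the 0x200 "carry bit"; masking it off returns the cursor to the
base (this is also the moment the valid-bit toggles):

#include <stdint.h>
#include <stdio.h>

#define ENTRY_SZ	64u
#define RING_SZ		8u
#define CARRY		((uintptr_t)(RING_SZ * ENTRY_SZ))	/* 0x200 */

int main(void)
{
	uintptr_t base = 0x1000;	/* 512-byte-aligned ring base */
	uintptr_t cursor = base + (RING_SZ - 1) * ENTRY_SZ;	/* last entry */

	cursor += ENTRY_SZ;		/* one past the ring: 0x1200 */
	if (cursor & CARRY)		/* overflowed: clear the carry bit */
		cursor &= ~CARRY;	/* wraps back to 0x1000 */
	printf("cursor wrapped to %#lx\n", (unsigned long)cursor);
	return 0;
}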
+
+static int bm_rcr_get_avail(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->available;
+}
+
+static int bm_rcr_get_fill(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ rcr->ithresh = ithresh;
+ bm_out(portal, BM_REG_RCR_ITR, ithresh);
+}
+
+static void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
+}
+
+static u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
+ diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(!rcr->busy);
+ if (!rcr->available)
+ return NULL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 1;
+#endif
+ dpaa_zero(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+ DPAA_ASSERT(rcr->available >= 1);
+ dma_wmb();
+ rcursor = rcr->cursor;
+ rcursor->_ncw_verb = myverb | rcr->vbit;
+ dpaa_flush(rcursor);
+ rcr_inc(rcr);
+ rcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ enum bm_rcr_cmode cmode)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.ce + BM_CL_RCR;
+ rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
+ BM_RCR_VERB_VBIT : 0;
+ rcr->available = BM_RCR_SIZE - 1
+ - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
+ | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(portal, BM_REG_CFG, cfg);
+ return 0;
+}
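
In the 'available' computation above, dpaa_cyc_diff() (provided by
dpaa_sys.h, not shown in this hunk) is assumed to be the cyclic distance
(last - first) modulo the ring size. A worked sketch with hypothetical
indices:

#include <assert.h>

/* Assumed behaviour of dpaa_cyc_diff() for a power-of-two ring size. */
static int cyc_diff(int ringsize, int first, int last)
{
	return (last - first) & (ringsize - 1);
}

int main(void)
{
	int ci = 6, pi = 2;	/* consumer at 6, producer wrapped to 2 */

	/* 4 entries in flight, so 8 - 1 - 4 = 3 remain available */
	assert(cyc_diff(8, ci, pi) == 4);
	assert(8 - 1 - cyc_diff(8, ci, pi) == 3);
	return 0;
}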
+
+static void bm_rcr_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_rcr *rcr = &portal->rcr;
+ int i;
+
+ DPAA_ASSERT(!rcr->busy);
+
+ i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr_ptr2idx(rcr->cursor))
+		pr_crit("losing uncommitted RCR entries\n");
+
+ i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != rcr_ptr2idx(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+#endif
+}
+
+/* --- Management command API --- */
+static int bm_mc_init(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + BM_CL_CR;
+ mc->rr = portal->addr.ce + BM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
+ 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return 0;
+}
+
+static void bm_mc_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_hw;
+#endif
+}
+
+static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
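
A user-space sketch of the two-slot handshake described in the comment
above (slot layout and verb value are made up): the consumer polls the slot
it expects, treats a zero verb byte as "not ready", and flips to the other
slot once a response lands, mirroring the rridx/vbit toggling:

#include <stdint.h>
#include <stdio.h>

struct resp { volatile uint8_t verb; };

static struct resp rr[2];	/* ping-pong response slots (RR0/RR1) */
static unsigned int rridx;	/* slot the next response should land in */

static struct resp *poll_result(void)
{
	struct resp *r = &rr[rridx];

	if (!r->verb)		/* zero verb byte: hardware not done yet */
		return NULL;
	rridx ^= 1;		/* expect the next response in the other slot */
	return r;
}

int main(void)
{
	printf("ready: %s\n", poll_result() ? "yes" : "no");	/* no */
	rr[0].verb = 0x10;	/* pretend the hardware completed */
	printf("ready: %s\n", poll_result() ? "yes" : "no");	/* yes */
	return 0;
}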
+
+static inline int bm_mc_result_timeout(struct bm_portal *portal,
+ union bm_mc_result **mcr)
+{
+ int timeout = BM_MCR_TIMEOUT;
+
+ do {
+ *mcr = bm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ return timeout;
+}
+
+/* Disable all BSCN interrupts for the portal */
+static void bm_isr_bscn_disable(struct bm_portal *portal)
+{
+ bm_out(portal, BM_REG_SCN(0), 0);
+ bm_out(portal, BM_REG_SCN(1), 0);
+}
+
+static int bman_create_portal(struct bman_portal *portal,
+ const struct bm_portal_config *c)
+{
+ struct bm_portal *p;
+ int ret;
+
+ p = &portal->p;
+ /*
+	 * Prep the low-level portal struct with the mapped addresses from the
+	 * config; everything that follows depends on it, and "config" is kept
+	 * mostly for (de)reference.
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+ dev_err(c->dev, "RCR initialisation failed\n");
+ goto fail_rcr;
+ }
+ if (bm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /*
+	 * Default to all BPIDs disabled; we enable them as required at
+	 * run-time.
+ */
+ bm_isr_bscn_disable(p);
+
+ /* Write-to-clear any stale interrupt status bits */
+ bm_out(p, BM_REG_ISDR, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_out(p, BM_REG_IER, 0);
+ bm_out(p, BM_REG_ISR, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
+ if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+ irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+ dev_err(c->dev, "irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
+
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(p);
+ if (ret) {
+ dev_err(c->dev, "RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = c;
+
+ bm_out(p, BM_REG_ISDR, 0);
+ bm_out(p, BM_REG_IIR, 0);
+
+ return 0;
+
+fail_rcr_empty:
+fail_affinity:
+ free_irq(c->irq, portal);
+fail_irq:
+ bm_mc_finish(p);
+fail_mc:
+ bm_rcr_finish(p);
+fail_rcr:
+ return -EIO;
+}
+
+struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
+{
+ struct bman_portal *portal;
+ int err;
+
+ portal = &per_cpu(bman_affine_portal, c->cpu);
+ err = bman_create_portal(portal, c);
+ if (err)
+ return NULL;
+
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+
+ return portal;
+}
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is)
+{
+ u32 ret = is;
+
+ if (is & BM_PIRQ_RCRI) {
+ bm_rcr_cce_update(&p->p);
+ bm_rcr_set_ithresh(&p->p, 0);
+ bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
+ is &= ~BM_PIRQ_RCRI;
+ }
+
+ /* There should be no status register bits left undefined */
+ DPAA_ASSERT(!is);
+ return ret;
+}
+
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+ bm_out(&p->p, BM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+ return 0;
+}
+
+static int bm_shutdown_pool(u32 bpid)
+{
+ struct bm_mc_command *bm_cmd;
+ union bm_mc_result *bm_res;
+
+ while (1) {
+ struct bman_portal *p = get_affine_portal();
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(&p->p);
+ bm_cmd->bpid = bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ if (!bm_mc_result_timeout(&p->p, &bm_res)) {
+ put_affine_portal();
+			pr_crit("BMan Acquire Command timed out\n");
+ return -ETIMEDOUT;
+ }
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ put_affine_portal();
+ /* Pool is empty */
+ return 0;
+ }
+ put_affine_portal();
+ }
+
+ return 0;
+}
+
+struct gen_pool *bm_bpalloc;
+
+static int bm_alloc_bpid_range(u32 *result, u32 count)
+{
+ unsigned long addr;
+
+ addr = gen_pool_alloc(bm_bpalloc, count);
+ if (!addr)
+ return -ENOMEM;
+
+ *result = addr & ~DPAA_GENALLOC_OFF;
+
+ return 0;
+}
+
+static int bm_release_bpid(u32 bpid)
+{
+ int ret;
+
+ ret = bm_shutdown_pool(bpid);
+ if (ret) {
+ pr_debug("BPID %d leaked\n", bpid);
+ return ret;
+ }
+
+ gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+
+struct bman_pool *bman_new_pool(void)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
+
+ if (bm_alloc_bpid_range(&bpid, 1))
+ return NULL;
+
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
+
+ pool->bpid = bpid;
+
+ return pool;
+err:
+ bm_release_bpid(bpid);
+ kfree(pool);
+ return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+ bm_release_bpid(pool->bpid);
+
+ kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+int bman_get_bpid(const struct bman_pool *pool)
+{
+ return pool->bpid;
+}
+EXPORT_SYMBOL(bman_get_bpid);
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ unsigned long irqflags;
+ int avail, timeout = 1000; /* 1ms */
+ int i = num - 1;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ do {
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ avail = bm_rcr_get_avail(&p->p);
+ if (avail < 2)
+ update_rcr_ci(p, avail);
+ r = bm_rcr_start(&p->p);
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ if (likely(r))
+ break;
+
+ udelay(1);
+ } while (--timeout);
+
+ if (unlikely(!timeout))
+ return -ETIMEDOUT;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ /*
+ * we can copy all but the first entry, as this can trigger badness
+ * with the valid-bit
+ */
+ bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
+ bm_buffer_set_bpid(r->bufs, pool->bpid);
+ if (i)
+ memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(bman_release);
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ union bm_mc_result *mcr;
+ int ret;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ mcc = bm_mc_start(&p->p);
+ mcc->bpid = pool->bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ if (!bm_mc_result_timeout(&p->p, &mcr)) {
+ put_affine_portal();
+ pr_crit("BMan Acquire Timeout\n");
+ return -ETIMEDOUT;
+ }
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs)
+ memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
+
+ put_affine_portal();
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+ return portal->config;
+}
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
new file mode 100644
index 000000000000..9deb0524543f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -0,0 +1,263 @@
+/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FBPR_BARE 0x0c00
+#define REG_FBPR_BAR 0x0c04
+#define REG_FBPR_AR 0x0c10
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_ERR_ISDR 0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
+#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
+#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
+#define BM_EIRQ_BSCN	0x00000001	/* Pool State Change Notification */
+
+struct bman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+ { BM_EIRQ_IVCI, "Invalid Command Verb" },
+ { BM_EIRQ_FLWI, "FBPR Low Watermark" },
+ { BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { BM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Only trigger the low watermark interrupt once */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+static inline u32 bm_ccsr_in(u32 offset)
+{
+ return ioread32be(bm_ccsr_start + offset/4);
+}
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, bm_ccsr_start + offset/4);
+}
+
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = bm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+static void bm_set_memory(u64 ba, u32 size)
+{
+ u32 exp = ilog2(size);
+ /* choke if size isn't within range */
+ DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+ is_power_of_2(size));
+ /* choke if '[e]ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+ bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_AR, exp - 1);
+}
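
The REG_FBPR_AR write above encodes the area size as ilog2(size) - 1. A
standalone check of that encoding (fbpr_ar_encode() is a hypothetical
helper; the driver itself relies on the kernel's ilog2()):

#include <assert.h>

static unsigned int fbpr_ar_encode(unsigned long size)
{
	unsigned int exp = 0;

	while ((1UL << exp) < size)	/* ilog2 for a power-of-two size */
		exp++;
	return exp - 1;
}

int main(void)
{
	assert(fbpr_ar_encode(4096) == 11);		/* 4 KiB minimum */
	assert(fbpr_ar_encode(1UL << 20) == 19);	/* 1 MiB */
	assert(fbpr_ar_encode(1UL << 30) == 29);	/* 1 GiB maximum */
	return 0;
}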
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+ fbpr_a = rmem->base;
+ fbpr_sz = rmem->size;
+
+ WARN_ON(!(fbpr_a && fbpr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = bm_ccsr_in(REG_ERR_IER);
+ isr_val = bm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = bm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
+ if (bman_hwerr_txts[i].mask & isr_mask) {
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ bman_hwerr_txts[i].txt);
+ if (bman_hwerr_txts[i].mask & ecsr_val) {
+ /* Re-arm error capture registers */
+ bm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ bman_hwerr_txts[i].mask);
+ ier_val &= ~bman_hwerr_txts[i].mask;
+ bm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ bm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int fsl_bman_probe(struct platform_device *pdev)
+{
+ int ret, err_irq;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ u16 id, bm_pool_cnt;
+ u8 major, minor;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+	bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+ if (!bm_ccsr_start)
+ return -ENXIO;
+
+ bm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ bman_ip_rev = BMAN_REV10;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else if (major == 2 && minor == 0) {
+ bman_ip_rev = BMAN_REV20;
+ bm_pool_cnt = 8;
+ } else if (major == 2 && minor == 1) {
+ bman_ip_rev = BMAN_REV21;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else {
+		dev_err(dev, "Unknown BMan version: %04x,%02x,%02x\n",
+ id, major, minor);
+ return -ENODEV;
+ }
+
+ bm_set_memory(fbpr_a, fbpr_sz);
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+		dev_err(dev, "Can't get %s IRQ\n", node->full_name);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+ ret, node->full_name);
+ return ret;
+ }
+ /* Disable Buffer Pool State Change */
+ bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
+ /*
+	 * Write-to-clear any stale bits (e.g. starvation being asserted prior
+	 * to resource allocation during driver init).
+ */
+ bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ bm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
+ if (IS_ERR(bm_bpalloc)) {
+ ret = PTR_ERR(bm_bpalloc);
+ dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* seed BMan resource pool */
+ ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
+ 0, bm_pool_cnt - 1, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id fsl_bman_ids[] = {
+ {
+ .compatible = "fsl,bman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_bman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_bman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_bman_probe,
+};
+
+builtin_platform_driver(fsl_bman_driver);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644
index 000000000000..6579cc18811a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -0,0 +1,219 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct bman_portal *affine_bportals[NR_CPUS];
+static struct cpumask portal_cpus;
+/* protect bman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(bman_lock);
+
+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
+{
+ struct bman_portal *p = bman_create_affine_portal(pcfg);
+
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ bman_p_irqsource_add(p, BM_PIRQ_RCRI);
+ affine_bportals[pcfg->cpu] = p;
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
+}
+
+static void bman_offline_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (!p)
+ return;
+
+ pcfg = bman_get_bm_portal_config(p);
+ if (!pcfg)
+ return;
+
+ irq_set_affinity(pcfg->irq, cpumask_of(0));
+}
+
+static void bman_online_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (!p)
+ return;
+
+ pcfg = bman_get_bm_portal_config(p);
+ if (!pcfg)
+ return;
+
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+}
+
+static int bman_hotplug_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ bman_online_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ bman_offline_cpu(cpu);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bman_hotplug_cpu_notifier = {
+ .notifier_call = bman_hotplug_cpu_callback,
+};
+
+static int bman_portal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct bm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ void __iomem *va;
+ int irq, cpu;
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg)
+ return -ENOMEM;
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %s property 'reg::CE'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %s property 'reg::CI'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ pcfg->cpu = -1;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+		dev_err(dev, "Can't get %s IRQ\n", node->full_name);
+ return -ENXIO;
+ }
+ pcfg->irq = irq;
+
+ va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+ if (!va)
+ goto err_ioremap1;
+
+ pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+ va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+ _PAGE_GUARDED | _PAGE_NO_CACHE);
+ if (!va)
+ goto err_ioremap2;
+
+ pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+ spin_lock(&bman_lock);
+ cpu = cpumask_next_zero(-1, &portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ /* unassigned portal, skip init */
+ spin_unlock(&bman_lock);
+ return 0;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&bman_lock);
+ pcfg->cpu = cpu;
+
+ if (!init_pcfg(pcfg))
+ goto err_ioremap2;
+
+ /* clear irq affinity if assigned cpu is offline */
+ if (!cpu_online(cpu))
+ bman_offline_cpu(cpu);
+
+ return 0;
+
+err_ioremap2:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+ dev_err(dev, "ioremap failed\n");
+ return -ENXIO;
+}
+
+static const struct of_device_id bman_portal_ids[] = {
+ {
+ .compatible = "fsl,bman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bman_portal_ids);
+
+static struct platform_driver bman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = bman_portal_ids,
+ },
+ .probe = bman_portal_probe,
+};
+
+static int __init bman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
+
+ return 0;
+}
+
+module_driver(bman_portal_driver,
+ bman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644
index 000000000000..f6896a2f6d90
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -0,0 +1,80 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
+
+extern struct gen_pool *bm_bpalloc;
+
+struct bm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ struct device *dev;
+ /* User-visible portal configuration settings */
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+};
+
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config);
+/*
+ * The bman_p_***() variant below may be called even when the cpu to which
+ * the portal is affine is not yet online.
+ * @bman_portal specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE BM_PIRQ_RCRI
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/bman_test.c b/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644
index 000000000000..09b1c960b26a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.c
@@ -0,0 +1,53 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("BMan testing");
+
+static int test_init(void)
+{
+#ifdef CONFIG_FSL_BMAN_TEST_API
+ int loop = 1;
+
+ while (loop--)
+ bman_test_api();
+#endif
+ return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/bman_test.h b/drivers/soc/fsl/qbman/bman_test.h
new file mode 100644
index 000000000000..037ed342add4
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.h
@@ -0,0 +1,35 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+void bman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/bman_test_api.c b/drivers/soc/fsl/qbman/bman_test_api.c
new file mode 100644
index 000000000000..6f6bdd154fe3
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test_api.c
@@ -0,0 +1,151 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+#define NUM_BUFS 93
+#define LOOPS 3
+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
+
+static struct bman_pool *pool;
+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
+static int bufs_received;
+
+static void bufs_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_BUFS; i++)
+ bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
+ bufs_received = 0;
+}
+
+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
+{
+ if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {
+
+ /*
+		 * On SoCs with BMan revision 2.0, BMan only respects the 40
+ * LS-bits of buffer addresses, masking off the upper 8-bits on
+ * release commands. The API provides for 48-bit addresses
+ * because some SoCs support all 48-bits. When generating
+ * garbage addresses for testing, we either need to zero the
+ * upper 8-bits when releasing to BMan (otherwise we'll be
+ * disappointed when the buffers we acquire back from BMan
+ * don't match), or we need to mask the upper 8-bits off when
+ * comparing. We do the latter.
+ */
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) <
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return -1;
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) >
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return 1;
+ } else {
+ if (bm_buffer_get64(a) < bm_buffer_get64(b))
+ return -1;
+ if (bm_buffer_get64(a) > bm_buffer_get64(b))
+ return 1;
+ }
+
+ return 0;
+}
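
A standalone illustration of the rev-2.0 masking performed above (the
addresses are made up): the hardware drops the upper 8 bits of a released
48-bit address, so both sides must be masked down to 40 bits before
comparison:

#include <assert.h>
#include <stdint.h>

#define TOKEN_MASK	0x00FFFFFFFFFFULL	/* 40 LS-bits kept by rev 2.0 */

int main(void)
{
	uint64_t released = 0xAB0123456789ULL;		/* upper 8 bits set */
	uint64_t acquired = released & TOKEN_MASK;	/* what BMan hands back */

	assert(released != acquired);			/* raw compare fails */
	assert((released & TOKEN_MASK) == (acquired & TOKEN_MASK));
	return 0;
}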
+
+static void bufs_confirm(void)
+{
+ int i, j;
+
+ for (i = 0; i < NUM_BUFS; i++) {
+ int matches = 0;
+
+ for (j = 0; j < NUM_BUFS; j++)
+ if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
+ matches++;
+ WARN_ON(matches != 1);
+ }
+}
+
+/* Test the BMan release/acquire API on a new pool */
+void bman_test_api(void)
+{
+ int i, loops = LOOPS;
+
+ bufs_init();
+
+ pr_info("%s(): Starting\n", __func__);
+
+ pool = bman_new_pool();
+ if (!pool) {
+ pr_crit("bman_new_pool() failed\n");
+ goto failed;
+ }
+
+ /* Release buffers */
+do_loop:
+ i = 0;
+ while (i < NUM_BUFS) {
+ int num = 8;
+
+ if (i + num > NUM_BUFS)
+ num = NUM_BUFS - i;
+ if (bman_release(pool, bufs_in + i, num)) {
+ pr_crit("bman_release() failed\n");
+ goto failed;
+ }
+ i += num;
+ }
+
+ /* Acquire buffers */
+ while (i > 0) {
+ int tmp, num = 8;
+
+ if (num > i)
+ num = i;
+ tmp = bman_acquire(pool, bufs_out + i - num, num);
+ WARN_ON(tmp != num);
+ i -= num;
+ }
+ i = bman_acquire(pool, NULL, 1);
+ WARN_ON(i > 0);
+
+ bufs_confirm();
+
+ if (--loops)
+ goto do_loop;
+
+ /* Clean up */
+ bman_free_pool(pool);
+ pr_info("%s(): Finished\n", __func__);
+ return;
+
+failed:
+ WARN_ON(1);
+}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
new file mode 100644
index 000000000000..b63fd72295c6
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -0,0 +1,103 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <linux/cpu.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/prefetch.h>
+#include <linux/genalloc.h>
+#include <asm/cacheflush.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
+#error "Unsupported Cacheline Size"
+#endif
+
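+/* Flush one 64-byte portal cacheline from the data cache to memory */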
+static inline void dpaa_flush(void *p)
+{
+#ifdef CONFIG_PPC
+ flush_dcache_range((unsigned long)p, (unsigned long)p+64);
+#elif defined(CONFIG_ARM)
+ __cpuc_flush_dcache_area(p, 64);
+#elif defined(CONFIG_ARM64)
+ __flush_dcache_area(p, 64);
+#endif
+}
+
+#define dpaa_invalidate(p) dpaa_flush(p)
+
+#define dpaa_zero(p) memset(p, 0, 64)
+
+static inline void dpaa_touch_ro(void *p)
+{
+#if (L1_CACHE_BYTES == 32)
+ prefetch(p+32);
+#endif
+ prefetch(p);
+}
+
+/* Commonly used combo */
+static inline void dpaa_invalidate_touch_ro(void *p)
+{
+ dpaa_invalidate(p);
+ dpaa_touch_ro(p);
+}
+
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+#define DPAA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPAA_ASSERT(x)
+#endif
+
+/* cyclic helper for rings */
+static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
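+
+/*
+ * e.g. with ringsize 8, dpaa_cyc_diff(8, 6, 2) covers entries 6 and 7 plus,
+ * after the wrap, entries 0 and 1: 8 + 2 - 6 = 4.
+ */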
+
+/* Offset applied to genalloc pools due to zero being an error return */
+#define DPAA_GENALLOC_OFF 0x80000000
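+/*
+ * e.g. seeding a pool at base DPAA_GENALLOC_OFF lets id 0 be handed out as
+ * 0x80000000 while gen_pool_alloc()'s zero return still unambiguously means
+ * failure; users are expected to subtract the offset to recover the raw id.
+ */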
+
+#endif /* __DPAA_SYS_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 000000000000..119054bc922b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -0,0 +1,2881 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define DQRR_MAXFILL 15
+#define EQCR_ITHRESH 4 /* interrupt threshold if the EQCR congests */
+#define IRQNAME "QMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH 0x0000
+#define QM_REG_EQCR_CI_CINH 0x0004
+#define QM_REG_EQCR_ITR 0x0008
+#define QM_REG_DQRR_PI_CINH 0x0040
+#define QM_REG_DQRR_CI_CINH 0x0044
+#define QM_REG_DQRR_ITR 0x0048
+#define QM_REG_DQRR_DCAP 0x0050
+#define QM_REG_DQRR_SDQCR 0x0054
+#define QM_REG_DQRR_VDQCR 0x0058
+#define QM_REG_DQRR_PDQCR 0x005c
+#define QM_REG_MR_PI_CINH 0x0080
+#define QM_REG_MR_CI_CINH 0x0084
+#define QM_REG_MR_ITR 0x0088
+#define QM_REG_CFG 0x0100
+#define QM_REG_ISR 0x0e00
+#define QM_REG_IER 0x0e04
+#define QM_REG_ISDR 0x0e08
+#define QM_REG_IIR 0x0e0c
+#define QM_REG_ITPR 0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3100
+#define QM_CL_DQRR_PI_CENA 0x3200
+#define QM_CL_DQRR_CI_CENA 0x3300
+#define QM_CL_MR_PI_CENA 0x3400
+#define QM_CL_MR_CI_CENA 0x3500
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+/*
+ * BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses and data-dependencies. Use of barrier()s
+ * or other order-preserving primitives simply degrade performance. Hence the
+ * use of the __raw_*() interfaces, which simply ensure that the compiler treats
+ * the portal registers as volatile.
+ */
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
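+/* e.g. qm_cl(ring, 3) is ring + 0xc0, i.e. the fourth 64-byte cacheline */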
+
+/*
+ * Portal modes.
+ * Enum types:
+ *   pmode == production mode
+ *   cmode == consumption mode
+ *   dmode == h/w dequeue mode
+ * Enum values use 3-letter codes. The first letter matches the portal mode,
+ * the remaining two letters indicate:
+ *   ci == cache-inhibited portal register
+ *   ce == cache-enabled portal register
+ *   vb == in-band valid-bit (cache-enabled)
+ *   dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
+ qm_eqcr_pvb = 2 /* valid-bit */
+};
+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
+ qm_dqrr_dpull = 1 /* PDQCR */
+};
+enum qm_dqrr_pmode { /* s/w-only */
+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
+ qm_dqrr_pvb /* reads valid-bit */
+};
+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode { /* s/w-only */
+ qm_mr_pci, /* reads MR_PI_CINH */
+ qm_mr_pce, /* reads MR_PI_CENA */
+ qm_mr_pvb /* reads valid-bit */
+};
+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
+ qm_mr_cci = 0, /* CI index, cache-inhibited */
+ qm_mr_cce = 1 /* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE 8
+#define QM_DQRR_SIZE 16
+#define QM_MR_SIZE 8
+
+/* "Enqueue Command" */
+struct qm_eqcr_entry {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 dca;
+ u16 seqnum;
+ u32 orp; /* 24-bit */
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd;
+ u8 __reserved3[32];
+} __packed;
+#define QM_EQCR_VERB_VBIT 0x80
+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value is defined: */
+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
+
+struct qm_eqcr {
+ struct qm_eqcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+ const struct qm_dqrr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_dqrr_dmode dmode;
+ enum qm_dqrr_pmode pmode;
+ enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+ union qm_mr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_mr_pmode pmode;
+ enum qm_mr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+/* "Query FQ" */
+struct qm_mcc_queryfq {
+ u8 _ncw_verb;
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+/* "Alter FQ State Commands " */
+struct qm_mcc_alterfq {
+ u8 _ncw_verb;
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2;
+ u8 count; /* number of consecutive FQID */
+ u8 __reserved3[10];
+ u32 context_b; /* frame queue context b */
+ u8 __reserved4[40];
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcc_querycgr {
+ u8 _ncw_verb;
+ u8 __reserved1[30];
+ u8 cgid;
+ u8 __reserved2[32];
+};
+
+struct qm_mcc_querywq {
+ u8 _ncw_verb;
+ u8 __reserved;
+ /* select channel if verb != QUERYWQ_DEDICATED */
+ u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+ u8 __reserved2[60];
+} __packed;
+
+#define QM_MCC_VERB_VBIT 0x80
+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED 0x40
+#define QM_MCC_VERB_INITFQ_SCHED 0x41
+#define QM_MCC_VERB_QUERYFQ 0x44
+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ 0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
+#define QM_MCC_VERB_INITCGR 0x50
+#define QM_MCC_VERB_MODIFYCGR 0x51
+#define QM_MCC_VERB_CGRTESTWRITE 0x52
+#define QM_MCC_VERB_QUERYCGR 0x58
+#define QM_MCC_VERB_QUERYCONGESTION 0x59
+union qm_mc_command {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 __reserved[63];
+ };
+ struct qm_mcc_initfq initfq;
+ struct qm_mcc_queryfq queryfq;
+ struct qm_mcc_alterfq alterfq;
+ struct qm_mcc_initcgr initcgr;
+ struct qm_mcc_querycgr querycgr;
+ struct qm_mcc_querywq querywq;
+ struct qm_mcc_queryfq_np queryfq_np;
+};
+
+/* MC (Management Command) result */
+/* "Query FQ" */
+struct qm_mcr_queryfq {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[8];
+ struct qm_fqd fqd; /* the FQD fields are here */
+ u8 __reserved2[30];
+} __packed;
+
+/* "Alter FQ State Commands" */
+struct qm_mcr_alterfq {
+ u8 verb;
+ u8 result;
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[61];
+};
+#define QM_MCR_VERB_RRID 0x80
+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL 0x00
+#define QM_MCR_RESULT_OK 0xf0
+#define QM_MCR_RESULT_ERR_FQID 0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
+#define QM_MCR_RESULT_PENDING 0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+#define QM_MCR_TIMEOUT 10000 /* us */
+union qm_mc_result {
+ struct {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[62];
+ };
+ struct qm_mcr_queryfq queryfq;
+ struct qm_mcr_alterfq alterfq;
+ struct qm_mcr_querycgr querycgr;
+ struct qm_mcr_querycongestion querycongestion;
+ struct qm_mcr_querywq querywq;
+ struct qm_mcr_queryfq_np queryfq_np;
+};
+
+struct qm_mc {
+ union qm_mc_command *cr;
+ union qm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can be _mc_start()ed */
+ qman_mc_idle,
+ /* Can be _mc_commit()ed or _mc_abort()ed */
+ qman_mc_user,
+ /* Can only be _mc_retry()ed */
+ qman_mc_hw
+ } state;
+#endif
+};
+
+struct qm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct qm_portal {
+ /*
+ * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
+ * and including 'mc' fits within a cacheline (yay!). The 'config' part
+ * is setup-only, so isn't a cause for concern. In other words, don't
+ * rearrange this structure on a whim, there be dragons ...
+ */
+ struct qm_addr addr;
+ struct qm_eqcr eqcr;
+ struct qm_dqrr dqrr;
+ struct qm_mr mr;
+ struct qm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 qm_in(struct qm_portal *p, u32 offset)
+{
+ return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
+{
+ __raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
+{
+ return __raw_readl(p->addr.ce + offset);
+}
+
+/* --- EQCR API --- */
+
+#define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry))
+#define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~EQCR_CARRY;
+
+ return (struct qm_eqcr_entry *)addr;
+}
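+
+/*
+ * e.g. with 8 EQCR entries of 64 bytes each, EQCR_CARRY is 0x200; a cursor
+ * incremented past ring + 0x1c0 lands on ring + 0x200, and clearing that
+ * carry bit wraps it back to ring + 0x0.
+ */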
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
+{
+ return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void eqcr_inc(struct qm_eqcr *eqcr)
+{
+ /* increment to the next EQCR pointer and handle overflow and 'vbit' */
+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+ eqcr->cursor = eqcr_carryclear(partial);
+ if (partial != eqcr->cursor)
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+ enum qm_eqcr_pmode pmode,
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u32 cfg;
+ u8 pi;
+
+ eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+ eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ eqcr->cursor = eqcr->ring + pi;
+ eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+ QM_EQCR_VERB_VBIT : 0;
+ eqcr->available = QM_EQCR_SIZE - 1 -
+ dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+ eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+ eqcr->pmode = pmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
+}
+
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
+{
+ return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (pi != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("losing uncommited EQCR entries\n");
+ if (ci != eqcr->ci)
+ pr_crit("missing existing EQCR completions\n");
+ if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available)
+ return NULL;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
+ (QM_EQCR_SIZE - 1);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
+}
+
+static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
+{
+ DPAA_ASSERT(eqcr->busy);
+ DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
+ DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+ DPAA_ASSERT(eqcr->available >= 1);
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eqcursor;
+
+ eqcr_commit_checks(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+ dma_wmb();
+ eqcursor = eqcr->cursor;
+ eqcursor->_ncw_verb = myverb | eqcr->vbit;
+ dpaa_flush(eqcursor);
+ eqcr_inc(eqcr);
+ eqcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+ qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ eqcr->ithresh = ithresh;
+ qm_out(portal, QM_REG_EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return QM_EQCR_SIZE - 1 - eqcr->available;
+}
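+
+/*
+ * Note that available + fill == QM_EQCR_SIZE - 1: one EQCR slot is always
+ * left unused (hence the "- 1") so a full ring can be distinguished from an
+ * empty one.
+ */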
+
+/* --- DQRR API --- */
+
+#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry))
+#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
+
+static const struct qm_dqrr_entry *dqrr_carryclear(
+ const struct qm_dqrr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~DQRR_CARRY;
+
+ return (const struct qm_dqrr_entry *)addr;
+}
+
+static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
+{
+ return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
+}
+
+static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
+{
+ return dqrr_carryclear(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+ qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+ const struct qm_portal_config *config,
+ enum qm_dqrr_dmode dmode,
+ enum qm_dqrr_pmode pmode,
+ enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ u32 cfg;
+
+ /* Make sure the DQRR will be idle when we enable */
+ qm_out(portal, QM_REG_DQRR_SDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_VDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_PDQCR, 0);
+ dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+ dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+ dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+ QM_DQRR_VERB_VBIT : 0;
+ dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ dqrr->dmode = dmode;
+ dqrr->pmode = pmode;
+ dqrr->cmode = cmode;
+#endif
+ /* Invalidate every ring entry before beginning */
+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+ dpaa_invalidate(qm_cl(dqrr->ring, cfg));
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+ ((dmode & 1) << 18) | /* DP */
+ ((cmode & 3) << 16) | /* DCM */
+ 0xa0 | /* RE+SE */
+ (0 ? 0x40 : 0) | /* Ignore RP */
+ (0 ? 0x10 : 0); /* Ignore SP */
+ qm_out(portal, QM_REG_CFG, cfg);
+ qm_dqrr_set_maxfill(portal, max_fill);
+ return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (dqrr->cmode != qm_dqrr_cdc &&
+ dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
+ pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+ struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (!dqrr->fill)
+ return NULL;
+ return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->fill);
+ dqrr->cursor = dqrr_inc(dqrr->cursor);
+ return --dqrr->fill;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#ifndef CONFIG_FSL_PAMU
+ /*
+ * If PAMU is not available we need to invalidate the cache.
+ * When PAMU is available the cache is updated by stash
+ */
+ dpaa_invalidate_touch_ro(res);
+#endif
+ /*
+ * when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+ if (!dqrr->pi)
+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+ dqrr->fill++;
+ }
+}
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+ const struct qm_dqrr_entry *dq,
+ int park)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+ int idx = dqrr_ptr2idx(dq);
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ DPAA_ASSERT((dqrr->ring + idx) == dq);
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
+ idx); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+ (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(portal, QM_REG_DQRR_ITR, ithresh);
+}
+
+/* --- MR API --- */
+
+#define MR_SHIFT ilog2(sizeof(union qm_mr_entry))
+#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
+
+static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~MR_CARRY;
+
+ return (union qm_mr_entry *)addr;
+}
+
+static inline int mr_ptr2idx(const union qm_mr_entry *e)
+{
+ return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
+}
+
+static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
+{
+ return mr_carryclear(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+ enum qm_mr_cmode cmode)
+{
+ struct qm_mr *mr = &portal->mr;
+ u32 cfg;
+
+ mr->ring = portal->addr.ce + QM_CL_MR;
+ mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
+ mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
+ mr->cursor = mr->ring + mr->ci;
+ mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+ mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
+ ? QM_MR_VERB_VBIT : 0;
+ mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mr->pmode = pmode;
+ mr->cmode = cmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (mr->ci != mr_ptr2idx(mr->cursor))
+ pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (!mr->fill)
+ return NULL;
+ return mr->cursor;
+}
+
+static inline int qm_mr_next(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->fill);
+ mr->cursor = mr_inc(mr->cursor);
+ return --mr->fill;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+ union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+ DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+ /*
+ * when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+ if (!mr->pi)
+ mr->vbit ^= QM_MR_VERB_VBIT;
+ mr->fill++;
+ res = mr_inc(res);
+ }
+ dpaa_invalidate_touch_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = mr_ptr2idx(mr->cursor);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(portal, QM_REG_MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + QM_CL_CR;
+ mc->rr = portal->addr.ce + QM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
+ ? 0 : 1;
+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+ if (mc->state != qman_mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_hw;
+#endif
+}
+
+static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return rr;
+}
+
+static inline int qm_mc_result_timeout(struct qm_portal *portal,
+ union qm_mc_result **mcr)
+{
+ int timeout = QM_MCR_TIMEOUT;
+
+ do {
+ *mcr = qm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ return timeout;
+}
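+
+/*
+ * The canonical management-command sequence, as used throughout this file:
+ *
+ *	mcc = qm_mc_start(&p->p);
+ *	mcc->alterfq.fqid = fqid;	(fill in the command)
+ *	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ *	if (!qm_mc_result_timeout(&p->p, &mcr))
+ *		(treat as -ETIMEDOUT)
+ */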
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+ set_bits(mask, &fq->flags);
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+ clear_bits(mask, &fq->flags);
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+ return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+ return !(fq->flags & mask);
+}
+
+struct qman_portal {
+ struct qm_portal p;
+ /* PORTAL_BITS_*** - dynamic, strictly internal */
+ unsigned long bits;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ u32 use_eqcr_ci_stashing;
+ /* only 1 volatile dequeue at a time */
+ struct qman_fq *vdqcr_owned;
+ u32 sdqcr;
+ /* probing time config params for cpu-affine portals */
+ const struct qm_portal_config *config;
+ /* needed for providing a non-NULL device to dma_map_***() */
+ struct platform_device *pdev;
+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+ struct qman_cgrs *cgrs;
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+ spinlock_t cgr_lock;
+ struct work_struct congestion_work;
+ struct work_struct mr_work;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+struct qman_portal *affine_portals[NR_CPUS];
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(qman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(qman_affine_portal);
+}
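+
+/*
+ * get_cpu_var() disables preemption, pinning the caller to its current cpu,
+ * so every get_affine_portal() must be paired with put_affine_portal(), e.g.:
+ *
+ *	p = get_affine_portal();
+ *	... use p ...
+ *	put_affine_portal();
+ */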
+
+static struct workqueue_struct *qm_portal_wq;
+
+int qman_wq_alloc(void)
+{
+ qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
+ if (!qm_portal_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * This is what everything can wait on, even if it migrates to a cpu other
+ * than the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static struct qman_fq **fq_table;
+static u32 num_fqids;
+
+int qman_alloc_fq_table(u32 _num_fqids)
+{
+ num_fqids = _num_fqids;
+
+ fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
+ if (!fq_table)
+ return -ENOMEM;
+
+ pr_debug("Allocated fq lookup table at %p, entry count %u\n",
+ fq_table, num_fqids * 2);
+ return 0;
+}
+
+static struct qman_fq *idx_to_fq(u32 idx)
+{
+ struct qman_fq *fq;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (WARN_ON(idx >= num_fqids * 2))
+ return NULL;
+#endif
+ fq = fq_table[idx];
+ DPAA_ASSERT(!fq || idx == fq->idx);
+
+ return fq;
+}
+
+/*
+ * Only returns full-service fq objects, not enqueue-only
+ * references (QMAN_FQ_FLAG_NO_MODIFY).
+ */
+static struct qman_fq *fqid_to_fq(u32 fqid)
+{
+ return idx_to_fq(fqid * 2);
+}
+
+static struct qman_fq *tag_to_fq(u32 tag)
+{
+#if BITS_PER_LONG == 64
+ return idx_to_fq(tag);
+#else
+ return (struct qman_fq *)tag;
+#endif
+}
+
+static u32 fq_to_tag(struct qman_fq *fq)
+{
+#if BITS_PER_LONG == 64
+ return fq->idx;
+#else
+ return (u32)fq;
+#endif
+}
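+
+/*
+ * On 64-bit systems a qman_fq pointer won't fit in the 32-bit tag/contextB
+ * field, so the FQ's table index is used as the tag instead; on 32-bit the
+ * pointer itself doubles as the tag.
+ */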
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit);
+static void qm_congestion_task(struct work_struct *work);
+static void qm_mr_process_task(struct work_struct *work);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+ struct qman_portal *p = ptr;
+
+ u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
+ u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ /* DQRR-handling if it's interrupt-driven */
+ if (is & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, QMAN_POLL_LIMIT);
+ /* Handling of anything else that's interrupt-driven */
+ clear |= __poll_portal_slow(p, is);
+ qm_out(&p->p, QM_REG_ISR, clear);
+ return IRQ_HANDLED;
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+ const union qm_mr_entry *msg;
+loop:
+ msg = qm_mr_current(p);
+ if (!msg) {
+ /*
+ * if MR was full and h/w had other FQRNI entries to produce, we
+ * need to allow it time to produce those entries once the
+ * existing entries are consumed. A worst-case situation
+ * (fully-loaded system) means h/w sequencers may have to do 3-4
+ * other things before servicing the portal's MR pump, each of
+ * which (if slow) may take ~50 qman cycles (which is ~200
+ * processor cycles). So rounding up and then multiplying this
+ * worst-case estimate by a factor of 10, just to be
+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+ * one entry at a time, so h/w has an opportunity to produce new
+ * entries well before the ring has been fully consumed, so
+ * we're being *really* paranoid here.
+ */
+ u64 now, then = jiffies;
+
+ do {
+ now = jiffies;
+ } while ((then + 10000) > now);
+ msg = qm_mr_current(p);
+ if (!msg)
+ return 0;
+ }
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ /* We aren't draining anything but FQRNIs */
+ pr_err("Found verb 0x%x in MR\n", msg->verb);
+ return -1;
+ }
+ qm_mr_next(p);
+ qm_mr_cci_consume(p, 1);
+ goto loop;
+}
+
+static int qman_create_portal(struct qman_portal *portal,
+ const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qm_portal *p;
+ char buf[16];
+ int ret;
+ u32 isdr;
+
+ p = &portal->p;
+
+#ifdef CONFIG_FSL_PAMU
+ /* PAMU is required for stashing */
+ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+#else
+ portal->use_eqcr_ci_stashing = 0;
+#endif
+ /*
+ * Prep the low-level portal struct with the mapped addresses from the
+ * config; everything that follows depends on it, while "config" itself is
+ * kept mainly for later reference.
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ /*
+ * If CI-stashing is used, the current defaults use a threshold of 3,
+ * and stash with higher-than-DQRR priority.
+ */
+ if (qm_eqcr_init(p, qm_eqcr_pvb,
+ portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+ dev_err(c->dev, "EQCR initialisation failed\n");
+ goto fail_eqcr;
+ }
+ if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ dev_err(c->dev, "DQRR initialisation failed\n");
+ goto fail_dqrr;
+ }
+ if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+ dev_err(c->dev, "MR initialisation failed\n");
+ goto fail_mr;
+ }
+ if (qm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /* static interrupt-gating controls */
+ qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
+ qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
+ qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
+ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+ if (!portal->cgrs)
+ goto fail_cgrs;
+ /* initial snapshot is no-depletion */
+ qman_cgrs_init(&portal->cgrs[1]);
+ if (cgrs)
+ portal->cgrs[0] = *cgrs;
+ else
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+ spin_lock_init(&portal->cgr_lock);
+ INIT_WORK(&portal->congestion_work, qm_congestion_task);
+ INIT_WORK(&portal->mr_work, qm_mr_process_task);
+ portal->bits = 0;
+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+ sprintf(buf, "qportal-%d", c->channel);
+ portal->pdev = platform_device_alloc(buf, -1);
+ if (!portal->pdev)
+ goto fail_devalloc;
+ if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
+ goto fail_devadd;
+ ret = platform_device_add(portal->pdev);
+ if (ret)
+ goto fail_devadd;
+ isdr = 0xffffffff;
+ qm_out(p, QM_REG_ISDR, isdr);
+ portal->irq_sources = 0;
+ qm_out(p, QM_REG_IER, 0);
+ qm_out(p, QM_REG_ISR, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
+ if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+ irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+ dev_err(c->dev, "irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
+
+ /* Need EQCR to be empty before continuing */
+ isdr &= ~QM_PIRQ_EQCI;
+ qm_out(p, QM_REG_ISDR, isdr);
+ ret = qm_eqcr_get_fill(p);
+ if (ret) {
+ dev_err(c->dev, "EQCR unclean\n");
+ goto fail_eqcr_empty;
+ }
+ isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+ qm_out(p, QM_REG_ISDR, isdr);
+ if (qm_dqrr_current(p)) {
+ dev_err(c->dev, "DQRR unclean\n");
+ qm_dqrr_cdc_consume_n(p, 0xffff);
+ }
+ if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+ /* special handling, drain just in case it's a few FQRNIs */
+ const union qm_mr_entry *e = qm_mr_current(p);
+
+ dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x",
+ e->verb, e->ern.rc, e->ern.fd.addr_lo);
+ goto fail_dqrr_mr_empty;
+ }
+ /* Success */
+ portal->config = c;
+ qm_out(p, QM_REG_ISDR, 0);
+ qm_out(p, QM_REG_IIR, 0);
+ /* Write a sane SDQCR */
+ qm_dqrr_sdqcr_set(p, portal->sdqcr);
+ return 0;
+
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+fail_affinity:
+ free_irq(c->irq, portal);
+fail_irq:
+ platform_device_del(portal->pdev);
+fail_devadd:
+ platform_device_put(portal->pdev);
+fail_devalloc:
+ kfree(portal->cgrs);
+fail_cgrs:
+ qm_mc_finish(p);
+fail_mc:
+ qm_mr_finish(p);
+fail_mr:
+ qm_dqrr_finish(p);
+fail_dqrr:
+ qm_eqcr_finish(p);
+fail_eqcr:
+ return -EIO;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qman_portal *portal;
+ int err;
+
+ portal = &per_cpu(qman_affine_portal, c->cpu);
+ err = qman_create_portal(portal, c, cgrs);
+ if (err)
+ return NULL;
+
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ affine_channels[c->cpu] = c->channel;
+ affine_portals[c->cpu] = portal;
+ spin_unlock(&affine_mask_lock);
+
+ return portal;
+}
+
+static void qman_destroy_portal(struct qman_portal *qm)
+{
+ const struct qm_portal_config *pcfg;
+
+ /* Stop dequeues on the portal */
+ qm_dqrr_sdqcr_set(&qm->p, 0);
+
+ /*
+ * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+ * something related to QM_PIRQ_EQCI, this may need fixing.
+ * Also, due to the prefetching model used for CI updates in the enqueue
+ * path, this update will only invalidate the CI cacheline *after*
+ * working on it, so we need to call this twice to ensure a full update
+ * irrespective of where the enqueue processing was at when the teardown
+ * began.
+ */
+ qm_eqcr_cce_update(&qm->p);
+ qm_eqcr_cce_update(&qm->p);
+ pcfg = qm->config;
+
+ free_irq(pcfg->irq, qm);
+
+ kfree(qm->cgrs);
+ qm_mc_finish(&qm->p);
+ qm_mr_finish(&qm->p);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
+ platform_device_del(qm->pdev);
+ platform_device_put(qm->pdev);
+
+ qm->config = NULL;
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+ struct qman_portal *qm = get_affine_portal();
+ const struct qm_portal_config *pcfg;
+ int cpu;
+
+ pcfg = qm->config;
+ cpu = pcfg->cpu;
+
+ qman_destroy_portal(qm);
+
+ spin_lock(&affine_mask_lock);
+ cpumask_clear_cpu(cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ put_affine_portal();
+ return pcfg;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg, u8 verb)
+{
+ switch (verb) {
+ case QM_MR_VERB_FQRL:
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+ fq_clear(fq, QMAN_FQ_STATE_ORL);
+ break;
+ case QM_MR_VERB_FQRN:
+ DPAA_ASSERT(fq->state == qman_fq_state_parked ||
+ fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ fq->state = qman_fq_state_retired;
+ break;
+ case QM_MR_VERB_FQPN:
+ DPAA_ASSERT(fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+ fq->state = qman_fq_state_parked;
+ }
+}
+
+static void qm_congestion_task(struct work_struct *work)
+{
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ congestion_work);
+ struct qman_cgrs rr, c;
+ union qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+ spin_lock(&p->cgr_lock);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ spin_unlock(&p->cgr_lock);
+ dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+ return;
+ }
+ /* mask out the ones I'm not interested in */
+ qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
+ &p->cgrs[0]);
+ /* check previous snapshot for delta, enter/exit congestion */
+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+ /* update snapshot */
+ qman_cgrs_cp(&p->cgrs[1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+ spin_unlock(&p->cgr_lock);
+}
+
+static void qm_mr_process_task(struct work_struct *work)
+{
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ mr_work);
+ const union qm_mr_entry *msg;
+ struct qman_fq *fq;
+ u8 verb, num = 0;
+
+ preempt_disable();
+
+ while (1) {
+ qm_mr_pvb_update(&p->p);
+ msg = qm_mr_current(&p->p);
+ if (!msg)
+ break;
+
+ verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+ /* The message is a software ERN iff the 0x20 bit is clear */
+ if (verb & 0x20) {
+ switch (verb) {
+ case QM_MR_VERB_FQRNI:
+ /* nada, we drop FQRNIs on the floor */
+ break;
+ case QM_MR_VERB_FQRN:
+ case QM_MR_VERB_FQRL:
+ /* Lookup in the retirement table */
+ fq = fqid_to_fq(msg->fq.fqid);
+ if (WARN_ON(!fq))
+ break;
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_FQPN:
+ /* Parked */
+ fq = tag_to_fq(msg->fq.contextB);
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_DC_ERN:
+ /* DCP ERN */
+ pr_crit_once("Leaking DCP ERNs!\n");
+ break;
+ default:
+ pr_crit("Invalid MR verb 0x%02x\n", verb);
+ }
+ } else {
+ /* It's a software ERN */
+ fq = tag_to_fq(msg->ern.tag);
+ fq->cb.ern(p, fq, msg);
+ }
+ num++;
+ qm_mr_next(&p->p);
+ }
+
+ qm_mr_cci_consume(&p->p, num);
+ preempt_enable();
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+ if (is & QM_PIRQ_CSCI) {
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->congestion_work);
+ }
+
+ if (is & QM_PIRQ_EQRI) {
+ qm_eqcr_cce_update(&p->p);
+ qm_eqcr_set_ithresh(&p->p, 0);
+ wake_up(&affine_queue);
+ }
+
+ if (is & QM_PIRQ_MRI) {
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->mr_work);
+ }
+
+ return is;
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+ p->vdqcr_owned = NULL;
+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe, because:
+ *
+ * (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ * vdqcr_owned field (which it does before setting VDQCR), and
+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
+ * done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ * with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ break;
+
+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+ /*
+ * VDQCR: don't trust contextB as the FQ may have
+ * been configured for h/w consumption and we're
+ * draining it post-retirement.
+ */
+ fq = p->vdqcr_owned;
+ /*
+ * We only set QMAN_FQ_STATE_NE when retiring, so we
+ * only need to check for clearing it when doing
+ * volatile dequeues. It's one less thing to check
+ * in the critical path (SDQCR).
+ */
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+ /*
+ * This is duplicated from the SDQCR code, but we
+ * have stuff to do before *and* after this callback,
+ * and we don't want multiple if()s in the critical
+ * path (SDQCR).
+ */
+ res = fq->cb.dqrr(p, fq, dq);
+ if (res == qman_cb_dqrr_stop)
+ break;
+ /* Check for VDQCR completion */
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+ } else {
+ /* SDQCR: contextB points to the FQ */
+ fq = tag_to_fq(dq->contextB);
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr(p, fq, dq);
+ /*
+ * The callback can request that we exit without
+ * consuming this entry nor advancing;
+ */
+ if (res == qman_cb_dqrr_stop)
+ break;
+ }
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ /* just means "skip it, I'll consume it myself later on" */
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+ return limit;
+}
+
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+ u32 ier;
+
+ /*
+ * Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert.
+ */
+ local_irq_save(irqflags);
+ bits &= QM_PIRQ_VISIBLE;
+ clear_bits(bits, &p->irq_sources);
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ ier = qm_in(&p->p, QM_REG_IER);
+ /*
+ * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering.
+ */
+ qm_out(&p->p, QM_REG_ISR, ~ier);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+const cpumask_t *qman_affine_cpus(void)
+{
+ return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+ if (cpu < 0) {
+ struct qman_portal *portal = get_affine_portal();
+
+ cpu = portal->config->cpu;
+ put_affine_portal();
+ }
+ WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
+ return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+
+struct qman_portal *qman_get_affine_portal(int cpu)
+{
+ return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+ return __poll_portal_fast(p, limit);
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ pools &= p->config->pools;
+ p->sdqcr |= pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+/* Frame queue API */
+
+static const char *mcr_result_str(u8 result)
+{
+ switch (result) {
+ case QM_MCR_RESULT_NULL:
+ return "QM_MCR_RESULT_NULL";
+ case QM_MCR_RESULT_OK:
+ return "QM_MCR_RESULT_OK";
+ case QM_MCR_RESULT_ERR_FQID:
+ return "QM_MCR_RESULT_ERR_FQID";
+ case QM_MCR_RESULT_ERR_FQSTATE:
+ return "QM_MCR_RESULT_ERR_FQSTATE";
+ case QM_MCR_RESULT_ERR_NOTEMPTY:
+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
+ case QM_MCR_RESULT_PENDING:
+ return "QM_MCR_RESULT_PENDING";
+ case QM_MCR_RESULT_ERR_BADCOMMAND:
+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
+ }
+ return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+ int ret = qman_alloc_fqid(&fqid);
+
+ if (ret)
+ return ret;
+ }
+ fq->fqid = fqid;
+ fq->flags = flags;
+ fq->state = qman_fq_state_oos;
+ fq->cgr_groupid = 0;
+
+ /* A context_b of 0 is allegedly special, so don't use that fqid */
+ if (fqid == 0 || fqid >= num_fqids) {
+ WARN(1, "bad fqid %d\n", fqid);
+ return -EINVAL;
+ }
+
+ fq->idx = fqid * 2;
+ if (flags & QMAN_FQ_FLAG_NO_MODIFY)
+ fq->idx++;
+
+ WARN_ON(fq_table[fq->idx]);
+ fq_table[fq->idx] = fq;
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_create_fq);
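+
+/*
+ * Minimal usage sketch (fqid, flags and my_dqrr_cb are caller-chosen and
+ * purely illustrative):
+ *
+ *	struct qman_fq fq = { .cb.dqrr = my_dqrr_cb };
+ *
+ *	err = qman_create_fq(fqid, 0, &fq);
+ *	if (!err)
+ *		err = qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, NULL);
+ */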
+
+void qman_destroy_fq(struct qman_fq *fq)
+{
+ /*
+ * We don't need to lock the FQ as it is a pre-condition that the FQ be
+ * quiesced. Instead, run some checks.
+ */
+ switch (fq->state) {
+ case qman_fq_state_parked:
+ case qman_fq_state_oos:
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+ qman_release_fqid(fq->fqid);
+
+ DPAA_ASSERT(fq_table[fq->idx]);
+ fq_table[fq->idx] = NULL;
+ return;
+ default:
+ break;
+ }
+ DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+ return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ u8 res, myverb;
+ int ret = 0;
+
+ myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
+ ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+ if (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+ /* And can't be set at the same time as TDTHRESH */
+ if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ return -EINVAL;
+ }
+ /* Issue an INITFQ_[PARKED|SCHED] management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initfq = *opts;
+ mcc->initfq.fqid = fq->fqid;
+ mcc->initfq.count = 0;
+ /*
+ * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+ * demux pointer. Otherwise, the caller-provided value is allowed to
+ * stand, don't overwrite it.
+ */
+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+ dma_addr_t phys_fq;
+
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+ mcc->initfq.fqd.context_b = fq_to_tag(fq);
+ /*
+ * and the physical address - NB, if the user wasn't trying to
+ * set CONTEXTA, clear the stashing settings.
+ */
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ memset(&mcc->initfq.fqd.context_a, 0,
+ sizeof(mcc->initfq.fqd.context_a));
+ } else {
+ phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
+ DMA_TO_DEVICE);
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+ }
+ }
+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+ int wq = 0;
+
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ wq = 4;
+ }
+ qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
+ }
+ qm_mc_commit(&p->p, myverb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "MCR timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ if (opts) {
+ if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+ if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ else
+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+ }
+ if (opts->we_mask & QM_INITFQ_WE_CGID)
+ fq->cgr_groupid = opts->fqd.cgid;
+ }
+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ qman_fq_state_sched : qman_fq_state_parked;
+
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_init_fq);
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
+
+ if (fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ /* Issue an ALTERFQ_SCHED management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state != qman_fq_state_parked) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "ALTER_SCHED timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_sched;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret;
+ u8 res;
+
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_sched)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state == qman_fq_state_retired ||
+ fq->state == qman_fq_state_oos) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result;
+ /*
+ * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
+ * friendly, otherwise the caller doesn't necessarily have a fully
+ * "retired" FQ on return even if the retirement was immediate. However
+ * this does mean some code duplication between here and
+ * fq_state_change().
+ */
+ if (res == QM_MCR_RESULT_OK) {
+ ret = 0;
+ /* Process 'fq' right away, we'll ignore FQRNI */
+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ if (flags)
+ *flags = fq->flags;
+ fq->state = qman_fq_state_retired;
+ if (fq->cb.fqs) {
+ /*
+ * Another issue with supporting "immediate" retirement
+ * is that we're forced to drop FQRNIs, because by the
+ * time they're seen it may already be "too late" (the
+ * fq may have been OOS'd and free()'d already). But if
+ * the upper layer wants a callback whether it's
+ * immediate or not, we have to fake a "MR" entry to
+ * look like an FQRNI...
+ */
+ union qm_mr_entry msg;
+
+ msg.verb = QM_MR_VERB_FQRNI;
+ msg.fq.fqs = mcr->alterfq.fqs;
+ msg.fq.fqid = fq->fqid;
+ msg.fq.contextB = fq_to_tag(fq);
+ fq->cb.fqs(p, fq, &msg);
+ }
+ } else if (res == QM_MCR_RESULT_PENDING) {
+ ret = 1;
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ } else {
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_retire_fq);
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
+
+ if (fq->state != qman_fq_state_retired)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
+ fq->state != qman_fq_state_retired) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_oos;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *fqd = mcr->queryfq.fqd;
+ else
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
+}
+
+static int qman_query_fq_np(struct qman_fq *fq,
+ struct qm_mcr_queryfq_np *np)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *np = mcr->queryfq_np;
+ else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+ ret = -ERANGE;
+ else
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
+}
+
+static int qman_query_cgr(struct qman_cgr *cgr,
+ struct qm_mcr_querycgr *cgrd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->querycgr.cgid = cgr->cgrid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+ if (mcr->result == QM_MCR_RESULT_OK) {
+ *cgrd = mcr->querycgr;
+ } else {
+ dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
+ mcr_result_str(mcr->result));
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
+}
+
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
+{
+ struct qm_mcr_querycgr query_cgr;
+ int err;
+
+ err = qman_query_cgr(cgr, &query_cgr);
+ if (err)
+ return err;
+
+ *result = !!query_cgr.cgr.cs;
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr_congested);
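+
+/*
+ * Illustrative usage sketch (not part of this patch): poll the group's
+ * congestion state and back off while it is congested. my_throttle() is
+ * a hypothetical caller-side helper.
+ *
+ * bool congested;
+ *
+ * if (!qman_query_cgr_congested(&cgr, &congested) && congested)
+ * my_throttle();
+ */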
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+ unsigned long irqflags;
+ int ret = -EBUSY;
+
+ local_irq_save(irqflags);
+ if (p->vdqcr_owned)
+ goto out;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto out;
+
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ p->vdqcr_owned = fq;
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ ret = 0;
+out:
+ local_irq_restore(irqflags);
+ return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+ int ret;
+
+ *p = get_affine_portal();
+ ret = set_p_vdqcr(*p, fq, vdqcr);
+ put_affine_portal();
+ return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+ u32 vdqcr, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ ret = wait_event_interruptible(affine_queue,
+ !set_vdqcr(p, fq, vdqcr));
+ else
+ wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
+ return ret;
+}
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
+{
+ struct qman_portal *p;
+ int ret;
+
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_retired)
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
+ ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+ else
+ ret = set_vdqcr(&p, fq, vdqcr);
+ if (ret)
+ return ret;
+ /* VDQCR is set */
+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ /*
+ * NB: don't propagate any error - the caller wouldn't
+ * know whether the VDQCR was issued or not. A signal
+ * could arrive after returning anyway, so the caller
+ * can check signal_pending() if that's an issue.
+ */
+ wait_event_interruptible(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ else
+ wait_event(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
+
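+/*
+ * Refresh our snapshot of the EQCR consumer index: if entries still
+ * appear free we only prefetch the CI cacheline, otherwise we must
+ * re-read CI to learn how many entries hardware has consumed.
+ */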
+static void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+ if (avail)
+ qm_eqcr_cce_prefetch(&p->p);
+ else
+ qm_eqcr_cce_update(&p->p);
+}
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
+{
+ struct qman_portal *p;
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags;
+ u8 avail;
+ int ret = 0;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+
+ if (p->use_eqcr_ci_stashing) {
+ /*
+ * The stashing case is easy, only update if we need to in
+ * order to try and liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&p->p);
+ } else {
+ /*
+ * The non-stashing case is harder, need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&p->p);
+ if (avail < 2)
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
+ }
+
+ if (unlikely(!eq)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ eq->fqid = fq->fqid;
+ eq->tag = fq_to_tag(fq);
+ eq->fd = *fd;
+
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
+out:
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_enqueue);
+
+static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 verb = QM_MCC_VERB_MODIFYCGR;
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initcgr = *opts;
+ mcc->initcgr.cgid = cgr->cgrid;
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ verb = QM_MCC_VERB_INITCGR;
+ qm_mc_commit(&p->p, verb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+ if (mcr->result != QM_MCR_RESULT_OK)
+ ret = -EIO;
+
+out:
+ put_affine_portal();
+ return ret;
+}
+
+#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
+#define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n))
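+/* each software portal owns one (MSB-first) bit in a CGR's CSCN_TARG field */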
+
+static u8 qman_cgr_cpus[CGR_NUM];
+
+void qman_init_cgr_all(void)
+{
+ struct qman_cgr cgr;
+ int err_cnt = 0;
+
+ for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
+ if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
+ err_cnt++;
+ }
+
+ if (err_cnt)
+ pr_err("Warning: %d error%s while initialising CGR h/w\n",
+ err_cnt, (err_cnt > 1) ? "s" : "");
+}
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts = {};
+ int ret;
+ struct qman_portal *p;
+
+ /*
+ * The provided CGRID must be within the bounds of our own tracking
+ * data-structures (e.g. qman_cgr_cpus[]); we let h/w determine whether
+ * it is within the limits of what actually exists on the SoC.
+ */
+ if (cgr->cgrid >= CGR_NUM)
+ return -EINVAL;
+
+ preempt_disable();
+ p = get_affine_portal();
+ qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+ preempt_enable();
+
+ cgr->chan = p->config->channel;
+ spin_lock(&p->cgr_lock);
+
+ if (opts) {
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ goto out;
+ local_opts = *opts;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+ else
+ /* Overwrite TARG */
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_MASK(p);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+ &local_opts);
+ else
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ goto out;
+ }
+
+ list_add(&cgr->node, &p->cgr_cbs);
+
+ /* Determine if newly added object requires its callback to be called */
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* we can't go back, so proceed and return success */
+ dev_err(p->config->dev, "CGR HW state partially modified\n");
+ ret = 0;
+ goto out;
+ }
+ if (cgr->cb && cgr_state.cgr.cscn_en &&
+ qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+out:
+ spin_unlock(&p->cgr_lock);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ unsigned long irqflags;
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = get_affine_portal();
+
+ if (cgr->chan != p->config->channel) {
+ /* attempt to delete from other portal than creator */
+ dev_err(p->config->dev, "CGR not owned by current portal");
+ dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
+ cgr->chan, p->config->channel);
+
+ ret = -EINVAL;
+ goto put_portal;
+ }
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list,
+ * update CSCN_TARG accordingly
+ */
+ list_for_each_entry(i, &p->cgr_cbs, node)
+ if (i->cgrid == cgr->cgrid && i->cb)
+ goto release_lock;
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ goto release_lock;
+ }
+ /* Overwrite TARG */
+ local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+ ~(TARG_MASK(p));
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+struct cgr_comp {
+ struct qman_cgr *cgr;
+ struct completion completion;
+};
+
+static int qman_delete_cgr_thread(void *p)
+{
+ struct cgr_comp *cgr_comp = p;
+ int ret;
+
+ ret = qman_delete_cgr(cgr_comp->cgr);
+ complete(&cgr_comp->completion);
+
+ return ret;
+}
+
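+/*
+ * A CGR must be torn down via the portal affine to the CPU it was
+ * created on (that portal's cgr_cbs list holds it). If we find ourselves
+ * on a different CPU, hand the deletion to a kthread bound to the
+ * creating CPU and wait for it to complete.
+ */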
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
+ struct task_struct *thread;
+ struct cgr_comp cgr_comp;
+
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+ init_completion(&cgr_comp.completion);
+ cgr_comp.cgr = cgr;
+ thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
+ "cgr_del");
+
+ if (IS_ERR(thread))
+ goto out;
+
+ kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
+ wake_up_process(thread);
+ wait_for_completion(&cgr_comp.completion);
+ preempt_enable();
+ return;
+ }
+out:
+ qman_delete_cgr(cgr);
+ preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+
+/* Cleanup FQs */
+
+static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
+{
+ const union qm_mr_entry *msg;
+ int found = 0;
+
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ while (msg) {
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
+ found = 1;
+ qm_mr_next(p);
+ qm_mr_cci_consume_to_current(p);
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ }
+ return found;
+}
+
+static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
+ bool wait)
+{
+ const struct qm_dqrr_entry *dqrr;
+ int found = 0;
+
+ do {
+ qm_dqrr_pvb_update(p);
+ dqrr = qm_dqrr_current(p);
+ if (!dqrr)
+ cpu_relax();
+ } while (wait && !dqrr);
+
+ while (dqrr) {
+ if (dqrr->fqid == fqid && (dqrr->stat & s))
+ found = 1;
+ qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
+ qm_dqrr_pvb_update(p);
+ qm_dqrr_next(p);
+ dqrr = qm_dqrr_current(p);
+ }
+ return found;
+}
+
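+/*
+ * Drain helpers: consume everything currently held in the MR or DQRR,
+ * reporting whether an entry matching the given verb/FQID/status was
+ * seen along the way.
+ */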
+#define qm_mr_drain(p, V) \
+ _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
+
+#define qm_dqrr_drain(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
+
+#define qm_dqrr_drain_wait(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
+
+#define qm_dqrr_drain_nomatch(p) \
+ _qm_dqrr_consume_and_match(p, 0, 0, false)
+
+static int qman_shutdown_fq(u32 fqid)
+{
+ struct qman_portal *p;
+ struct device *dev;
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ int orl_empty, drain = 0, ret = 0;
+ u32 channel, wq, res;
+ u8 state;
+
+ p = get_affine_portal();
+ dev = p->config->dev;
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq_np.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ_NP timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+ goto out; /* Already OOS, no need to do any more checks */
+
+ /* Query which channel the FQ is using */
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ /* Need to store these since the MCR gets reused */
+ channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
+ wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+
+ switch (state) {
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ case QM_MCR_NP_STATE_PARKED:
+ orl_empty = 0;
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ_NP timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result; /* Make a copy as we reuse MCR below */
+
+ if (res == QM_MCR_RESULT_PENDING) {
+ /*
+ * Need to wait for the FQRN in the message ring, which
+ * will only occur once the FQ has been drained. In
+ * order for the FQ to drain the portal needs to be set
+ * to dequeue from the channel the FQ is scheduled on
+ */
+ int found_fqrn = 0;
+ u16 dequeue_wq = 0;
+
+ /* Flag that we need to drain FQ */
+ drain = 1;
+
+ if (channel >= qm_channel_pool1 &&
+ channel < qm_channel_pool1 + 15) {
+ /* Pool channel, enable the bit in the portal */
+ dequeue_wq = (channel -
+ qm_channel_pool1 + 1)<<4 | wq;
+ } else if (channel < qm_channel_pool1) {
+ /* Dedicated channel */
+ dequeue_wq = wq;
+ } else {
+ dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
+ fqid, channel);
+ ret = -EBUSY;
+ goto out;
+ }
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ qm_dqrr_sdqcr_set(&p->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ qm_dqrr_sdqcr_set(&p->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
+ do {
+ /* Keep draining DQRR while checking the MR */
+ qm_dqrr_drain_nomatch(&p->p);
+ /* Process message ring too */
+ found_fqrn = qm_mr_drain(&p->p, FQRN);
+ cpu_relax();
+ } while (!found_fqrn);
+
+ }
+ if (res != QM_MCR_RESULT_OK &&
+ res != QM_MCR_RESULT_PENDING) {
+ dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
+ fqid, res);
+ ret = -EIO;
+ goto out;
+ }
+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+ /*
+ * ORL had no entries, no need to wait until the
+ * ERNs come in
+ */
+ orl_empty = 1;
+ }
+ /*
+ * Retirement succeeded, check to see if FQ needs
+ * to be drained
+ */
+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+ /* FQ is Not Empty, drain using volatile DQ commands */
+ do {
+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ /*
+ * Wait for a dequeue and process the dequeues,
+ * making sure to empty the ring completely
+ */
+ } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+ }
+ qm_dqrr_sdqcr_set(&p->p, 0);
+
+ while (!orl_empty) {
+ /* Wait for the ORL to have been completely drained */
+ orl_empty = qm_mr_drain(&p->p, FQRL);
+ cpu_relax();
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_RETIRED:
+ /* Send OOS Command */
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result) {
+ dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_OOS:
+ /* Done */
+ break;
+
+ default:
+ ret = -EIO;
+ }
+
+out:
+ put_affine_portal();
+ return ret;
+}
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal)
+{
+ return portal->config;
+}
+
+struct gen_pool *qm_fqalloc; /* FQID allocator */
+struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+
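+/*
+ * genalloc cannot return the value 0, so every ID range is seeded with
+ * DPAA_GENALLOC_OFF set; the offset is masked off again on allocation
+ * and re-applied when freeing back into the pool.
+ */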
+static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
+{
+ unsigned long addr;
+
+ addr = gen_pool_alloc(p, cnt);
+ if (!addr)
+ return -ENOMEM;
+
+ *result = addr & ~DPAA_GENALLOC_OFF;
+
+ return 0;
+}
+
+int qman_alloc_fqid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_fqalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+int qman_alloc_pool_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_qpalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+int qman_alloc_cgrid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_cgralloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
+
+int qman_release_fqid(u32 fqid)
+{
+ int ret = qman_shutdown_fq(fqid);
+
+ if (ret) {
+ pr_debug("FQID %d leaked\n", fqid);
+ return ret;
+ }
+
+ gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_fqid);
+
+static int qpool_cleanup(u32 qp)
+{
+ /*
+ * We query all FQDs starting from QM_FQID_RANGE_START until we get an
+ * "invalid FQID" error, looking for non-OOS FQDs whose destination
+ * channel is the pool-channel being released. When a non-OOS FQD is
+ * found we attempt to clean it up.
+ */
+ struct qman_fq fq = {
+ .fqid = QM_FQID_RANGE_START
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return 0;
+ if (qm_fqd_get_chan(&fqd) == qp) {
+ /* The channel is the FQ's target, clean it */
+ err = qman_shutdown_fq(fq.fqid);
+ if (err)
+ /*
+ * Couldn't shut down the FQ
+ * so the pool must be leaked
+ */
+ return err;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+int qman_release_pool(u32 qp)
+{
+ int ret;
+
+ ret = qpool_cleanup(qp);
+ if (ret) {
+ pr_debug("CHID %d leaked\n", qp);
+ return ret;
+ }
+
+ gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_pool);
+
+static int cgr_cleanup(u32 cgrid)
+{
+ /*
+ * query all FQDs starting from FQID 1 until we get an "invalid FQID"
+ * error, looking for non-OOS FQDs whose CGR is the CGR being released
+ */
+ struct qman_fq fq = {
+ .fqid = 1
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return 0;
+ if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+ fqd.cgid == cgrid) {
+ pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
+ cgrid, fq.fqid);
+ return -EIO;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+int qman_release_cgrid(u32 cgrid)
+{
+ int ret;
+
+ ret = cgr_cleanup(cgrid);
+ if (ret) {
+ pr_debug("CGRID %d leaked\n", cgrid);
+ return ret;
+ }
+
+ gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_cgrid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
new file mode 100644
index 000000000000..0cace9e0077e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -0,0 +1,808 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
+#define REG_DD_CFG 0x0200
+#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC 0x0400
+#define REG_PFDR_FP_HEAD 0x0404
+#define REG_PFDR_FP_TAIL 0x0408
+#define REG_PFDR_FP_LWIT 0x0410
+#define REG_PFDR_CFG 0x0414
+#define REG_SFDR_CFG 0x0500
+#define REG_SFDR_IN_USE 0x0504
+#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID 0x0630
+#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_ECIR2 0x0a0c
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_MCR 0x0b00
+#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG 0x0be0
+#define REG_HID_CFG 0x0bf0
+#define REG_IDLE_STAT 0x0bf4
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FQD_BARE 0x0c00
+#define REG_PFDR_BARE 0x0c20
+#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE 0x0c80
+#define REG_QCSP_BAR 0x0c84
+#define REG_CI_SCHED_CFG 0x0d00
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_CI_RLM_AVG 0x0d14
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR 0x01000000
+#define MCR_get_rslt(v) ((u8)((v) >> 24))
+#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r) ((r) == 0xf0)
+#define MCR_rslt_eaccess(r) ((r) == 0xf8)
+#define MCR_rslt_inval(r) ((r) == 0xff)
+
+/*
+ * Corenet initiator settings. Stash request queues are 4-deep to match cores
+ * ability to snarf. Stash priority is 3, other priorities are 2.
+ */
+#define QM_CI_SCHED_CFG_SRCCIV 4
+#define QM_CI_SCHED_CFG_SRQ_W 3
+#define QM_CI_SCHED_CFG_RW_W 2
+#define QM_CI_SCHED_CFG_BMAN_W 2
+/* write SRCCIV enable */
+#define QM_CI_SCHED_CFG_SRCCIV_EN BIT(31)
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+ qm_wq_portal = 0,
+ qm_wq_pool = 1,
+ qm_wq_fman0 = 2,
+ qm_wq_fman1 = 3,
+ qm_wq_caam = 4,
+ qm_wq_pme = 5,
+ qm_wq_first = qm_wq_portal,
+ qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+ qm_memory_fqd,
+ qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
+#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
+#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
+#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
+#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
+#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+ QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+ QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IFSI)
+
+struct qm_ecir {
+ u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
+};
+
+static bool qm_ecir_is_dcp(const struct qm_ecir *p)
+{
+ return p->info & BIT(29);
+}
+
+static int qm_ecir_get_pnum(const struct qm_ecir *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_ecir_get_fqid(const struct qm_ecir *p)
+{
+ return p->info & (BIT(24) - 1);
+}
+
+struct qm_ecir2 {
+ u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
+};
+
+static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
+{
+ return p->info & BIT(31);
+}
+
+static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
+{
+ return p->info & (BIT(10) - 1);
+}
+
+struct qm_eadr {
+ u32 info; /* memid[24-27], eadr[0-11] */
+ /* v3: memid[24-28], eadr[0-15] */
+};
+
+static int qm_eadr_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0xf;
+}
+
+static int qm_eadr_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(12) - 1);
+}
+
+static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(16) - 1);
+}
+
+struct qman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+ { QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
+ { QM_EIRQ_CTDE, "Corenet Target Data Error" },
+ { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
+ { QM_EIRQ_PLWI, "PFDR Low Watermark" },
+ { QM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { QM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
+ { QM_EIRQ_ICVI, "Invalid Command Verb" },
+ { QM_EIRQ_IFSI, "Invalid Flow Control State" },
+ { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
+ { QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
+ { QM_EIRQ_IDSI, "Invalid Dequeue Source" },
+ { QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
+ { QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
+ { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
+ { QM_EIRQ_IESI, "Invalid Enqueue State" },
+ { QM_EIRQ_IECI, "Invalid Enqueue Channel" },
+ { QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
+};
+
+struct qman_error_info_mdata {
+ u16 addr_mask;
+ u16 bits;
+ const char *txt;
+};
+
+static const struct qman_error_info_mdata error_mdata[] = {
+ { 0x01FF, 24, "FQD cache tag memory 0" },
+ { 0x01FF, 24, "FQD cache tag memory 1" },
+ { 0x01FF, 24, "FQD cache tag memory 2" },
+ { 0x01FF, 24, "FQD cache tag memory 3" },
+ { 0x0FFF, 512, "FQD cache memory" },
+ { 0x07FF, 128, "SFDR memory" },
+ { 0x01FF, 72, "WQ context memory" },
+ { 0x00FF, 240, "CGR memory" },
+ { 0x00FF, 302, "Internal Order Restoration List memory" },
+ { 0x01FF, 256, "SW portal ring memory" },
+};
+
+#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of QMan registers I have not yet covered:
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Pointer to the start of the QMan's CCSR space */
+static u32 __iomem *qm_ccsr_start;
+/* An SDQCR mask comprising all the available/visible pool channels */
+static u32 qm_pools_sdqcr;
+
+static inline u32 qm_ccsr_in(u32 offset)
+{
+ return ioread32be(qm_ccsr_start + offset/4);
+}
+
+static inline void qm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, qm_ccsr_start + offset/4);
+}
+
+u32 qm_get_pools_sdqcr(void)
+{
+ return qm_pools_sdqcr;
+}
+
+enum qm_dc_portal {
+ qm_dc_portal_fman0 = 0,
+ qm_dc_portal_fman1 = 1
+};
+
+static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
+{
+ DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
+ portal == qm_dc_portal_fman1);
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+ else
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
+ u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
+ u8 csw5, u8 csw6, u8 csw7)
+{
+ qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+ ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+ ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+ ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
+
+static void qm_set_hid(void)
+{
+ qm_ccsr_out(REG_HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(void)
+{
+ qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
+ (QM_CI_SCHED_CFG_SRCCIV << 24) |
+ (QM_CI_SCHED_CFG_SRQ_W << 8) |
+ (QM_CI_SCHED_CFG_RW_W << 4) |
+ QM_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = qm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+#define PFDR_AR_EN BIT(31)
+static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+{
+ u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+ u32 exp = ilog2(size);
+
+ /* choke if size isn't within range */
+ DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
+ is_power_of_2(size));
+ /* choke if 'ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+ qm_ccsr_out(offset, upper_32_bits(ba));
+ qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
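+ /* the AR size field encodes log2(size) - 1, e.g. 4KB => 11, 1GB => 29 */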
+ qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+}
+
+static void qm_set_pfdr_threshold(u32 th, u8 k)
+{
+ qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
+ qm_ccsr_out(REG_PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(u16 th)
+{
+ qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
+{
+ u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+
+ DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+ /* Make sure the command interface is 'idle' */
+ if (!MCR_rslt_idle(rslt)) {
+ dev_crit(dev, "QMAN_MCR isn't idle");
+ WARN_ON(1);
+ }
+
+ /* Write the MCR command params then the verb */
+ qm_ccsr_out(REG_MCP(0), pfdr_start);
+ /*
+ * TODO: remove this - it's a workaround for a model bug that is
+ * corrected in more recent versions. We use the workaround until
+ * everyone has upgraded.
+ */
+ qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
+ dma_wmb();
+ qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
+ /* Poll for the result */
+ do {
+ rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+ } while (!MCR_rslt_idle(rslt));
+ if (MCR_rslt_ok(rslt))
+ return 0;
+ if (MCR_rslt_eaccess(rslt))
+ return -EACCES;
+ if (MCR_rslt_inval(rslt))
+ return -EINVAL;
+ dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+ return -ENODEV;
+}
+
+/*
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+ fqd_a = rmem->base;
+ fqd_sz = rmem->size;
+
+ WARN_ON(!(fqd_a && fqd_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+ pfdr_a = rmem->base;
+ pfdr_sz = rmem->size;
+
+ WARN_ON(!(pfdr_a && pfdr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
+
+static unsigned int qm_get_fqid_maxcnt(void)
+{
+ return fqd_sz / 64;
+}
+
+/*
+ * Flush this memory range from data cache so that QMAN originated
+ * transactions for this memory region could be marked non-coherent.
+ */
+static int zero_priv_mem(struct device *dev, struct device_node *node,
+ phys_addr_t addr, size_t sz)
+{
+ /* map as cacheable, non-guarded */
+ void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+
+ memset_io(tmpp, 0, sz);
+ flush_dcache_range((unsigned long)tmpp,
+ (unsigned long)tmpp + sz);
+ iounmap(tmpp);
+
+ return 0;
+}
+
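+/*
+ * The captured ECC data occupies only the low-order bit_count bits of
+ * the sixteen EDATA registers; print the partial word masked, then the
+ * remaining full words.
+ */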
+static void log_edata_bits(struct device *dev, u32 bit_count)
+{
+ u32 i, j, mask = 0xffffffff;
+
+ dev_warn(dev, "ErrInt, EDATA:\n");
+ i = bit_count / 32;
+ if (bit_count % 32) {
+ i++;
+ mask = ~(mask << bit_count % 32);
+ }
+ j = 16 - i;
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
+ j++;
+ for (; j < 16; j++)
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
+}
+
+static void log_additional_error_info(struct device *dev, u32 isr_val,
+ u32 ecsr_val)
+{
+ struct qm_ecir ecir_val;
+ struct qm_eadr eadr_val;
+ int memid;
+
+ ecir_val.info = qm_ccsr_in(REG_ECIR);
+ /* Is portal info valid */
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ struct qm_ecir2 ecir2_val;
+
+ ecir2_val.info = qm_ccsr_in(REG_ECIR2);
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
+ qm_ecir2_get_pnum(&ecir2_val));
+ }
+ if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_v3_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_v3_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ } else {
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
+ qm_ecir_get_pnum(&ecir_val));
+ }
+ if (ecsr_val & FQID_ECSR_ERR)
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ }
+}
+
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = qm_ccsr_in(REG_ERR_IER);
+ isr_val = qm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = qm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
+ if (qman_hwerr_txts[i].mask & isr_mask) {
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ qman_hwerr_txts[i].txt);
+ if (qman_hwerr_txts[i].mask & ecsr_val) {
+ log_additional_error_info(dev, isr_mask,
+ ecsr_val);
+ /* Re-arm error capture registers */
+ qm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ qman_hwerr_txts[i].mask);
+ ier_val &= ~qman_hwerr_txts[i].mask;
+ qm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ qm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int qman_init_ccsr(struct device *dev)
+{
+ int i, err;
+
+ /* FQD memory */
+ qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+ /* PFDR memory */
+ qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+ err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+ if (err)
+ return err;
+ /* thresholds */
+ qm_set_pfdr_threshold(512, 64);
+ qm_set_sfdr_threshold(128);
+ /* clear stale PEBI bit from interrupt status register */
+ qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
+ /* corenet initiator settings */
+ qm_set_corenet_initiator();
+ /* HID settings */
+ qm_set_hid();
+ /* Set scheduling weights to defaults */
+ for (i = qm_wq_first; i <= qm_wq_last; i++)
+ qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
+ /* We are not prepared to accept ERNs for hardware enqueues */
+ qm_set_dc(qm_dc_portal_fman0, 1, 0);
+ qm_set_dc(qm_dc_portal_fman1, 1, 0);
+ return 0;
+}
+
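+/*
+ * All software portals must use the LIODN offset captured from the first
+ * portal probed; subsequent calls rewrite each channel's LIO_CFG with it.
+ */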
+#define LIO_CFG_LIODN_MASK 0x0fff0000
+void qman_liodn_fixup(u16 channel)
+{
+ static int done;
+ static u32 liodn_offset;
+ u32 before, after;
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
+ else
+ before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
+ if (!done) {
+ liodn_offset = before & LIO_CFG_LIODN_MASK;
+ done = 1;
+ return;
+ }
+ after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
+ else
+ qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+void qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+ u32 before, after;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
+ /* Each pair of vCPUs shares the same SRQ (SDEST) */
+ cpu_idx /= 2;
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
+ } else {
+ before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
+ }
+}
+
+static int qman_resource_init(struct device *dev)
+{
+ int pool_chan_num, cgrid_num;
+ int ret, i;
+
+ switch (qman_ip_rev >> 8) {
+ case 1:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ case 2:
+ pool_chan_num = 3;
+ cgrid_num = 64;
+ break;
+ case 3:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
+ pool_chan_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
+ return ret;
+ }
+
+ ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
+ return ret;
+ }
+
+ /* parse pool channels into the SDQCR mask */
+ for (i = 0; i < pool_chan_num; i++)
+ qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);
+
+ ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
+ qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fsl_qman_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ int ret, err_irq;
+ u16 id;
+ u8 major, minor;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+ qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+ if (!qm_ccsr_start)
+ return -ENXIO;
+
+ qm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
+ return -ENODEV;
+ } else if (major == 1 && minor == 1)
+ qman_ip_rev = QMAN_REV11;
+ else if (major == 1 && minor == 2)
+ qman_ip_rev = QMAN_REV12;
+ else if (major == 2 && minor == 0)
+ qman_ip_rev = QMAN_REV20;
+ else if (major == 3 && minor == 0)
+ qman_ip_rev = QMAN_REV30;
+ else if (major == 3 && minor == 1)
+ qman_ip_rev = QMAN_REV31;
+ else {
+ dev_err(dev, "Unknown QMan version\n");
+ return -ENODEV;
+ }
+
+ if ((qman_ip_rev & 0xff00) >= QMAN_REV30)
+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+
+ ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
+ WARN_ON(ret);
+ if (ret)
+ return -ENODEV;
+
+ ret = qman_init_ccsr(dev);
+ if (ret) {
+ dev_err(dev, "CCSR setup failed\n");
+ return ret;
+ }
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+ dev_info(dev, "Can't get %s property 'interrupts'\n",
+ node->full_name);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+ ret, node->full_name);
+ return ret;
+ }
+
+ /*
+ * Write-to-clear any stale bits, (eg. starvation being asserted prior
+ * to resource allocation during driver init).
+ */
+ qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ qm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
+ if (IS_ERR(qm_fqalloc)) {
+ ret = PTR_ERR(qm_fqalloc);
+ dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
+ if (IS_ERR(qm_qpalloc)) {
+ ret = PTR_ERR(qm_qpalloc);
+ dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
+ if (IS_ERR(qm_cgralloc)) {
+ ret = PTR_ERR(qm_cgralloc);
+ dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = qman_resource_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
+ if (ret)
+ return ret;
+
+ ret = qman_wq_alloc();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id fsl_qman_ids[] = {
+ {
+ .compatible = "fsl,qman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_qman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_qman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_qman_probe,
+};
+
+builtin_platform_driver(fsl_qman_driver);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644
index 000000000000..148614388fca
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -0,0 +1,355 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/* Enable portal interrupts (as opposed to polling mode) */
+#define CONFIG_FSL_DPA_PIRQ_SLOW 1
+#define CONFIG_FSL_DPA_PIRQ_FAST 1
+
+static struct cpumask portal_cpus;
+/* protect qman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(qman_lock);
+
+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+ struct device *dev = pcfg->dev;
+ int window_count = 1;
+ struct iommu_domain_geometry geom_attr;
+ struct pamu_stash_attribute stash_attr;
+ int ret;
+
+ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+ if (!pcfg->iommu_domain) {
+ dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+ goto no_iommu;
+ }
+ geom_attr.aperture_start = 0;
+ geom_attr.aperture_end =
+ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
+ geom_attr.force_aperture = true;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
+ &geom_attr);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
+ &window_count);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+ stash_attr.cpu = cpu;
+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_STASH,
+ &stash_attr);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto out_domain_free;
+ }
+ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
+ __func__, ret);
+ goto out_domain_free;
+ }
+ ret = iommu_attach_device(pcfg->iommu_domain, dev);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_ENABLE,
+ &window_count);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_detach_device;
+ }
+
+no_iommu:
+#endif
+ qman_set_sdest(pcfg->channel, cpu);
+
+ return;
+
+#ifdef CONFIG_FSL_PAMU
+out_detach_device:
+ iommu_detach_device(pcfg->iommu_domain, NULL);
+out_domain_free:
+ iommu_domain_free(pcfg->iommu_domain);
+ pcfg->iommu_domain = NULL;
+#endif
+}
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+ struct qman_portal *p;
+ u32 irq_sources = 0;
+
+ /* We need the same LIODN offset for all portals */
+ qman_liodn_fixup(pcfg->channel);
+
+ pcfg->iommu_domain = NULL;
+ portal_set_cpu(pcfg, pcfg->cpu);
+
+ p = qman_create_affine_portal(pcfg, NULL);
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ /* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+ QM_PIRQ_CSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+ irq_sources |= QM_PIRQ_DQRI;
+#endif
+ qman_p_irqsource_add(p, irq_sources);
+
+ spin_lock(&qman_lock);
+ if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
+ /* all assigned portals are initialized now */
+ qman_init_cgr_all();
+ }
+ spin_unlock(&qman_lock);
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
+}
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+ unsigned int cpu)
+{
+#ifdef CONFIG_FSL_PAMU /* TODO */
+ struct pamu_stash_attribute stash_attr;
+ int ret;
+
+ if (pcfg->iommu_domain) {
+ stash_attr.cpu = cpu;
+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
+ if (ret < 0) {
+ dev_err(pcfg->dev,
+ "Failed to update pamu stash setting\n");
+ return;
+ }
+ }
+#endif
+ qman_set_sdest(pcfg->channel, cpu);
+}
+
+static void qman_offline_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->irq, cpumask_of(0));
+ qman_portal_update_sdest(pcfg, 0);
+ }
+ }
+}
+
+static void qman_online_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ qman_portal_update_sdest(pcfg, cpu);
+ }
+ }
+}
+
+static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ qman_online_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ qman_offline_cpu(cpu);
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block qman_hotplug_cpu_notifier = {
+ .notifier_call = qman_hotplug_cpu_callback,
+};
+
+static int qman_portal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct qm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ const u32 *channel;
+ void __iomem *va;
+ int irq, len, cpu;
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg)
+ return -ENOMEM;
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %s property 'reg::CE'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %s property 'reg::CI'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ channel = of_get_property(node, "cell-index", &len);
+ if (!channel || (len != 4)) {
+ dev_err(dev, "Can't get %s property 'cell-index'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+ pcfg->channel = *channel;
+ pcfg->cpu = -1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Can't get %s IRQ\n", node->full_name);
+ return -ENXIO;
+ }
+ pcfg->irq = irq;
+
+ va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+ if (!va)
+ goto err_ioremap1;
+
+ pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+ va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+ _PAGE_GUARDED | _PAGE_NO_CACHE);
+ if (!va)
+ goto err_ioremap2;
+
+ pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+ pcfg->pools = qm_get_pools_sdqcr();
+
+ spin_lock(&qman_lock);
+ cpu = cpumask_next_zero(-1, &portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ /* unassigned portal, skip init */
+ spin_unlock(&qman_lock);
+ return 0;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&qman_lock);
+ pcfg->cpu = cpu;
+
+	if (!init_pcfg(pcfg)) {
+		/* init_pcfg() already reported the failure; unmap both regions */
+		iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
+		iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+		return -EIO;
+	}
+
+ /* clear irq affinity if assigned cpu is offline */
+ if (!cpu_online(cpu))
+ qman_offline_cpu(cpu);
+
+ return 0;
+
+err_ioremap2:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+ dev_err(dev, "ioremap failed\n");
+ return -ENXIO;
+}
+
+static const struct of_device_id qman_portal_ids[] = {
+ {
+ .compatible = "fsl,qman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qman_portal_ids);
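+
+/*
+ * Example (illustrative; addresses and interrupt specifiers are made up):
+ * a device tree node this driver binds against looks roughly like
+ *
+ *	qportal0: qman-portal@0 {
+ *		compatible = "fsl,qman-portal";
+ *		reg = <0x0 0x4000>,		// cache-enabled (CE)
+ *		      <0x4000000 0x4000>;	// cache-inhibited (CI)
+ *		interrupts = <104 2 0 0>;
+ *		cell-index = <0>;		// the portal's channel
+ *	};
+ */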
+
+static struct platform_driver qman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = qman_portal_ids,
+ },
+ .probe = qman_portal_probe,
+};
+
+static int __init qman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
+
+ return 0;
+}
+
+module_driver(qman_portal_driver,
+ qman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644
index 000000000000..5cf821e623a9
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -0,0 +1,371 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/qman.h>
+#include <linux/iommu.h>
+
+#if defined(CONFIG_FSL_PAMU)
+#include <asm/fsl_pamu_stash.h>
+#endif
+
+struct qm_mcr_querywq {
+ u8 verb;
+ u8 result;
+ u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+ u8 __reserved[28];
+ u32 wq_len[8];
+} __packed;
+
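+/*
+ * Example (illustrative): channel_wq packs (channel << 3) | wq, so the
+ * getter below simply drops the 3 wq bits.
+ */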
+static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
+{
+ return wq->channel_wq >> 3;
+}
+
+struct __qm_mcr_querycongestion {
+ u32 state[8];
+};
+
+/* "Query Congestion Group State" */
+struct qm_mcr_querycongestion {
+ u8 verb;
+ u8 result;
+ u8 __reserved[30];
+ /* Access this struct using qman_cgrs_get() */
+ struct __qm_mcr_querycongestion state;
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcr_querycgr {
+ u8 verb;
+ u8 result;
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[6];
+ u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u8 __reserved3[3];
+ u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 cscn_targ_swp[4];
+} __packed;
+
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+}
+
+/* "Query FQ Non-Programmable Fields" */
+struct qm_mcc_queryfq_np {
+ u8 _ncw_verb;
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+
+struct qm_mcr_queryfq_np {
+ u8 verb;
+ u8 result;
+ u8 __reserved1;
+ u8 state; /* QM_MCR_NP_STATE_*** */
+ u32 fqd_link; /* 24-bit, _res2[24-31] */
+ u16 odp_seq; /* 14-bit, _res3[14-15] */
+ u16 orp_nesn; /* 14-bit, _res4[14-15] */
+ u16 orp_ea_hseq; /* 15-bit, _res5[15] */
+ u16 orp_ea_tseq; /* 15-bit, _res6[15] */
+ u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
+ u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
+ u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
+ u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
+ u8 __reserved2[5];
+ u8 is; /* 1-bit, _res12[1-7] */
+ u16 ics_surp;
+ u32 byte_cnt;
+ u32 frm_cnt; /* 24-bit, _res13[24-31] */
+ u32 __reserved3;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved4;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+} __packed;
+
+#define QM_MCR_NP_STATE_FE 0x10
+#define QM_MCR_NP_STATE_R 0x08
+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS 0x00
+#define QM_MCR_NP_STATE_RETIRED 0x01
+#define QM_MCR_NP_STATE_TEN_SCHED 0x02
+#define QM_MCR_NP_STATE_TRU_SCHED 0x03
+#define QM_MCR_NP_STATE_PARKED 0x04
+#define QM_MCR_NP_STATE_ACTIVE 0x05
+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
+
+enum qm_mcr_queryfq_np_masks {
+ qm_mcr_fqd_link_mask = BIT(24)-1,
+ qm_mcr_odp_seq_mask = BIT(14)-1,
+ qm_mcr_orp_nesn_mask = BIT(14)-1,
+ qm_mcr_orp_ea_hseq_mask = BIT(15)-1,
+ qm_mcr_orp_ea_tseq_mask = BIT(15)-1,
+ qm_mcr_orp_ea_hptr_mask = BIT(24)-1,
+ qm_mcr_orp_ea_tptr_mask = BIT(24)-1,
+ qm_mcr_pfdr_hptr_mask = BIT(24)-1,
+ qm_mcr_pfdr_tptr_mask = BIT(24)-1,
+ qm_mcr_is_mask = BIT(1)-1,
+ qm_mcr_frm_cnt_mask = BIT(24)-1,
+};
+#define qm_mcr_np_get(np, field) \
+ ((np)->field & (qm_mcr_##field##_mask))
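+
+/*
+ * Usage sketch (illustrative; assumes a QUERYFQ_NP management command has
+ * filled in 'np'):
+ *
+ *	struct qm_mcr_queryfq_np np;
+ *	u32 link, cnt;
+ *
+ *	link = qm_mcr_np_get(&np, fqd_link);	// 24-bit FQD link
+ *	cnt = qm_mcr_np_get(&np, frm_cnt);	// 24-bit frame count
+ */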
+
+/* Congestion Groups */
+
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
+ * those that don't concern us. We harness the structure and accessor details
+ * already used in the management command to query congestion groups.
+ */
+#define CGR_BITS_PER_WORD 5
+#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD)
+#define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f))
+#define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
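+
+/*
+ * Example (illustrative): congestion group 37 lives in state word
+ * CGR_WORD(37) == 1 at CGR_BIT(37) == BIT(31) >> 5, i.e. groups are
+ * numbered MSB-first within each 32-bit word.
+ */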
+
+struct qman_cgrs {
+ struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+ memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
+{
+ return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
+}
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+ const struct qman_cgrs *src)
+{
+ *dest = *src;
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *_d++ = *_a++ & *_b++;
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *_d++ = *_a++ ^ *_b++;
+}
+
+void qman_init_cgr_all(void);
+
+struct qm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ struct device *dev;
+ struct iommu_domain *iommu_domain;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /*
+	 * the portal's dedicated channel id, used when initialising
+	 * frame queues so they target this portal when scheduled
+ */
+ u16 channel;
+ /*
+ * mask of pool channels this portal has dequeue access to
+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
+ */
+ u32 pools;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
+extern struct gen_pool *qm_fqalloc; /* FQID allocator */
+extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+u32 qm_get_pools_sdqcr(void);
+
+int qman_wq_alloc(void);
+void qman_liodn_fixup(u16 channel);
+void qman_set_sdest(u16 channel, unsigned int cpu_idx);
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+
+/*
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/*
+ * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
+#define QM_VDQCR_EXACT 0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
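+
+/*
+ * Illustrative composition of a vdqcr word ('fqid' is an example variable;
+ * QM_VDQCR_FQID() is defined further down):
+ *
+ *	u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_EXACT |
+ *		    QM_VDQCR_NUMFRAMES_SET(3) | QM_VDQCR_FQID(fqid);
+ */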
+
+#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
+#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
+#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
+
+/*
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (i.e. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
+ * for the QMAN_FQ_STATE_VDQCR bit to disappear.
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
+
+int qman_alloc_fq_table(u32 num_fqids);
+
+/* QMan s/w corenet portal, low-level i/face */
+
+/*
+ * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SOURCE == SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS 0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
+#define QM_SDQCR_COUNT_EXACT1 0x0
+#define QM_SDQCR_COUNT_UPTO3 0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_SDQCR_TYPE_MASK 0x03000000
+#define QM_SDQCR_TYPE_NULL 0x0
+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_SDQCR_TYPE_ACTIVE 0x03000000
+#define QM_SDQCR_TOKEN_MASK 0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
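+
+/*
+ * Illustrative SDQCR composition (qman_test_api.c builds a similar one):
+ *
+ *	u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_TYPE_PRIO_QOS |
+ *		    QM_SDQCR_TOKEN_SET(0x98) | QM_SDQCR_CHANNELS_DEDICATED |
+ *		    QM_SDQCR_CHANNELS_POOL(2);
+ */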
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK 0x00ffffff
+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL 0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK 0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+extern struct qman_portal *affine_portals[NR_CPUS];
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test.c b/drivers/soc/fsl/qbman/qman_test.c
new file mode 100644
index 000000000000..18f7f0202fa7
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.c
@@ -0,0 +1,62 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("QMan testing");
+
+static int test_init(void)
+{
+ int loop = 1;
+ int err = 0;
+
+ while (loop--) {
+#ifdef CONFIG_FSL_QMAN_TEST_STASH
+ err = qman_test_stash();
+ if (err)
+ break;
+#endif
+#ifdef CONFIG_FSL_QMAN_TEST_API
+ err = qman_test_api();
+ if (err)
+ break;
+#endif
+ }
+ return err;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
new file mode 100644
index 000000000000..d5f8cb2260dc
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.h
@@ -0,0 +1,36 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+int qman_test_stash(void);
+int qman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
new file mode 100644
index 000000000000..6880ff17f45e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -0,0 +1,252 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#define CGR_ID 27
+#define POOL_ID 2
+#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
+#define NUM_ENQUEUES 10
+#define NUM_PARTIAL 4
+#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
+ QM_SDQCR_TYPE_PRIO_QOS | \
+ QM_SDQCR_TOKEN_SET(0x98) | \
+ QM_SDQCR_CHANNELS_DEDICATED | \
+ QM_SDQCR_CHANNELS_POOL(POOL_ID))
+#define PORTAL_OPAQUE ((void *)0xf00dbeef)
+#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
+ struct qman_fq *,
+ const struct qm_dqrr_entry *);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+ const union qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+ const union qm_mr_entry *);
+
+static struct qm_fd fd, fd_dq;
+static struct qman_fq fq_base = {
+ .cb.dqrr = cb_dqrr,
+ .cb.ern = cb_ern,
+ .cb.fqs = cb_fqs
+};
+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
+static int retire_complete, sdqcr_complete;
+
+/* Helpers for initialising and "incrementing" a frame descriptor */
+static void fd_init(struct qm_fd *fd)
+{
+ qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
+ qm_fd_set_contig_big(fd, 0x0000ffff);
+ fd->cmd = 0xfeedf00d;
+}
+
+static void fd_inc(struct qm_fd *fd)
+{
+ u64 t = qm_fd_addr_get64(fd);
+ int z = t >> 40;
+ unsigned int len, off;
+ enum qm_fd_format fmt;
+
+ t <<= 1;
+ if (z)
+ t |= 1;
+ qm_fd_addr_set64(fd, t);
+
+ fmt = qm_fd_get_format(fd);
+ off = qm_fd_get_offset(fd);
+ len = qm_fd_get_length(fd);
+ len--;
+ qm_fd_set_param(fd, fmt, off, len);
+
+ fd->cmd++;
+}
+
+/* The only part of the 'fd' we can't memcmp() is the ppid */
+static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+{
+ int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+
+ if (!r) {
+ enum qm_fd_format fmt_a, fmt_b;
+
+ fmt_a = qm_fd_get_format(a);
+ fmt_b = qm_fd_get_format(b);
+ r = fmt_a - fmt_b;
+ }
+ if (!r)
+ r = a->cfg - b->cfg;
+ if (!r)
+ r = a->cmd - b->cmd;
+ return r;
+}
+
+/* test */
+static int do_enqueues(struct qman_fq *fq)
+{
+ unsigned int loop;
+ int err = 0;
+
+ for (loop = 0; loop < NUM_ENQUEUES; loop++) {
+ if (qman_enqueue(fq, &fd)) {
+ pr_crit("qman_enqueue() failed\n");
+ err = -EIO;
+ }
+ fd_inc(&fd);
+ }
+
+ return err;
+}
+
+int qman_test_api(void)
+{
+ unsigned int flags, frmcnt;
+ int err;
+ struct qman_fq *fq = &fq_base;
+
+ pr_info("%s(): Starting\n", __func__);
+ fd_init(&fd);
+ fd_init(&fd_dq);
+
+ /* Initialise (parked) FQ */
+ err = qman_create_fq(0, FQ_FLAGS, fq);
+ if (err) {
+ pr_crit("qman_create_fq() failed\n");
+ goto failed;
+ }
+ err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
+ if (err) {
+ pr_crit("qman_init_fq() failed\n");
+ goto failed;
+ }
+ /* Do enqueues + VDQCR, twice. (Parked FQ) */
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("VDQCR (till-empty);\n");
+ frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+ pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
+ NUM_ENQUEUES);
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+		pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("scheduled dequeue (till-empty)\n");
+ err = qman_schedule_fq(fq);
+ if (err) {
+ pr_crit("qman_schedule_fq() failed\n");
+ goto failed;
+ }
+ wait_event(waitqueue, sdqcr_complete);
+
+ /* Retire and OOS the FQ */
+ err = qman_retire_fq(fq, &flags);
+ if (err < 0) {
+ pr_crit("qman_retire_fq() failed\n");
+ goto failed;
+ }
+ wait_event(waitqueue, retire_complete);
+ if (flags & QMAN_FQ_STATE_BLOCKOOS) {
+ err = -EIO;
+ pr_crit("leaking frames\n");
+ goto failed;
+ }
+ err = qman_oos_fq(fq);
+ if (err) {
+ pr_crit("qman_oos_fq() failed\n");
+ goto failed;
+ }
+ qman_destroy_fq(fq);
+ pr_info("%s(): Finished\n", __func__);
+ return 0;
+
+failed:
+ WARN_ON(1);
+ return err;
+}
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
+ pr_err("BADNESS: dequeued frame doesn't match;\n");
+ return qman_cb_dqrr_consume;
+ }
+ fd_inc(&fd_dq);
+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+ sdqcr_complete = 1;
+ wake_up(&waitqueue);
+ }
+ return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ pr_crit("cb_ern() unimplemented");
+ WARN_ON(1);
+}
+
+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
+
+ if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) {
+ pr_crit("unexpected FQS message");
+ WARN_ON(1);
+ return;
+ }
+ pr_info("Retirement message received\n");
+ retire_complete = 1;
+ wake_up(&waitqueue);
+}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
new file mode 100644
index 000000000000..43cf66ba42f5
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -0,0 +1,617 @@
+/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+/*
+ * Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * ie. when cache-expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
+ * first and last words of the frame data will undergo a transformation step on
+ * each forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer" that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating and enqueuing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_all_cpus() and link them
+ * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ * handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ * and advance the iterator for the next loop. This includes a final fixup,
+ * which connects the last handler to the first (and which is why phase 2
+ * and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * initialise FQ objects and advance the iterator for the next loop.
+ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ
+ * initialisation targets the correct cpu.
+ */
+
+/*
+ * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive).
+ */
+struct bstrap {
+ int (*fn)(void);
+ atomic_t started;
+};
+static int bstrap_fn(void *bs)
+{
+ struct bstrap *bstrap = bs;
+ int err;
+
+ atomic_inc(&bstrap->started);
+ err = bstrap->fn();
+ if (err)
+ return err;
+ while (!kthread_should_stop())
+ msleep(20);
+ return 0;
+}
+static int on_all_cpus(int (*fn)(void))
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct bstrap bstrap = {
+ .fn = fn,
+ .started = ATOMIC_INIT(0)
+ };
+ struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
+ "hotpotato%d", cpu);
+ int ret;
+
+ if (IS_ERR(k))
+ return -ENOMEM;
+ kthread_bind(k, cpu);
+ wake_up_process(k);
+ /*
+ * If we call kthread_stop() before the "wake up" has had an
+ * effect, then the thread may exit with -EINTR without ever
+ * running the function. So poll until it's started before
+ * requesting it to stop.
+ */
+ while (!atomic_read(&bstrap.started))
+ msleep(20);
+ ret = kthread_stop(k);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+struct hp_handler {
+
+ /* The following data is stashed when 'rx' is dequeued; */
+ /* -------------- */
+ /* The Rx FQ, dequeues of which will stash the entire hp_handler */
+ struct qman_fq rx;
+ /* The Tx FQ we should forward to */
+ struct qman_fq tx;
+ /* The value we XOR post-dequeue, prior to validating */
+ u32 rx_mixer;
+ /* The value we XOR pre-enqueue, after validating */
+ u32 tx_mixer;
+ /* what the hotpotato address should be on dequeue */
+ dma_addr_t addr;
+ u32 *frame_ptr;
+
+ /* The following data isn't (necessarily) stashed on dequeue; */
+ /* -------------- */
+ u32 fqid_rx, fqid_tx;
+ /* list node for linking us into 'hp_cpu' */
+ struct list_head node;
+ /* Just to check ... */
+ unsigned int processor_id;
+} ____cacheline_aligned;
+
+struct hp_cpu {
+ /* identify the cpu we run on; */
+ unsigned int processor_id;
+ /* root node for the per-cpu list of handlers */
+ struct list_head handlers;
+ /* list node for linking us into 'hp_cpu_list' */
+ struct list_head node;
+ /*
+ * when repeatedly scanning 'hp_list', each time linking the n'th
+ * handlers together, this is used as per-cpu iterator state
+ */
+ struct hp_handler *iterator;
+};
+
+/* Each cpu has one of these */
+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
+
+/* links together the hp_cpu structs, in first-come first-serve order. */
+static LIST_HEAD(hp_cpu_list);
+static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+
+static unsigned int hp_cpu_list_length;
+
+/* the "special" handler, that starts and terminates the test. */
+static struct hp_handler *special_handler;
+static int loop_counter;
+
+/* handlers are allocated out of this, so they're properly aligned. */
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+static void *__frame_ptr;
+static u32 *frame_ptr;
+static dma_addr_t frame_dma;
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU 2
+#define HP_LOOPS 8
+/* 80 words (320 bytes) of frame data, spilling across several cachelines */
+#define HP_NUM_WORDS 80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD 0xabbaf00d
+
+static inline u32 do_lfsr(u32 prev)
+{
+ return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
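+
+/*
+ * Example (illustrative): one step maps 0xabbaf00d to (0xabbaf00d >> 1) ^
+ * 0xd0000001 == 0x85dd7807. Successive steps from HP_FIRST_WORD generate
+ * the frame words; a second sequence, seeded with 0xdeadbeef in
+ * init_phase2(), produces the per-handler rx/tx mixers.
+ */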
+
+static int allocate_frame_data(void)
+{
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+ struct platform_device *pdev = platform_device_alloc("foobar", -1);
+
+ if (!pdev) {
+ pr_crit("platform_device_alloc() failed");
+ return -EIO;
+ }
+	if (platform_device_add(pdev)) {
+		pr_crit("platform_device_add() failed");
+		platform_device_put(pdev);
+		return -EIO;
+	}
+	/* over-allocate so the 64-byte PTR_ALIGN below cannot overrun */
+	__frame_ptr = kmalloc(4 * HP_NUM_WORDS + 64, GFP_KERNEL);
+	if (!__frame_ptr) {
+		platform_device_del(pdev);
+		platform_device_put(pdev);
+		return -ENOMEM;
+	}
+
+ frame_ptr = PTR_ALIGN(__frame_ptr, 64);
+ for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+ frame_ptr[loop] = lfsr;
+ lfsr = do_lfsr(lfsr);
+ }
+ frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
+ platform_device_del(pdev);
+ platform_device_put(pdev);
+ return 0;
+}
+
+static void deallocate_frame_data(void)
+{
+ kfree(__frame_ptr);
+}
+
+static inline int process_frame_data(struct hp_handler *handler,
+ const struct qm_fd *fd)
+{
+ u32 *p = handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+
+ if (qm_fd_addr_get64(fd) != handler->addr) {
+ pr_crit("bad frame address");
+ return -EIO;
+ }
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ *p ^= handler->rx_mixer;
+ if (*p != lfsr) {
+ pr_crit("corrupt frame data");
+ return -EIO;
+ }
+ *p ^= handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ return 0;
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ if (process_frame_data(handler, &dqrr->fd)) {
+ WARN_ON(1);
+ goto skip;
+ }
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ }
+skip:
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ process_frame_data(handler, &dqrr->fd);
+ if (++loop_counter < HP_LOOPS) {
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ goto skip;
+ }
+ } else {
+ pr_info("Received final (%dth) frame\n", loop_counter);
+ wake_up(&queue);
+ }
+skip:
+ return qman_cb_dqrr_consume;
+}
+
+static int create_per_cpu_handlers(void)
+{
+ struct hp_handler *handler;
+ int loop;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ hp_cpu->processor_id = smp_processor_id();
+ spin_lock(&hp_lock);
+ list_add_tail(&hp_cpu->node, &hp_cpu_list);
+ hp_cpu_list_length++;
+ spin_unlock(&hp_lock);
+ INIT_LIST_HEAD(&hp_cpu->handlers);
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+ if (!handler) {
+ pr_crit("kmem_cache_alloc() failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ handler->processor_id = hp_cpu->processor_id;
+ handler->addr = frame_dma;
+ handler->frame_ptr = frame_ptr;
+ list_add_tail(&handler->node, &hp_cpu->handlers);
+ }
+ return 0;
+}
+
+static int destroy_per_cpu_handlers(void)
+{
+ struct list_head *loop, *tmp;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ spin_lock(&hp_lock);
+ list_del(&hp_cpu->node);
+ spin_unlock(&hp_lock);
+ list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
+ u32 flags = 0;
+ struct hp_handler *handler = list_entry(loop, struct hp_handler,
+ node);
+ if (qman_retire_fq(&handler->rx, &flags) ||
+ (flags & QMAN_FQ_STATE_BLOCKOOS)) {
+ pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
+ WARN_ON(1);
+ return -EIO;
+ }
+ if (qman_oos_fq(&handler->rx)) {
+ pr_crit("qman_oos_fq(rx) failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ qman_destroy_fq(&handler->rx);
+ qman_destroy_fq(&handler->tx);
+ qman_release_fqid(handler->fqid_rx);
+ list_del(&handler->node);
+ kmem_cache_free(hp_handler_slab, handler);
+ }
+ return 0;
+}
+
+static inline u8 num_cachelines(u32 offset)
+{
+	u8 res = (offset + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES;
+ if (res > 3)
+ return 3;
+ return res;
+}
+#define STASH_DATA_CL \
+ num_cachelines(HP_NUM_WORDS * 4)
+#define STASH_CTX_CL \
+ num_cachelines(offsetof(struct hp_handler, fqid_rx))
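+
+/*
+ * Example (illustrative, assuming 64-byte cachelines): HP_NUM_WORDS * 4 ==
+ * 320 bytes spans 5 cachelines, so STASH_DATA_CL clamps to the hardware
+ * maximum of 3; STASH_CTX_CL covers the hp_handler fields declared before
+ * fqid_rx, i.e. the part stashed on dequeue.
+ */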
+
+static int init_handler(void *h)
+{
+ struct qm_mcc_initfq opts;
+ struct hp_handler *handler = h;
+ int err;
+
+ if (handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
+ /* Set up rx */
+ memset(&handler->rx, 0, sizeof(handler->rx));
+ if (handler == special_handler)
+ handler->rx.cb.dqrr = special_dqrr;
+ else
+ handler->rx.cb.dqrr = normal_dqrr;
+ err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
+ if (err) {
+ pr_crit("qman_create_fq(rx) failed");
+ goto failed;
+ }
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+ qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
+ err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+ QMAN_INITFQ_FLAG_LOCAL, &opts);
+ if (err) {
+ pr_crit("qman_init_fq(rx) failed");
+ goto failed;
+ }
+ /* Set up tx */
+ memset(&handler->tx, 0, sizeof(handler->tx));
+ err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+ &handler->tx);
+ if (err) {
+ pr_crit("qman_create_fq(tx) failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
+}
+
+static void init_handler_cb(void *h)
+{
+ if (init_handler(h))
+ WARN_ON(1);
+}
+
+static int init_phase2(void)
+{
+ int loop;
+ u32 fqid = 0;
+ u32 lfsr = 0xdeadbeef;
+ struct hp_cpu *hp_cpu;
+ struct hp_handler *handler;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ int err;
+
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ /* Rx FQID is the previous handler's Tx FQID */
+ hp_cpu->iterator->fqid_rx = fqid;
+ /* Allocate new FQID for Tx */
+ err = qman_alloc_fqid(&fqid);
+ if (err) {
+ pr_crit("qman_alloc_fqid() failed");
+ return err;
+ }
+ hp_cpu->iterator->fqid_tx = fqid;
+ /* Rx mixer is the previous handler's Tx mixer */
+ hp_cpu->iterator->rx_mixer = lfsr;
+ /* Get new mixer for Tx */
+ lfsr = do_lfsr(lfsr);
+ hp_cpu->iterator->tx_mixer = lfsr;
+ }
+ }
+ /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
+ hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
+ handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
+ if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
+ return 1;
+ handler->fqid_rx = fqid;
+ handler->rx_mixer = lfsr;
+ /* and tag it as our "special" handler */
+ special_handler = handler;
+ return 0;
+}
+
+static int init_phase3(void)
+{
+ int loop, err;
+ struct hp_cpu *hp_cpu;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ preempt_disable();
+ if (hp_cpu->processor_id == smp_processor_id()) {
+ err = init_handler(hp_cpu->iterator);
+				if (err) {
+					preempt_enable();
+					return err;
+				}
+ } else {
+ smp_call_function_single(hp_cpu->processor_id,
+ init_handler_cb, hp_cpu->iterator, 1);
+ }
+ preempt_enable();
+ }
+ }
+ return 0;
+}
+
+static int send_first_frame(void *ignore)
+{
+ u32 *p = special_handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop, err;
+ struct qm_fd fd;
+
+ if (special_handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
+ memset(&fd, 0, sizeof(fd));
+ qm_fd_addr_set64(&fd, special_handler->addr);
+ qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ if (*p != lfsr) {
+ err = -EIO;
+ pr_crit("corrupt frame data");
+ goto failed;
+ }
+ *p ^= special_handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ pr_info("Sending first frame\n");
+ err = qman_enqueue(&special_handler->tx, &fd);
+ if (err) {
+ pr_crit("qman_enqueue() failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
+}
+
+static void send_first_frame_cb(void *ignore)
+{
+ if (send_first_frame(NULL))
+ WARN_ON(1);
+}
+
+int qman_test_stash(void)
+{
+ int err;
+
+ if (cpumask_weight(cpu_online_mask) < 2) {
+ pr_info("%s(): skip - only 1 CPU\n", __func__);
+ return 0;
+ }
+
+ pr_info("%s(): Starting\n", __func__);
+
+ hp_cpu_list_length = 0;
+ loop_counter = 0;
+ hp_handler_slab = kmem_cache_create("hp_handler_slab",
+ sizeof(struct hp_handler), L1_CACHE_BYTES,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!hp_handler_slab) {
+ err = -EIO;
+ pr_crit("kmem_cache_create() failed");
+ goto failed;
+ }
+
+ err = allocate_frame_data();
+ if (err)
+ goto failed;
+
+ /* Init phase 1 */
+ pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
+ if (on_all_cpus(create_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_each_cpu() failed");
+ goto failed;
+ }
+ pr_info("Number of cpus: %d, total of %d handlers\n",
+ hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
+
+ err = init_phase2();
+ if (err)
+ goto failed;
+
+ err = init_phase3();
+ if (err)
+ goto failed;
+
+ preempt_disable();
+ if (special_handler->processor_id == smp_processor_id()) {
+ err = send_first_frame(NULL);
+		if (err) {
+			preempt_enable();
+			goto failed;
+		}
+ } else {
+ smp_call_function_single(special_handler->processor_id,
+ send_first_frame_cb, NULL, 1);
+ }
+ preempt_enable();
+
+ wait_event(queue, loop_counter == HP_LOOPS);
+ deallocate_frame_data();
+ if (on_all_cpus(destroy_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_each_cpu() failed");
+ goto failed;
+ }
+ kmem_cache_destroy(hp_handler_slab);
+ pr_info("%s(): Finished\n", __func__);
+
+ return 0;
+failed:
+ WARN_ON(1);
+ return err;
+}
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 333eb2215a57..0aaf429f31d5 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -41,7 +41,8 @@ struct qe_gpio_chip {
static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
{
- struct qe_gpio_chip *qe_gc = gpiochip_get_data(&mm_gc->gc);
+ struct qe_gpio_chip *qe_gc =
+ container_of(mm_gc, struct qe_gpio_chip, mm_gc);
struct qe_pio_regs __iomem *regs = mm_gc->regs;
qe_gc->cpdata = in_be32(&regs->cpdata);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 7026507e6f1d..2707a827261b 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -69,8 +69,8 @@ static phys_addr_t qebase = -1;
phys_addr_t get_qe_base(void)
{
struct device_node *qe;
- int size;
- const u32 *prop;
+ int ret;
+ struct resource res;
if (qebase != -1)
return qebase;
@@ -82,9 +82,9 @@ phys_addr_t get_qe_base(void)
return qebase;
}
- prop = of_get_property(qe, "reg", &size);
- if (prop && size >= sizeof(*prop))
- qebase = of_translate_address(qe, prop);
+ ret = of_address_to_resource(qe, 0, &res);
+ if (!ret)
+ qebase = res.start;
of_node_put(qe);
return qebase;
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 41eff805a904..104e68d9b84f 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -70,6 +70,11 @@ int cpm_muram_init(void)
}
muram_pool = gen_pool_create(0, -1);
+ if (!muram_pool) {
+ pr_err("Cannot allocate memory pool for CPM/QE muram");
+ ret = -ENOMEM;
+ goto out_muram;
+ }
muram_pbase = of_translate_address(np, zero);
if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
pr_err("Cannot translate zero through CPM muram node");
@@ -116,6 +121,9 @@ static unsigned long cpm_muram_alloc_common(unsigned long size,
struct muram_block *entry;
unsigned long start;
+ if (!muram_pool && cpm_muram_init())
+ goto out2;
+
start = gen_pool_alloc_algo(muram_pool, size, algo, data);
if (!start)
goto out2;
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index 5e48b1470178..a1048b44e6b9 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -99,7 +99,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
utdm->tdm_port = val;
ut_info->uf_info.tdm_num = utdm->tdm_port;
- if (of_get_property(np, "fsl,tdm-internal-loopback", NULL))
+ if (of_property_read_bool(np, "fsl,tdm-internal-loopback"))
utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
else
utdm->tdm_mode = TDM_NORMAL;
@@ -167,7 +167,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
}
if (siram_init_flag == 0) {
- memset_io(utdm->siram, 0, res->end - res->start + 1);
+ memset_io(utdm->siram, 0, resource_size(res));
siram_init_flag = 1;
}
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 396ded52ab70..209a8f7ef02b 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1187,8 +1187,10 @@ int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
hdata.type = heap->type;
hdata.heap_id = heap->id;
- ret = copy_to_user(&buffer[cnt],
- &hdata, sizeof(hdata));
+ if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
+ ret = -EFAULT;
+ goto out;
+ }
cnt++;
if (cnt >= max_cnt)
diff --git a/drivers/staging/android/ion/ion_of.c b/drivers/staging/android/ion/ion_of.c
index 15bac92b7f04..46b2bb99bfd6 100644
--- a/drivers/staging/android/ion/ion_of.c
+++ b/drivers/staging/android/ion/ion_of.c
@@ -107,7 +107,7 @@ struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
heap_pdev = of_platform_device_create(node, heaps[i].name,
&pdev->dev);
- if (!pdev)
+ if (!heap_pdev)
return ERR_PTR(-ENOMEM);
heap_pdev->dev.platform_data = &heaps[i];
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index e36ee984485b..34307ac3f255 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -128,6 +128,7 @@ int arche_platform_change_state(enum arche_platform_state state,
pdev = of_find_device_by_node(np);
if (!pdev) {
pr_err("arche-platform device not found\n");
+ of_node_put(np);
return -ENODEV;
}
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
index 071bb1cfd3ae..baab460eeaa3 100644
--- a/drivers/staging/greybus/es2.c
+++ b/drivers/staging/greybus/es2.c
@@ -1548,7 +1548,8 @@ static int ap_probe(struct usb_interface *interface,
INIT_LIST_HEAD(&es2->arpcs);
spin_lock_init(&es2->arpc_lock);
- if (es2_arpc_in_enable(es2))
+ retval = es2_arpc_in_enable(es2);
+ if (retval)
goto error;
retval = gb_hd_add(hd);
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 5e06e4229e42..250caa00de5e 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -702,15 +702,13 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
ret = gb_gpio_irqchip_add(gpio, irqc, 0,
handle_level_irq, IRQ_TYPE_NONE);
if (ret) {
- dev_err(&connection->bundle->dev,
- "failed to add irq chip: %d\n", ret);
+ dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret);
goto exit_line_free;
}
ret = gpiochip_add(gpio);
if (ret) {
- dev_err(&connection->bundle->dev,
- "failed to add gpio chip: %d\n", ret);
+ dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
goto exit_gpio_irqchip_remove;
}
diff --git a/drivers/staging/greybus/module.c b/drivers/staging/greybus/module.c
index 69f67ddbd4a3..660b4674a76f 100644
--- a/drivers/staging/greybus/module.c
+++ b/drivers/staging/greybus/module.c
@@ -127,7 +127,7 @@ struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
return module;
err_put_interfaces:
- for (--i; i > 0; --i)
+ for (--i; i >= 0; --i)
gb_interface_put(module->interfaces[i]);
put_device(&module->dev);
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 5ee7954bd9f9..2633d2bfb1b4 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -888,7 +888,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
minor = alloc_minor(gb_tty);
if (minor < 0) {
if (minor == -ENOSPC) {
- dev_err(&connection->bundle->dev,
+ dev_err(&gbphy_dev->dev,
"no more free minor numbers\n");
retval = -ENODEV;
} else {
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index d626125d7af9..564b36d4f648 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -468,6 +468,8 @@ static inline int __sca3000_get_base_freq(struct sca3000_state *st,
case SCA3000_MEAS_MODE_OP_2:
*base_freq = info->option_mode_2_freq;
break;
+ default:
+ ret = -EINVAL;
}
error_ret:
return ret;
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 6eae60595905..23fda9d98bff 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -871,12 +871,10 @@ static ssize_t xattr_cache_store(struct kobject *kobj,
}
LUSTRE_RW_ATTR(xattr_cache);
-static ssize_t unstable_stats_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
+static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = sbi->ll_cache;
long pages;
int mb;
@@ -884,19 +882,21 @@ static ssize_t unstable_stats_show(struct kobject *kobj,
pages = atomic_long_read(&cache->ccc_unstable_nr);
mb = (pages * PAGE_SIZE) >> 20;
- return sprintf(buf, "unstable_check: %8d\n"
- "unstable_pages: %12ld\n"
- "unstable_mb: %8d\n",
- cache->ccc_unstable_check, pages, mb);
+ seq_printf(m,
+ "unstable_check: %8d\n"
+ "unstable_pages: %12ld\n"
+ "unstable_mb: %8d\n",
+ cache->ccc_unstable_check, pages, mb);
+
+ return 0;
}
-static ssize_t unstable_stats_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
+static ssize_t ll_unstable_stats_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
+ struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
char kernbuf[128];
int val, rc;
@@ -922,7 +922,7 @@ static ssize_t unstable_stats_store(struct kobject *kobj,
return count;
}
-LUSTRE_RW_ATTR(unstable_stats);
+LPROC_SEQ_FOPS(ll_unstable_stats);
static ssize_t root_squash_show(struct kobject *kobj, struct attribute *attr,
char *buf)
@@ -995,6 +995,7 @@ static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
/* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */
{ "max_cached_mb", &ll_max_cached_mb_fops, NULL },
{ "statahead_stats", &ll_statahead_stats_fops, NULL, 0 },
+ { "unstable_stats", &ll_unstable_stats_fops, NULL },
{ "sbi_flags", &ll_sbi_flags_fops, NULL, 0 },
{ .name = "nosquash_nids",
.fops = &ll_nosquash_nids_fops },
@@ -1026,7 +1027,6 @@ static struct attribute *llite_attrs[] = {
&lustre_attr_max_easize.attr,
&lustre_attr_default_easize.attr,
&lustre_attr_xattr_cache.attr,
- &lustre_attr_unstable_stats.attr,
&lustre_attr_root_squash.attr,
NULL,
};
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index c29040fdf9a7..1091b9f1dd07 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -423,8 +423,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
actual_pages = get_user_pages(task, task->mm,
(unsigned long)buf & ~(PAGE_SIZE - 1),
num_pages,
- (type == PAGELIST_READ) /*Write */ ,
- 0 /*Force */ ,
+ (type == PAGELIST_READ) ? FOLL_WRITE : 0,
pages,
NULL /*vmas */);
up_read(&task->mm->mmap_sem);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index e11c0e07471b..7b6cd4d80621 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1477,8 +1477,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
current->mm, /* mm */
(unsigned long)virt_addr, /* start */
num_pages, /* len */
- 0, /* write */
- 0, /* force */
+ 0, /* gup_flags */
pages, /* pages (array of page pointers) */
NULL); /* vmas */
up_read(&current->mm->mmap_sem);
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 78f5613e9467..6ab7443eabde 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -3388,7 +3388,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
clients_count++;
- destroy_workqueue(hif_workqueue);
_fail_:
return result;
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 39b928c2849d..b7d747e92c7a 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1804,6 +1804,10 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* Otherwise, initiator is not expecting a NOPIN is response.
* Just ignore for now.
*/
+
+ if (cmd)
+ iscsit_free_cmd(cmd, false);
+
return 0;
}
EXPORT_SYMBOL(iscsit_process_nop_out);
@@ -2982,7 +2986,7 @@ iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
- "Solicitied" : "Unsolicitied", cmd->init_task_tag,
+ "Solicited" : "Unsolicited", cmd->init_task_tag,
cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
}
EXPORT_SYMBOL(iscsit_build_nopin_rsp);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index adf419fa4291..15f79a2ca34a 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -434,7 +434,7 @@ static int iscsi_login_zero_tsih_s2(
/*
* Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
- * Immediate Data + Unsolicitied Data-OUT if necessary..
+ * Immediate Data + Unsolicited Data-OUT if necessary..
*/
param = iscsi_find_param_from_key("MaxRecvDataSegmentLength",
conn->param_list);
@@ -646,7 +646,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
/*
- * FIXME: Unsolicitied NopIN support for ISER
+ * FIXME: Unsolicited NopIN support for ISER
*/
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
return;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 6094a6beddde..7dfefd66df93 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -754,15 +754,7 @@ EXPORT_SYMBOL(target_complete_cmd);
void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
- if (scsi_status != SAM_STAT_GOOD) {
- return;
- }
-
- /*
- * Calculate new residual count based upon length of SCSI data
- * transferred.
- */
- if (length < cmd->data_length) {
+ if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
cmd->residual_count += cmd->data_length - length;
} else {
@@ -771,12 +763,6 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
}
cmd->data_length = length;
- } else if (length > cmd->data_length) {
- cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
- cmd->residual_count = length - cmd->data_length;
- } else {
- cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT);
- cmd->residual_count = 0;
}
target_complete_cmd(cmd, scsi_status);
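
After this hunk, target_complete_cmd_with_length() keeps only the underflow path: when the device transfers fewer bytes than the initiator allocated, the shortfall is reported as a residual and cmd->data_length is trimmed to the transferred length. A self-contained sketch of that arithmetic (plain C, with the se_cmd flag handling simplified away):

    #include <stdio.h>

    /* expected: allocation length from the CDB;
     * transferred: bytes the backend actually produced. */
    static unsigned int residual(unsigned int expected, unsigned int transferred)
    {
            return transferred < expected ? expected - transferred : 0;
    }

    int main(void)
    {
            /* e.g. INQUIRY with a 255-byte allocation length answered by a
             * 36-byte standard payload leaves a 219-byte residual. */
            printf("residual: %u\n", residual(255, 36));
            return 0;
    }
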
@@ -1706,6 +1692,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+ case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2547,8 +2534,12 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
- if (ack_kref)
- kref_get(&se_cmd->cmd_kref);
+ if (ack_kref) {
+ if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+ return -EINVAL;
+
+ se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+ }
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (se_sess->sess_tearing_down) {
@@ -2627,7 +2618,7 @@ EXPORT_SYMBOL(target_put_sess_cmd);
*/
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
- struct se_cmd *se_cmd;
+ struct se_cmd *se_cmd, *tmp_cmd;
unsigned long flags;
int rc;
@@ -2639,14 +2630,16 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
se_sess->sess_tearing_down = 1;
list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
+ list_for_each_entry_safe(se_cmd, tmp_cmd,
+ &se_sess->sess_wait_list, se_cmd_list) {
rc = kref_get_unless_zero(&se_cmd->cmd_kref);
if (rc) {
se_cmd->cmd_wait_set = 1;
spin_lock(&se_cmd->t_state_lock);
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
spin_unlock(&se_cmd->t_state_lock);
- }
+ } else
+ list_del_init(&se_cmd->se_cmd_list);
}
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -2871,6 +2864,12 @@ static const struct sense_info sense_info_table[] = {
.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
.add_sector_info = true,
},
+ [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
+ .key = COPY_ABORTED,
+ .asc = 0x0d,
+ .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
+	},
[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
/*
* Returning ILLEGAL REQUEST would cause immediate IO errors on
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 0f173bf7dbac..2b3c8564ace8 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -96,7 +96,7 @@ struct tcmu_dev {
size_t dev_size;
u32 cmdr_size;
u32 cmdr_last_cleaned;
- /* Offset of data ring from start of mb */
+ /* Offset of data area from start of mb */
/* Must add data_off and mb_addr to get the address */
size_t data_off;
size_t data_size;
@@ -349,7 +349,7 @@ static inline size_t spc_bitmap_free(unsigned long *bitmap)
/*
* We can't queue a command until we have space available on the cmd ring *and*
- * space available on the data ring.
+ * space available on the data area.
*
* Called with ring lock held.
*/
@@ -389,7 +389,8 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
return true;
}
-static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+static sense_reason_t
+tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
@@ -405,7 +406,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
* Must be a certain minimum size for response sense info, but
@@ -432,11 +433,14 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
data_length += se_cmd->t_bidi_data_sg->length;
}
- if ((command_size > (udev->cmdr_size / 2))
- || data_length > udev->data_size)
- pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
- "cmd/data ring buffers\n", command_size, data_length,
+ if ((command_size > (udev->cmdr_size / 2)) ||
+ data_length > udev->data_size) {
+ pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
+ "cmd ring/data area\n", command_size, data_length,
udev->cmdr_size, udev->data_size);
+ spin_unlock_irq(&udev->cmdr_lock);
+ return TCM_INVALID_CDB_FIELD;
+ }
while (!is_ring_space_avail(udev, command_size, data_length)) {
int ret;
@@ -450,7 +454,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
finish_wait(&udev->wait_cmdr, &__wait);
if (!ret) {
pr_warn("tcmu: command timed out\n");
- return -ETIMEDOUT;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
spin_lock_irq(&udev->cmdr_lock);
@@ -487,9 +491,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);
- /*
- * Fix up iovecs, and handle if allocation in data ring wrapped.
- */
+ /* Handle allocating space from the data area */
iov = &entry->req.iov[0];
iov_cnt = 0;
copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
@@ -526,10 +528,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
mod_timer(&udev->timeout,
round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
- return 0;
+ return TCM_NO_SENSE;
}
-static int tcmu_queue_cmd(struct se_cmd *se_cmd)
+static sense_reason_t
+tcmu_queue_cmd(struct se_cmd *se_cmd)
{
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
@@ -538,10 +541,10 @@ static int tcmu_queue_cmd(struct se_cmd *se_cmd)
tcmu_cmd = tcmu_alloc_cmd(se_cmd);
if (!tcmu_cmd)
- return -ENOMEM;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = tcmu_queue_cmd_ring(tcmu_cmd);
- if (ret < 0) {
+ if (ret != TCM_NO_SENSE) {
pr_err("TCMU: Could not queue command\n");
spin_lock_irq(&udev->commands_lock);
idr_remove(&udev->commands, tcmu_cmd->cmd_id);
@@ -561,7 +564,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
/*
* cmd has been completed already from timeout, just reclaim
- * data ring space and free cmd
+ * data area space and free cmd
*/
free_data_area(udev, cmd);
@@ -1129,20 +1132,9 @@ static sector_t tcmu_get_blocks(struct se_device *dev)
}
static sense_reason_t
-tcmu_pass_op(struct se_cmd *se_cmd)
-{
- int ret = tcmu_queue_cmd(se_cmd);
-
- if (ret != 0)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- else
- return TCM_NO_SENSE;
-}
-
-static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
- return passthrough_parse_cdb(cmd, tcmu_pass_op);
+ return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}
static const struct target_backend_ops tcmu_ops = {
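
With tcmu_queue_cmd() returning a sense_reason_t of its own, the tcmu_pass_op() shim above is dead weight and the queue function plugs straight into passthrough_parse_cdb(). A minimal sketch of the resulting pattern; my_queue_cmd and backend_ready are hypothetical names, not part of the target core:

    /* Sketch: a passthrough exec callback that reports failures directly
     * as sense codes instead of -errno values. */
    static sense_reason_t my_queue_cmd(struct se_cmd *se_cmd)
    {
            if (!backend_ready(se_cmd->se_dev))     /* hypothetical check */
                    return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

            /* ... set up the command ring entry, kick the backend ... */
            return TCM_NO_SENSE;
    }

    static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
    {
            /* no -errno to sense_reason_t translation layer needed */
            return passthrough_parse_cdb(cmd, my_queue_cmd);
    }
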
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 75cd85426ae3..094a1440eacb 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
}
mutex_unlock(&g_device_mutex);
- pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
return -EINVAL;
}
@@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
struct xcopy_op *xop, unsigned char *p,
- unsigned short tdll)
+ unsigned short tdll, sense_reason_t *sense_ret)
{
struct se_device *local_dev = se_cmd->se_dev;
unsigned char *desc = p;
@@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
unsigned short start = 0;
bool src = true;
+ *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
if (offset != 0) {
pr_err("XCOPY target descriptor list length is not"
" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
@@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
else
rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
-
- if (rc < 0)
+ /*
+ * If a matching IEEE NAA 0x83 descriptor for the requested device
+ * is not located on this node, return COPY_ABORTED with ASC/ASCQ
+ * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
+ * initiator to fall back to the normal copy method.
+ */
+ if (rc < 0) {
+ *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
goto out;
+ }
pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
xop->src_dev, &xop->src_tid_wwn[0]);
@@ -653,6 +662,7 @@ static int target_xcopy_read_source(
rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
remote_port, true);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
@@ -664,6 +674,7 @@ static int target_xcopy_read_source(
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
@@ -714,6 +725,7 @@ static int target_xcopy_write_destination(
remote_port, false);
if (rc < 0) {
struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
/*
* If the failure happened before the t_mem_list hand-off in
* target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
@@ -729,6 +741,7 @@ static int target_xcopy_write_destination(
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
transport_generic_free_cmd(se_cmd, 0);
return rc;
@@ -815,9 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work)
out:
xcopy_pt_undepend_remotedev(xop);
kfree(xop);
-
- pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
- ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ /*
+ * Don't override an error scsi status if it has already been set
+ */
+ if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
+ pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
+ " CHECK_CONDITION -> sending response\n", rc);
+ ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ }
target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}
@@ -875,7 +893,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
tdll, sdll, inline_dl);
- rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
+ rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
if (rc <= 0)
goto out;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 216e18cc9133..ff5de9a96643 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -572,10 +572,10 @@ static void ft_send_work(struct work_struct *work)
if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
ntohl(fcp->fc_dl), task_attr, data_dir,
- TARGET_SCF_ACK_KREF))
+ TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID))
goto err;
- pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
+ pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
return;
err:
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 6ffbb603d912..fd5c3de79470 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -39,6 +39,11 @@
#include "tcm_fc.h"
+#define TFC_SESS_DBG(lport, fmt, args...) \
+ pr_debug("host%u: rport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (lport)->port_id, ##args)
+
static void ft_sess_delete_all(struct ft_tport *);
/*
@@ -167,24 +172,29 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
struct ft_tport *tport;
struct hlist_head *head;
struct ft_sess *sess;
+ char *reason = "no session created";
rcu_read_lock();
tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
- if (!tport)
+ if (!tport) {
+ reason = "not an FCP port";
goto out;
+ }
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
- pr_debug("port_id %x found %p\n", port_id, sess);
+ TFC_SESS_DBG(lport, "port_id %x found %p\n",
+ port_id, sess);
return sess;
}
}
out:
rcu_read_unlock();
- pr_debug("port_id %x not found\n", port_id);
+ TFC_SESS_DBG(lport, "port_id %x not found, %s\n",
+ port_id, reason);
return NULL;
}
@@ -195,7 +205,7 @@ static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
struct ft_tport *tport = sess->tport;
struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
- pr_debug("port_id %x sess %p\n", sess->port_id, sess);
+ TFC_SESS_DBG(tport->lport, "port_id %x sess %p\n", sess->port_id, sess);
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;
@@ -223,7 +233,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
sess = kzalloc(sizeof(*sess), GFP_KERNEL);
if (!sess)
- return NULL;
+ return ERR_PTR(-ENOMEM);
kref_init(&sess->kref); /* ref for table entry */
sess->tport = tport;
@@ -234,8 +244,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
TARGET_PROT_NORMAL, &initiatorname[0],
sess, ft_sess_alloc_cb);
if (IS_ERR(sess->se_sess)) {
+ int rc = PTR_ERR(sess->se_sess);
kfree(sess);
- return NULL;
+ sess = ERR_PTR(rc);
}
return sess;
}
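
ft_sess_create() switches from a NULL-on-failure contract to the ERR_PTR convention so that the caller can tell -ENOMEM apart from an -EACCES raised by the session-registration callback (see the ft_prli_locked hunk below). A generic sketch of the idiom, with foo as an illustrative type:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo { int x; };                    /* illustrative */

    static struct foo *foo_create(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return ERR_PTR(-ENOMEM);  /* encode errno in the pointer */
            return f;
    }

    static int foo_init(void)
    {
            struct foo *f = foo_create();

            if (IS_ERR(f))
                    return PTR_ERR(f);        /* recover the errno */
            kfree(f);
            return 0;
    }
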
@@ -319,7 +330,7 @@ void ft_sess_close(struct se_session *se_sess)
mutex_unlock(&ft_lport_lock);
return;
}
- pr_debug("port_id %x\n", port_id);
+ TFC_SESS_DBG(sess->tport->lport, "port_id %x close session\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
ft_close_sess(sess);
@@ -379,8 +390,13 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
if (!(fcp_parm & FCP_SPPF_INIT_FCN))
return FC_SPP_RESP_CONF;
sess = ft_sess_create(tport, rdata->ids.port_id, rdata);
- if (!sess)
- return FC_SPP_RESP_RES;
+ if (IS_ERR(sess)) {
+ if (PTR_ERR(sess) == -EACCES) {
+ spp->spp_flags &= ~FC_SPP_EST_IMG_PAIR;
+ return FC_SPP_RESP_CONF;
+ } else
+ return FC_SPP_RESP_RES;
+ }
if (!sess->params)
rdata->prli_count++;
sess->params = fcp_parm;
@@ -423,8 +439,8 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
mutex_lock(&ft_lport_lock);
ret = ft_prli_locked(rdata, spp_len, rspp, spp);
mutex_unlock(&ft_lport_lock);
- pr_debug("port_id %x flags %x ret %x\n",
- rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
+ TFC_SESS_DBG(rdata->local_port, "port_id %x flags %x ret %x\n",
+ rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
return ret;
}
@@ -477,11 +493,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
struct ft_sess *sess;
u32 sid = fc_frame_sid(fp);
- pr_debug("sid %x\n", sid);
+ TFC_SESS_DBG(lport, "recv sid %x\n", sid);
sess = ft_sess_get(lport, sid);
if (!sess) {
- pr_debug("sid %x sess lookup failed\n", sid);
+ TFC_SESS_DBG(lport, "sid %x sess lookup failed\n", sid);
/* TBD XXX - if FCP_CMND, send PRLO */
fc_frame_free(fp);
return;
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
index 9b4815e81b0d..19bf2028e508 100644
--- a/drivers/thermal/intel_pch_thermal.c
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -20,10 +20,13 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
#include <linux/thermal.h>
#include <linux/pm.h>
/* Intel PCH thermal Device IDs */
+#define PCH_THERMAL_DID_HSW_1 0x9C24 /* Haswell PCH */
+#define PCH_THERMAL_DID_HSW_2 0x8C24 /* Haswell PCH */
#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
#define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */
@@ -66,9 +69,53 @@ struct pch_thermal_device {
unsigned long crt_temp;
int hot_trip_id;
unsigned long hot_temp;
+ int psv_trip_id;
+ unsigned long psv_temp;
bool bios_enabled;
};
+#ifdef CONFIG_ACPI
+
+/*
+ * On some platforms, there is a companion ACPI device, which adds
+ * passive trip temperature using the _PSV method. There is no specific
+ * passive temperature setting in the MMIO interface of this PCI device.
+ */
+static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+ int *nr_trips)
+{
+ struct acpi_device *adev;
+
+ ptd->psv_trip_id = -1;
+
+ adev = ACPI_COMPANION(&ptd->pdev->dev);
+ if (adev) {
+ unsigned long long r;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle, "_PSV", NULL,
+ &r);
+ if (ACPI_SUCCESS(status)) {
+ unsigned long trip_temp;
+
+ trip_temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+ if (trip_temp) {
+ ptd->psv_temp = trip_temp;
+ ptd->psv_trip_id = *nr_trips;
+ ++(*nr_trips);
+ }
+ }
+ }
+}
+#else
+static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+ int *nr_trips)
+{
+ ptd->psv_trip_id = -1;
+}
+#endif
+
static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
{
u8 tsel;
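
_PSV reports the passive trip point in tenths of a kelvin, and the driver converts it to the millidegrees Celsius the thermal core expects. A worked example, assuming the usual definition of the conversion macro for this era of include/linux/thermal.h:

    /* assumed: DECI_KELVIN_TO_MILLICELSIUS(t) == ((t) - 2732) * 100 */

    /* _PSV == 3282 deci-kelvin:
     *   (3282 - 2732) * 100 = 55000 millidegrees C, i.e. a 55 °C passive trip */
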
@@ -119,6 +166,8 @@ read_trips:
++(*nr_trips);
}
+ pch_wpt_add_acpi_psv_trip(ptd, nr_trips);
+
return 0;
}
@@ -194,6 +243,8 @@ static int pch_get_trip_type(struct thermal_zone_device *tzd, int trip,
*type = THERMAL_TRIP_CRITICAL;
else if (ptd->hot_trip_id == trip)
*type = THERMAL_TRIP_HOT;
+ else if (ptd->psv_trip_id == trip)
+ *type = THERMAL_TRIP_PASSIVE;
else
return -EINVAL;
@@ -208,6 +259,8 @@ static int pch_get_trip_temp(struct thermal_zone_device *tzd, int trip, int *tem
*temp = ptd->crt_temp;
else if (ptd->hot_trip_id == trip)
*temp = ptd->hot_temp;
+ else if (ptd->psv_trip_id == trip)
+ *temp = ptd->psv_temp;
else
return -EINVAL;
@@ -242,6 +295,11 @@ static int intel_pch_thermal_probe(struct pci_dev *pdev,
ptd->ops = &pch_dev_ops_wpt;
dev_name = "pch_skylake";
break;
+ case PCH_THERMAL_DID_HSW_1:
+ case PCH_THERMAL_DID_HSW_2:
+ ptd->ops = &pch_dev_ops_wpt;
+ dev_name = "pch_haswell";
+ break;
default:
dev_err(&pdev->dev, "unknown pch thermal device\n");
return -ENODEV;
@@ -324,6 +382,8 @@ static int intel_pch_thermal_resume(struct device *device)
static struct pci_device_id intel_pch_thermal_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2) },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 0e4dc0afcfd2..7a223074df3d 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -669,20 +669,10 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
.set_cur_state = powerclamp_set_cur_state,
};
-static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC},
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
-
static int __init powerclamp_probe(void)
{
- if (!x86_match_cpu(intel_powerclamp_ids)) {
- pr_err("Intel powerclamp does not run on family %d model %d\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
+ if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+ pr_err("CPU does not support MWAIT");
return -ENODEV;
}
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 886fcf37f291..b9923464599f 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -213,7 +213,7 @@ static int qrk_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
struct pci_dev *pdev = to_pci_dev(port->dev);
int ret;
- ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 1bfb6fdbaa20..1731b98d2471 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -83,7 +83,8 @@ static const struct serial8250_config uart_config[] = {
.name = "16550A",
.fifo_size = 16,
.tx_loadsz = 16,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
.rxtrig_bytes = {1, 4, 8, 14},
.flags = UART_CAP_FIFO,
},
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index b8d9c8c9d02a..417d9e7038e1 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -99,7 +99,7 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
case UART_LCR:
valshift = UNIPHIER_UART_LCR_SHIFT;
/* Divisor latch access bit does not exist. */
- value &= ~(UART_LCR_DLAB << valshift);
+ value &= ~UART_LCR_DLAB;
/* fall through */
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
@@ -199,7 +199,7 @@ static int uniphier_uart_probe(struct platform_device *pdev)
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
- dev_err(dev, "failed to get memory resource");
+ dev_err(dev, "failed to get memory resource\n");
return -EINVAL;
}
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index c7831407a882..25c1d7bc0100 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1625,6 +1625,7 @@ config SERIAL_SPRD_CONSOLE
config SERIAL_STM32
tristate "STMicroelectronics STM32 serial port support"
select SERIAL_CORE
+ depends on HAS_DMA
depends on ARM || COMPILE_TEST
help
This driver is for the on-chip Serial Controller on
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index fd8aa1f4ba78..168b10cad47b 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2132,11 +2132,29 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
mode |= ATMEL_US_USMODE_RS485;
} else if (termios->c_cflag & CRTSCTS) {
/* RS232 with hardware handshake (RTS/CTS) */
- if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
- dev_info(port->dev, "not enabling hardware flow control because DMA is used");
- termios->c_cflag &= ~CRTSCTS;
- } else {
+ if (atmel_use_fifo(port) &&
+ !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
+ /*
+ * with ATMEL_US_USMODE_HWHS set, the controller will
+ * be able to drive the RTS pin high/low when the RX
+ * FIFO is above RXFTHRES/below RXFTHRES2.
+ * It will also disable the transmitter when the CTS
+ * pin is high.
+ * This mode is not activated if CTS pin is a GPIO
+ * because in this case, the transmitter is always
+ * disabled (there must be an internal pull-up
+ * responsible for this behaviour).
+ * If the RTS pin is a GPIO, the controller won't be
+ * able to drive it according to the FIFO thresholds,
+ * but it will be handled by the driver.
+ */
mode |= ATMEL_US_USMODE_HWHS;
+ } else {
+ /*
+ * For platforms without FIFO, the flow control is
+ * handled by the driver.
+ */
+ mode |= ATMEL_US_USMODE_NORMAL;
}
} else {
/* RS232 without hardware handshake */
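
The decision tree in this hunk reduces to: hand RTS/CTS to the USART only when a FIFO is present and CTS is a dedicated pin; in every other CRTSCTS case fall back to normal mode with driver-managed flow control. A condensed sketch of the rule (the two predicates stand in for atmel_use_fifo() and the mctrl_gpio CTS lookup):

    if (has_fifo && !cts_is_gpio)
            mode |= ATMEL_US_USMODE_HWHS;    /* controller drives RTS, samples CTS */
    else
            mode |= ATMEL_US_USMODE_NORMAL;  /* driver emulates the handshake */
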
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index de9d5107c00a..76103f2c4a80 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -328,7 +328,7 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
sport->dma_tx_bytes = uart_circ_chars_pending(xmit);
- if (xmit->tail < xmit->head) {
+ if (xmit->tail < xmit->head || xmit->head == 0) {
sport->dma_tx_nents = 1;
sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes);
} else {
@@ -359,7 +359,6 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
sport->dma_tx_in_progress = true;
sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
dma_async_issue_pending(sport->dma_tx_chan);
-
}
static void lpuart_dma_tx_complete(void *arg)
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index d391650b82e7..42caccb5e87e 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -419,6 +419,7 @@ static struct dmi_system_id pch_uart_dmi_table[] = {
},
(void *)MINNOW_UARTCLK,
},
+ { }
};
/* Return UART clock, checking for board specific clocks. */
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 2675792a8f59..fb0672554123 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1130,9 +1130,13 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
{
struct sc16is7xx_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[0].port;
+ u8 state = sc16is7xx_port_read(port, SC16IS7XX_IOSTATE_REG);
- sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset),
- val ? BIT(offset) : 0);
+ if (val)
+ state |= BIT(offset);
+ else
+ state &= ~BIT(offset);
+ sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset),
BIT(offset));
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 6e4f63627479..f2303f390345 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -111,7 +111,7 @@ void uart_write_wakeup(struct uart_port *port)
* closed. No cookie for you.
*/
BUG_ON(!state);
- tty_wakeup(state->port.tty);
+ tty_port_tty_wakeup(&state->port);
}
static void uart_stop(struct tty_struct *tty)
@@ -632,7 +632,7 @@ static void uart_flush_buffer(struct tty_struct *tty)
if (port->ops->flush_buffer)
port->ops->flush_buffer(port);
uart_port_unlock(port, flags);
- tty_wakeup(tty);
+ tty_port_tty_wakeup(&state->port);
}
/*
@@ -2746,8 +2746,6 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
uport->cons = drv->cons;
uport->minor = drv->tty_driver->minor_start + uport->line;
- port->console = uart_console(uport);
-
/*
* If this port is a console, then the spinlock is already
* initialised.
@@ -2761,6 +2759,8 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
uart_configure_port(drv, state, uport);
+ port->console = uart_console(uport);
+
num_groups = 2;
if (uport->attr_group)
num_groups++;
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index 41d974923102..cd97ceb76e4f 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -31,7 +31,7 @@ struct stm32_usart_info {
struct stm32_usart_config cfg;
};
-#define UNDEF_REG ~0
+#define UNDEF_REG 0xff
/* Register offsets */
struct stm32_usart_info stm32f4_info = {
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index f37edaa5ac75..dd4c02fa4820 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1200,6 +1200,7 @@ static int __init cdns_early_console_setup(struct earlycon_device *device,
OF_EARLYCON_DECLARE(cdns, "xlnx,xuartps", cdns_early_console_setup);
OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p8", cdns_early_console_setup);
OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p12", cdns_early_console_setup);
+OF_EARLYCON_DECLARE(cdns, "xlnx,zynqmp-uart", cdns_early_console_setup);
/**
* cdns_uart_console_write - perform write operation
@@ -1438,6 +1439,7 @@ static const struct of_device_id cdns_uart_of_match[] = {
{ .compatible = "xlnx,xuartps", },
{ .compatible = "cdns,uart-r1p8", },
{ .compatible = "cdns,uart-r1p12", .data = &zynqmp_uart_def },
+ { .compatible = "xlnx,zynqmp-uart", .data = &zynqmp_uart_def },
{}
};
MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 06fb39c1d6dd..8c3bf3d613c0 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -870,10 +870,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;
+ if (new_screen_size > (4 << 20))
+ return -EINVAL;
newscreen = kmalloc(new_screen_size, GFP_USER);
if (!newscreen)
return -ENOMEM;
+ if (vc == sel_cons)
+ clear_selection();
+
old_rows = vc->vc_rows;
old_row_size = vc->vc_size_row;
@@ -1176,7 +1181,7 @@ static void csi_J(struct vc_data *vc, int vpar)
break;
case 3: /* erase scroll-back buffer (and whole display) */
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
- vc->vc_screenbuf_size >> 1);
+ vc->vc_screenbuf_size);
set_origin(vc);
if (con_is_visible(vc))
update_screen(vc);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 96ae69502c86..111b0e0b8698 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -188,6 +188,8 @@ static void host_stop(struct ci_hdrc *ci)
if (hcd) {
usb_remove_hcd(hcd);
+ ci->role = CI_ROLE_END;
+ synchronize_irq(ci->irq);
usb_put_hcd(hcd);
if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci) &&
(ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON))
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index fa9b26b91507..4c0fa0b17353 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -463,9 +463,18 @@ static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
*/
void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
{
+ bool ret;
+
switch (hsotg->dr_mode) {
case USB_DR_MODE_HOST:
- dwc2_force_mode(hsotg, true);
+ ret = dwc2_force_mode(hsotg, true);
+ /*
+ * NOTE: This is required for some Rockchip SoC-based
+ * platforms with a host-only dwc2 controller.
+ */
+ if (!ret)
+ msleep(50);
+
break;
case USB_DR_MODE_PERIPHERAL:
dwc2_force_mode(hsotg, false);
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index aad4107ef927..2a21a0414b1d 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -259,6 +259,13 @@ enum dwc2_lx_state {
DWC2_L3, /* Off state */
};
+/*
+ * Gadget periodic tx fifo sizes as used by legacy driver
+ * EP0 is not included
+ */
+#define DWC2_G_P_LEGACY_TX_FIFO_SIZE {256, 256, 256, 256, 768, 768, 768, \
+ 768, 0, 0, 0, 0, 0, 0, 0}
+
/* Gadget ep0 states */
enum dwc2_ep0_state {
DWC2_EP0_SETUP,
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 4cd6403a7566..24fbebc9b409 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -186,10 +186,9 @@ static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
*/
static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
- unsigned int fifo;
+ unsigned int ep;
unsigned int addr;
int timeout;
- u32 dptxfsizn;
u32 val;
/* Reset fifo map if not correctly cleared during previous session */
@@ -217,16 +216,16 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
* them to endpoints dynamically according to maxpacket size value of
* given endpoint.
*/
- for (fifo = 1; fifo < MAX_EPS_CHANNELS; fifo++) {
- dptxfsizn = dwc2_readl(hsotg->regs + DPTXFSIZN(fifo));
-
- val = (dptxfsizn & FIFOSIZE_DEPTH_MASK) | addr;
- addr += dptxfsizn >> FIFOSIZE_DEPTH_SHIFT;
-
- if (addr > hsotg->fifo_mem)
- break;
+ for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
+ if (!hsotg->g_tx_fifo_sz[ep])
+ continue;
+ val = addr;
+ val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT;
+ WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem,
+ "insufficient fifo memory");
+ addr += hsotg->g_tx_fifo_sz[ep];
- dwc2_writel(val, hsotg->regs + DPTXFSIZN(fifo));
+ dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
}
/*
@@ -3807,10 +3806,36 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg)
{
struct device_node *np = hsotg->dev->of_node;
+ u32 len = 0;
+ u32 i = 0;
/* Enable dma if requested in device tree */
hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");
+ /*
+ * Register TX periodic fifo size per endpoint.
+ * EP0 is excluded since it has no fifo configuration.
+ */
+ if (!of_find_property(np, "g-tx-fifo-size", &len))
+ goto rx_fifo;
+
+ len /= sizeof(u32);
+
+ /* Read tx fifo sizes other than ep0 */
+ if (of_property_read_u32_array(np, "g-tx-fifo-size",
+ &hsotg->g_tx_fifo_sz[1], len))
+ goto rx_fifo;
+
+ /* Add ep0 */
+ len++;
+
+ /* Make remaining TX fifos unavailable */
+ if (len < MAX_EPS_CHANNELS) {
+ for (i = len; i < MAX_EPS_CHANNELS; i++)
+ hsotg->g_tx_fifo_sz[i] = 0;
+ }
+
+rx_fifo:
/* Register RX fifo size */
of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);
@@ -3832,10 +3857,13 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
struct device *dev = hsotg->dev;
int epnum;
int ret;
+ int i;
+ u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
/* Initialize to legacy fifo configuration values */
hsotg->g_rx_fifo_sz = 2048;
hsotg->g_np_g_tx_fifo_sz = 1024;
+ memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));
/* Device tree specific probe */
dwc2_hsotg_of_probe(hsotg);
@@ -3853,6 +3881,9 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
hsotg->g_np_g_tx_fifo_sz);
dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);
+ for (i = 0; i < MAX_EPS_CHANNELS; i++)
+ dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,
+ hsotg->g_tx_fifo_sz[i]);
hsotg->gadget.max_speed = USB_SPEED_HIGH;
hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 07cc8929f271..1dfa56a5f1c5 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -783,6 +783,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
req->trb = trb;
req->trb_dma = dwc3_trb_dma_offset(dep, trb);
req->first_trb_index = dep->trb_enqueue;
+ dep->queued_requests++;
}
dwc3_ep_inc_enq(dep);
@@ -833,8 +834,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
trb->ctrl |= DWC3_TRB_CTRL_HWO;
- dep->queued_requests++;
-
trace_dwc3_prepare_trb(dep, trb);
}
@@ -1074,9 +1073,17 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
list_add_tail(&req->list, &dep->pending_list);
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- dep->flags & DWC3_EP_PENDING_REQUEST) {
- if (list_empty(&dep->started_list)) {
+ /*
+ * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
+ * wait for a XferNotReady event so we will know what's the current
+ * (micro-)frame number.
+ *
+ * Without this trick, we are very likely to get Bus Expiry
+ * errors which will force us to issue an EndTransfer command.
+ */
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+ if ((dep->flags & DWC3_EP_PENDING_REQUEST) &&
+ list_empty(&dep->started_list)) {
dwc3_stop_active_transfer(dwc, dep->number, true);
dep->flags = DWC3_EP_ENABLED;
}
@@ -1861,8 +1868,11 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
unsigned int s_pkt = 0;
unsigned int trb_status;
- dep->queued_requests--;
dwc3_ep_inc_deq(dep);
+
+ if (req->trb == trb)
+ dep->queued_requests--;
+
trace_dwc3_complete_trb(dep, trb);
/*
@@ -2980,7 +2990,7 @@ err3:
kfree(dwc->setup_buf);
err2:
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
err1:
@@ -3005,7 +3015,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
kfree(dwc->setup_buf);
kfree(dwc->zlp_buf);
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 54ad100af35b..e40d47d47d82 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -136,8 +136,60 @@ struct ffs_epfile {
/*
* Buffer for holding data from partial reads which may happen since
* we’re rounding user read requests to a multiple of a max packet size.
+ *
+ * The pointer is initialised with NULL value and may be set by
+ * __ffs_epfile_read_data function to point to a temporary buffer.
+ *
+ * In normal operation, calls to __ffs_epfile_read_buffered will consume
+ * data from said buffer and eventually free it. Importantly, while the
+ * function is using the buffer, it sets the pointer to NULL. This is
+ * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
+ * can never run concurrently (they are synchronised by epfile->mutex)
+ * so the latter will not assign a new value to the pointer.
+ *
+ * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
+ * valid) and sets the pointer to the READ_BUFFER_DROP value. This
+ * special value is the crux of the synchronisation between
+ * ffs_func_eps_disable and __ffs_epfile_read_data.
+ *
+ * Once __ffs_epfile_read_data is about to finish it will try to set the
+ * pointer back to its old value (as described above), but seeing as the
+ * pointer is non-NULL (namely READ_BUFFER_DROP) it will instead free
+ * the buffer.
+ *
+ * == State transitions ==
+ *
+ * • ptr == NULL: (initial state)
+ * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP
+ * ◦ __ffs_epfile_read_buffered: nop
+ * ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
+ * ◦ reading finishes: n/a, not in ‘and reading’ state
+ * • ptr == DROP:
+ * ◦ __ffs_epfile_read_buffer_free: nop
+ * ◦ __ffs_epfile_read_buffered: go to ptr == NULL
+ * ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
+ * ◦ reading finishes: n/a, not in ‘and reading’ state
+ * • ptr == buf:
+ * ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
+ * ◦ __ffs_epfile_read_buffered: go to ptr == NULL and reading
+ * ◦ __ffs_epfile_read_data: n/a, __ffs_epfile_read_buffered
+ * is always called first
+ * ◦ reading finishes: n/a, not in ‘and reading’ state
+ * • ptr == NULL and reading:
+ * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
+ * ◦ __ffs_epfile_read_buffered: n/a, mutex is held
+ * ◦ __ffs_epfile_read_data: n/a, mutex is held
+ * ◦ reading finishes and …
+ * … all data read: free buf, go to ptr == NULL
+ * … otherwise: go to ptr == buf and reading
+ * • ptr == DROP and reading:
+ * ◦ __ffs_epfile_read_buffer_free: nop
+ * ◦ __ffs_epfile_read_buffered: n/a, mutex is held
+ * ◦ __ffs_epfile_read_data: n/a, mutex is held
+ * ◦ reading finishes: free buf, go to ptr == DROP
*/
- struct ffs_buffer *read_buffer; /* P: epfile->mutex */
+ struct ffs_buffer *read_buffer;
+#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))
char name[5];
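
The state machine above rests on two atomic primitives: a reader takes exclusive ownership of the buffer with xchg(), and hands it back with a cmpxchg() that only succeeds if no drop was flagged in the meantime. A compact userspace analogue of that hand-off, using C11 atomics in place of the kernel's xchg/cmpxchg (types deliberately simplified):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct buf { size_t len; char data[]; };

    /* stand-in for READ_BUFFER_DROP */
    #define DROP ((struct buf *)(uintptr_t)1)

    static _Atomic(struct buf *) read_buffer;

    /* reader: take the buffer; NULL or DROP mean nothing to consume */
    static struct buf *take(void)
    {
            struct buf *b = atomic_exchange(&read_buffer, NULL);

            return b == DROP ? NULL : b;
    }

    /* reader: put a partially consumed buffer back; if a drop raced in
     * (the slot is no longer NULL), free the buffer instead */
    static void put_back(struct buf *b)
    {
            struct buf *expected = NULL;

            if (!atomic_compare_exchange_strong(&read_buffer, &expected, b))
                    free(b);
    }
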
@@ -736,25 +788,47 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
schedule_work(&io_data->work);
}
+static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
+{
+ /*
+ * See comment in struct ffs_epfile for full read_buffer pointer
+ * synchronisation story.
+ */
+ struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);
+ if (buf && buf != READ_BUFFER_DROP)
+ kfree(buf);
+}
+
/* Assumes epfile->mutex is held. */
static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
struct iov_iter *iter)
{
- struct ffs_buffer *buf = epfile->read_buffer;
+ /*
+ * Null out epfile->read_buffer so ffs_func_eps_disable does not free
+ * the buffer while we are using it. See comment in struct ffs_epfile
+ * for full read_buffer pointer synchronisation story.
+ */
+ struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
ssize_t ret;
- if (!buf)
+ if (!buf || buf == READ_BUFFER_DROP)
return 0;
ret = copy_to_iter(buf->data, buf->length, iter);
if (buf->length == ret) {
kfree(buf);
- epfile->read_buffer = NULL;
- } else if (unlikely(iov_iter_count(iter))) {
+ return ret;
+ }
+
+ if (unlikely(iov_iter_count(iter))) {
ret = -EFAULT;
} else {
buf->length -= ret;
buf->data += ret;
}
+
+ if (cmpxchg(&epfile->read_buffer, NULL, buf))
+ kfree(buf);
+
return ret;
}
@@ -783,7 +857,15 @@ static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
buf->length = data_len;
buf->data = buf->storage;
memcpy(buf->storage, data + ret, data_len);
- epfile->read_buffer = buf;
+
+ /*
+ * At this point read_buffer is NULL or READ_BUFFER_DROP (if
+ * ffs_func_eps_disable has been called in the meanwhile). See comment
+ * in struct ffs_epfile for full read_buffer pointer synchronisation
+ * story.
+ */
+ if (unlikely(cmpxchg(&epfile->read_buffer, NULL, buf)))
+ kfree(buf);
return ret;
}
@@ -1097,8 +1179,7 @@ ffs_epfile_release(struct inode *inode, struct file *file)
ENTER();
- kfree(epfile->read_buffer);
- epfile->read_buffer = NULL;
+ __ffs_epfile_read_buffer_free(epfile);
ffs_data_closed(epfile->ffs);
return 0;
@@ -1724,24 +1805,20 @@ static void ffs_func_eps_disable(struct ffs_function *func)
unsigned count = func->ffs->eps_count;
unsigned long flags;
+ spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
- if (epfile)
- mutex_lock(&epfile->mutex);
- spin_lock_irqsave(&func->ffs->eps_lock, flags);
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
++ep;
- spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
if (epfile) {
epfile->ep = NULL;
- kfree(epfile->read_buffer);
- epfile->read_buffer = NULL;
- mutex_unlock(&epfile->mutex);
+ __ffs_epfile_read_buffer_free(epfile);
++epfile;
}
} while (--count);
+ spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
static int ffs_func_eps_enable(struct ffs_function *func)
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 39a6df1e2ded..686067dd8d2c 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -581,8 +581,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
/* throttle high/super speed IRQ rate back slightly */
if (gadget_is_dualspeed(dev->gadget))
- req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
- dev->gadget->speed == USB_SPEED_SUPER)
+ req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
+ dev->gadget->speed == USB_SPEED_SUPER)) &&
+ !list_empty(&dev->tx_reqs))
? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
: 0;
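
The added !list_empty(&dev->tx_reqs) term forces a completion interrupt whenever the free-request pool has run dry; without one, no request would ever be reclaimed and transmission would stall. Otherwise the moderation is unchanged: an interrupt on every qmult-th packet. A small sketch of the predicate (names illustrative):

    /* should this tx request raise a completion interrupt? */
    static int want_tx_irq(unsigned int qlen, unsigned int qmult,
                           int free_reqs_available)
    {
            if (!free_reqs_available)
                    return 1;                  /* must reap completions */
            return (qlen % qmult) == 0;        /* every qmult-th packet */
    }
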
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index bb1f6c8f0f01..45bc997d0711 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1978,7 +1978,7 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
goto err;
}
- ep->ep.name = name;
+ ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 1e5f529d51a2..063064801ceb 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1308,11 +1308,6 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_mv_driver
#endif
-#ifdef CONFIG_MIPS_SEAD3
-#include "ehci-sead3.c"
-#define PLATFORM_DRIVER ehci_hcd_sead3_driver
-#endif
-
static int __init ehci_hcd_init(void)
{
int retval = 0;
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 876dca4fc216..a268d9e8d6cf 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -39,7 +39,7 @@
#define DRIVER_DESC "EHCI generic platform driver"
#define EHCI_MAX_CLKS 4
-#define EHCI_MAX_RSTS 3
+#define EHCI_MAX_RSTS 4
#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
struct ehci_platform_priv {
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
deleted file mode 100644
index 3d86cc2ffe68..000000000000
--- a/drivers/usb/host/ehci-sead3.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * MIPS CI13320A EHCI Host Controller driver
- * Based on "ehci-au1xxx.c" by K.Boge <karsten.boge@amd.com>
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/err.h>
-#include <linux/platform_device.h>
-
-static int ehci_sead3_setup(struct usb_hcd *hcd)
-{
- int ret;
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
- ehci->caps = hcd->regs + 0x100;
-
-#ifdef __BIG_ENDIAN
- ehci->big_endian_mmio = 1;
- ehci->big_endian_desc = 1;
-#endif
-
- ret = ehci_setup(hcd);
- if (ret)
- return ret;
-
- ehci->need_io_watchdog = 0;
-
- /* Set burst length to 16 words. */
- ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]);
-
- return ret;
-}
-
-const struct hc_driver ehci_sead3_hc_driver = {
- .description = hcd_name,
- .product_desc = "SEAD-3 EHCI",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
-
- /*
- * basic lifecycle operations
- *
- */
- .reset = ehci_sead3_setup,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
-};
-
-static int ehci_hcd_sead3_drv_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd;
- struct resource *res;
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
- if (pdev->resource[1].flags != IORESOURCE_IRQ) {
- pr_debug("resource[1] is not IORESOURCE_IRQ");
- return -ENOMEM;
- }
- hcd = usb_create_hcd(&ehci_sead3_hc_driver, &pdev->dev, "SEAD-3");
- if (!hcd)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hcd->regs)) {
- ret = PTR_ERR(hcd->regs);
- goto err1;
- }
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
- /* Root hub has integrated TT. */
- hcd->has_tt = 1;
-
- ret = usb_add_hcd(hcd, pdev->resource[1].start,
- IRQF_SHARED);
- if (ret == 0) {
- platform_set_drvdata(pdev, hcd);
- device_wakeup_enable(hcd->self.controller);
- return ret;
- }
-
-err1:
- usb_put_hcd(hcd);
- return ret;
-}
-
-static int ehci_hcd_sead3_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ehci_hcd_sead3_drv_suspend(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- bool do_wakeup = device_may_wakeup(dev);
-
- return ehci_suspend(hcd, do_wakeup);
-}
-
-static int ehci_hcd_sead3_drv_resume(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
-
- ehci_resume(hcd, false);
- return 0;
-}
-
-static const struct dev_pm_ops sead3_ehci_pmops = {
- .suspend = ehci_hcd_sead3_drv_suspend,
- .resume = ehci_hcd_sead3_drv_resume,
-};
-
-#define SEAD3_EHCI_PMOPS (&sead3_ehci_pmops)
-
-#else
-#define SEAD3_EHCI_PMOPS NULL
-#endif
-
-static struct platform_driver ehci_hcd_sead3_driver = {
- .probe = ehci_hcd_sead3_drv_probe,
- .remove = ehci_hcd_sead3_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
- .driver = {
- .name = "sead3-ehci",
- .pm = SEAD3_EHCI_PMOPS,
- }
-};
-
-MODULE_ALIAS("platform:sead3-ehci");
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 5b5880c0ae19..b38a228134df 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -221,6 +221,12 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
ohci->num_ports = board->ports;
at91_start_hc(pdev);
+ /*
+ * The RemoteWakeupConnected bit has to be set explicitly
+ * before calling ohci_run. The reset value of this bit is 0.
+ */
+ ohci->hc_control = OHCI_CTRL_RWC;
+
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval == 0) {
device_wakeup_enable(hcd->self.controller);
@@ -677,9 +683,6 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
* REVISIT: some boards will be able to turn VBUS off...
*/
if (!ohci_at91->wakeup) {
- ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
- ohci->hc_control &= OHCI_CTRL_RWC;
- ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
ohci->rh_state = OHCI_RH_HALTED;
/* flush the writes */
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 1700908b84ef..86612ac3fda2 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -72,7 +72,7 @@
static const char hcd_name [] = "ohci_hcd";
#define STATECHANGE_DELAY msecs_to_jiffies(300)
-#define IO_WATCHDOG_DELAY msecs_to_jiffies(250)
+#define IO_WATCHDOG_DELAY msecs_to_jiffies(275)
#include "ohci.h"
#include "pci-quirks.h"
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 730b9fd26685..0ef16900efed 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1166,7 +1166,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20);
+ msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
@@ -1355,6 +1355,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
return 0;
}
+/*
+ * Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3:
+ * warm-reset a USB3 device stuck in polling or compliance mode after resume.
+ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
+ */
+static bool xhci_port_missing_cas_quirk(int port_index,
+ __le32 __iomem **port_array)
+{
+ u32 portsc;
+
+ portsc = readl(port_array[port_index]);
+
+ /* if any of these are set we are not stuck */
+ if (portsc & (PORT_CONNECT | PORT_CAS))
+ return false;
+
+ if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
+ ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
+ return false;
+
+ /* clear wakeup/change bits, and do a warm port reset */
+ portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
+ portsc |= PORT_WR;
+ writel(portsc, port_array[port_index]);
+ /* flush write */
+ readl(port_array[port_index]);
+ return true;
+}
+
int xhci_bus_resume(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -1392,6 +1421,14 @@ int xhci_bus_resume(struct usb_hcd *hcd)
u32 temp;
temp = readl(port_array[port_index]);
+
+ /* warm reset CAS limited ports stuck in polling/compliance */
+ if ((xhci->quirks & XHCI_MISSING_CAS) &&
+ (hcd->speed >= HCD_USB3) &&
+ xhci_port_missing_cas_quirk(port_index, port_array)) {
+ xhci_dbg(xhci, "reset stuck port %d\n", port_index);
+ continue;
+ }
if (DEV_SUPERSPEED_ANY(temp))
temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
else
@@ -1410,7 +1447,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
if (need_usb2_u3_exit) {
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20);
+ msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index d7b0f97abbad..e96ae80d107e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -45,11 +45,13 @@
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
+#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
static const char hcd_name[] = "xhci_hcd";
@@ -153,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
}
@@ -169,6 +172,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
}
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+ xhci->quirks |= XHCI_MISSING_CAS;
+
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_EJ168) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index b2c1dc5dc0f3..f945380035d0 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -314,6 +314,8 @@ struct xhci_op_regs {
#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
#define XDEV_INACTIVE (0x6 << 5)
+#define XDEV_POLLING (0x7 << 5)
+#define XDEV_COMP_MODE (0xa << 5)
#define XDEV_RESUME (0xf << 5)
/* true: port has power (see HCC_PPC) */
#define PORT_POWER (1 << 9)
@@ -1653,6 +1655,7 @@ struct xhci_hcd {
#define XHCI_MTK_HOST (1 << 21)
#define XHCI_SSIC_PORT_UNUSED (1 << 22)
#define XHCI_NO_64BIT_SUPPORT (1 << 23)
+#define XHCI_MISSING_CAS (1 << 24)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index bff4869a57cd..4042ea017985 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1255,6 +1255,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
map_dma_buffer(request, musb, musb_ep);
+ pm_runtime_get_sync(musb->controller);
spin_lock_irqsave(&musb->lock, lockflags);
/* don't queue if the ep is down */
@@ -1275,6 +1276,9 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
unlock:
spin_unlock_irqrestore(&musb->lock, lockflags);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+
return status;
}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 1ab6973d4f61..cc1225485509 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -287,6 +287,7 @@ static int omap2430_musb_init(struct musb *musb)
}
musb->isr = omap2430_musb_interrupt;
phy_init(musb->phy);
+ phy_power_on(musb->phy);
l = musb_readl(musb->mregs, OTG_INTERFSEL);
@@ -323,8 +324,6 @@ static void omap2430_musb_enable(struct musb *musb)
struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
struct omap_musb_board_data *data = pdata->board_data;
- if (!WARN_ON(!musb->phy))
- phy_power_on(musb->phy);
switch (glue->status) {
@@ -361,9 +360,6 @@ static void omap2430_musb_disable(struct musb *musb)
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
- if (!WARN_ON(!musb->phy))
- phy_power_off(musb->phy);
-
if (glue->status != MUSB_UNKNOWN)
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
@@ -375,6 +371,7 @@ static int omap2430_musb_exit(struct musb *musb)
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
omap2430_low_level_exit(musb);
+ phy_power_off(musb->phy);
phy_exit(musb->phy);
musb->phy = NULL;
cancel_work_sync(&glue->omap_musb_mailbox_work);
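The omap2430 hunks rebalance the generic PHY lifecycle: powering the PHY moves out of the enable/disable callbacks, where on/off calls could end up unpaired, and is tied to init/exit instead. The resulting ordering, sketched with the generic PHY API (function names are illustrative):

#include <linux/phy/phy.h>

static int glue_init(struct phy *phy)
{
        int ret = phy_init(phy);                /* 1. initialize */

        if (ret)
                return ret;
        return phy_power_on(phy);               /* 2. power on */
}

static void glue_exit(struct phy *phy)
{
        phy_power_off(phy);                     /* 2'. power off first */
        phy_exit(phy);                          /* 1'. then tear down */
}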
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index 1d70add926f0..d544b331c9f2 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/delay.h>
#include <linux/io.h>
#include "common.h"
#include "rcar3.h"
@@ -35,10 +36,13 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
- if (enable)
+ if (enable) {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
- else
+ /* The controller on R-Car Gen3 needs to wait up to 45 usec */
+ udelay(45);
+ } else {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
+ }
return 0;
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 54a4de0efdba..f61477bed3a8 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1077,7 +1077,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
u8 control;
int result;
- cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
+ result = cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
+ if (result)
+ return result;
result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
|((control & CONTROL_RTS) ? TIOCM_RTS : 0)
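The cp210x fix closes an uninitialized-data path: when the CP210X_GET_MDMSTS read failed, the old code decoded a 'control' byte that was never written and handed the result to userspace. The shape of the fix, with a hypothetical accessor standing in for the register read:

/* Sketch: never decode a value whose read may have failed. */
static int get_modem_status(struct usb_serial_port *port)
{
        u8 status;
        int err = read_status_reg(port, &status); /* hypothetical helper */

        if (err)
                return err;     /* 'status' would be stack garbage here */

        return (status & 0x01 ? TIOCM_DTR : 0) |
               (status & 0x02 ? TIOCM_RTS : 0);
}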
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b2d767e743fc..0ff7f38d7800 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = {
/* ekey Devices */
{ USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
/* Infineon Devices */
- { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
/* GE Healthcare devices */
{ USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
/* Active Research (Actisense) devices */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f87a938cf005..21011c0a4c64 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -626,8 +626,9 @@
/*
* Infineon Technologies
*/
-#define INFINEON_VID 0x058b
-#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_VID 0x058b
+#define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
/*
* Acton Research Corp.
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index d213cf44a7e4..4a037b4a79cf 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1078,7 +1078,8 @@ static int usb_serial_probe(struct usb_interface *interface,
serial->disconnected = 0;
- usb_serial_console_init(serial->port[0]->minor);
+ if (num_ports > 0)
+ usb_serial_console_init(serial->port[0]->minor);
exit:
module_put(type->driver.owner);
return 0;
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 79b2b628066d..79451f7ef1b7 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -133,6 +133,13 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
bo[itr] = bi1[itr] ^ bi2[itr];
}
+/* Scratch space for MAC calculations. */
+struct wusb_mac_scratch {
+ struct aes_ccm_b0 b0;
+ struct aes_ccm_b1 b1;
+ struct aes_ccm_a ax;
+};
+
/*
* CC-MAC function WUSB1.0[6.5]
*
@@ -197,16 +204,15 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
* what sg[4] is for. Maybe there is a smarter way to do this.
*/
static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
- struct crypto_cipher *tfm_aes, void *mic,
+ struct crypto_cipher *tfm_aes,
+ struct wusb_mac_scratch *scratch,
+ void *mic,
const struct aes_ccm_nonce *n,
const struct aes_ccm_label *a, const void *b,
size_t blen)
{
int result = 0;
SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
- struct aes_ccm_b0 b0;
- struct aes_ccm_b1 b1;
- struct aes_ccm_a ax;
struct scatterlist sg[4], sg_dst;
void *dst_buf;
size_t dst_size;
@@ -218,16 +224,17 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
* These checks should be compile time optimized out
* ensure @a fills b1's mac_header and following fields
*/
- WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
- WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
- WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
- WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(*a) != sizeof(scratch->b1) - sizeof(scratch->b1.la));
+ WARN_ON(sizeof(scratch->b0) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(scratch->b1) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(scratch->ax) != sizeof(struct aes_ccm_block));
result = -ENOMEM;
zero_padding = blen % sizeof(struct aes_ccm_block);
if (zero_padding)
zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
- dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
+ dst_size = blen + sizeof(scratch->b0) + sizeof(scratch->b1) +
+ zero_padding;
dst_buf = kzalloc(dst_size, GFP_KERNEL);
if (!dst_buf)
goto error_dst_buf;
@@ -235,9 +242,9 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
memset(iv, 0, sizeof(iv));
/* Setup B0 */
- b0.flags = 0x59; /* Format B0 */
- b0.ccm_nonce = *n;
- b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
+ scratch->b0.flags = 0x59; /* Format B0 */
+ scratch->b0.ccm_nonce = *n;
+ scratch->b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
/* Setup B1
*
@@ -246,12 +253,12 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
* 14'--after clarification, it means to use A's contents
* for MAC Header, EO, sec reserved and padding.
*/
- b1.la = cpu_to_be16(blen + 14);
- memcpy(&b1.mac_header, a, sizeof(*a));
+ scratch->b1.la = cpu_to_be16(blen + 14);
+ memcpy(&scratch->b1.mac_header, a, sizeof(*a));
sg_init_table(sg, ARRAY_SIZE(sg));
- sg_set_buf(&sg[0], &b0, sizeof(b0));
- sg_set_buf(&sg[1], &b1, sizeof(b1));
+ sg_set_buf(&sg[0], &scratch->b0, sizeof(scratch->b0));
+ sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1));
sg_set_buf(&sg[2], b, blen);
/* 0 if well behaved :) */
sg_set_buf(&sg[3], bzero, zero_padding);
@@ -276,11 +283,12 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
* POS Crypto API: size is assumed to be AES's block size.
* Thanks for documenting it -- tip taken from airo.c
*/
- ax.flags = 0x01; /* as per WUSB 1.0 spec */
- ax.ccm_nonce = *n;
- ax.counter = 0;
- crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
- bytewise_xor(mic, &ax, iv, 8);
+ scratch->ax.flags = 0x01; /* as per WUSB 1.0 spec */
+ scratch->ax.ccm_nonce = *n;
+ scratch->ax.counter = 0;
+ crypto_cipher_encrypt_one(tfm_aes, (void *)&scratch->ax,
+ (void *)&scratch->ax);
+ bytewise_xor(mic, &scratch->ax, iv, 8);
result = 8;
error_cbc_crypt:
kfree(dst_buf);
@@ -303,6 +311,7 @@ ssize_t wusb_prf(void *out, size_t out_size,
struct aes_ccm_nonce n = *_n;
struct crypto_skcipher *tfm_cbc;
struct crypto_cipher *tfm_aes;
+ struct wusb_mac_scratch *scratch;
u64 sfn = 0;
__le64 sfn_le;
@@ -329,17 +338,25 @@ ssize_t wusb_prf(void *out, size_t out_size,
printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
goto error_setkey_aes;
}
+ scratch = kmalloc(sizeof(*scratch), GFP_KERNEL);
+ if (!scratch) {
+ result = -ENOMEM;
+ goto error_alloc_scratch;
+ }
for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
sfn_le = cpu_to_le64(sfn++);
memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */
- result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
+ result = wusb_ccm_mac(tfm_cbc, tfm_aes, scratch, out + bytes,
&n, a, b, blen);
if (result < 0)
goto error_ccm_mac;
bytes += result;
}
result = bytes;
+
+ kfree(scratch);
+error_alloc_scratch:
error_ccm_mac:
error_setkey_aes:
crypto_free_cipher(tfm_aes);
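The wusbcore change moves three aes_ccm_block-sized temporaries off the kernel stack: because they are fed to sg_set_buf(), they must be addressable through a scatterlist, which stack memory no longer guarantees once stacks are virtually mapped (CONFIG_VMAP_STACK). The caller now allocates one scratch structure and reuses it for every 64-byte block. Distilled to its shape (the per-block worker is hypothetical):

#include <linux/slab.h>

struct mac_scratch {                    /* one heap allocation, reused */
        u8 b0[16], b1[16], ax[16];
};

static int mac_all_blocks(size_t nblocks)
{
        struct mac_scratch *scratch;
        size_t i;
        int ret = 0;

        scratch = kmalloc(sizeof(*scratch), GFP_KERNEL);
        if (!scratch)
                return -ENOMEM;

        for (i = 0; i < nblocks && ret >= 0; i++)
                ret = mac_one_block(scratch, i);        /* hypothetical */

        kfree(scratch);
        return ret < 0 ? ret : 0;
}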
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index af2f117208f1..5d3b0db5ce0a 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2187,7 +2187,7 @@ config FB_GOLDFISH
config FB_COBALT
tristate "Cobalt server LCD frame buffer support"
- depends on FB && (MIPS_COBALT || MIPS_SEAD3)
+ depends on FB && MIPS_COBALT
config FB_SH7760
bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
index 07675d6f323e..2d3b691f3fc4 100644
--- a/drivers/video/fbdev/cobalt_lcdfb.c
+++ b/drivers/video/fbdev/cobalt_lcdfb.c
@@ -63,7 +63,6 @@
#define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK)
#define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE)
-#ifdef CONFIG_MIPS_COBALT
static inline void lcd_write_control(struct fb_info *info, u8 control)
{
writel((u32)control << 24, info->screen_base);
@@ -83,47 +82,6 @@ static inline u8 lcd_read_data(struct fb_info *info)
{
return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
}
-#else
-
-#define LCD_CTL 0x00
-#define LCD_DATA 0x08
-#define CPLD_STATUS 0x10
-#define CPLD_DATA 0x18
-
-static inline void cpld_wait(struct fb_info *info)
-{
- do {
- } while (readl(info->screen_base + CPLD_STATUS) & 1);
-}
-
-static inline void lcd_write_control(struct fb_info *info, u8 control)
-{
- cpld_wait(info);
- writel(control, info->screen_base + LCD_CTL);
-}
-
-static inline u8 lcd_read_control(struct fb_info *info)
-{
- cpld_wait(info);
- readl(info->screen_base + LCD_CTL);
- cpld_wait(info);
- return readl(info->screen_base + CPLD_DATA) & 0xff;
-}
-
-static inline void lcd_write_data(struct fb_info *info, u8 data)
-{
- cpld_wait(info);
- writel(data, info->screen_base + LCD_DATA);
-}
-
-static inline u8 lcd_read_data(struct fb_info *info)
-{
- cpld_wait(info);
- readl(info->screen_base + LCD_DATA);
- cpld_wait(info);
- return readl(info->screen_base + CPLD_DATA) & 0xff;
-}
-#endif
static int lcd_busy_wait(struct fb_info *info)
{
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 3b1ca4411073..a2564ab91e62 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,8 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
if (!pages)
return -ENOMEM;
- ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE,
- 0, pages);
+ ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages,
+ FOLL_WRITE);
if (ret < nr_pages) {
nr_pages = ret;
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 60bdad3a689b..150ce2abf6c8 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -245,8 +245,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
/* Get the physical addresses of the source buffer */
down_read(&current->mm->mmap_sem);
num_pinned = get_user_pages(param.local_vaddr - lb_offset,
- num_pages, (param.source == -1) ? READ : WRITE,
- 0, pages, NULL);
+ num_pages, (param.source == -1) ? 0 : FOLL_WRITE,
+ pages, NULL);
up_read(&current->mm->mmap_sem);
if (num_pinned != num_pages) {
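Both the pvr2fb and fsl_hypervisor hunks track the same API migration: get_user_pages() and get_user_pages_unlocked() dropped their separate write/force int arguments in favour of a single gup_flags word, so callers now pass FOLL_WRITE (or 0) directly. A sketch against the new signature:

#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch: pin @nr user pages for writing under the new gup API. */
static long pin_user_buffer(unsigned long vaddr, int nr, struct page **pages)
{
        long pinned;

        down_read(&current->mm->mmap_sem);
        pinned = get_user_pages(vaddr, nr, FOLL_WRITE, pages, NULL);
        up_read(&current->mm->mmap_sem);

        return pinned;  /* may be fewer than @nr; the caller must check */
}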
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 15b64076bc26..bdbadaa47ef3 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -156,12 +156,16 @@ size_t vme_get_size(struct vme_resource *resource)
case VME_MASTER:
retval = vme_master_get(resource, &enabled, &base, &size,
&aspace, &cycle, &dwidth);
+ if (retval)
+ return 0;
return size;
break;
case VME_SLAVE:
retval = vme_slave_get(resource, &enabled, &base, &size,
&buf_base, &aspace, &cycle);
+ if (retval)
+ return 0;
return size;
break;
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index e473e3b23720..6d1fbda0f461 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -499,6 +499,10 @@ static int wdat_wdt_resume_noirq(struct device *dev)
ret = wdat_wdt_enable_reboot(wdat);
if (ret)
return ret;
+
+ ret = wdat_wdt_ping(&wdat->wdd);
+ if (ret)
+ return ret;
}
return wdat_wdt_start(&wdat->wdd);
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index e12bd3635f83..26e5e8507f03 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -168,7 +168,9 @@ out:
#endif /* CONFIG_HIBERNATE_CALLBACKS */
struct shutdown_handler {
- const char *command;
+#define SHUTDOWN_CMD_SIZE 11
+ const char command[SHUTDOWN_CMD_SIZE];
+ bool flag;
void (*cb)(void);
};
@@ -206,22 +208,22 @@ static void do_reboot(void)
ctrl_alt_del();
}
+static struct shutdown_handler shutdown_handlers[] = {
+ { "poweroff", true, do_poweroff },
+ { "halt", false, do_poweroff },
+ { "reboot", true, do_reboot },
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+ { "suspend", true, do_suspend },
+#endif
+};
+
static void shutdown_handler(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
char *str;
struct xenbus_transaction xbt;
int err;
- static struct shutdown_handler handlers[] = {
- { "poweroff", do_poweroff },
- { "halt", do_poweroff },
- { "reboot", do_reboot },
-#ifdef CONFIG_HIBERNATE_CALLBACKS
- { "suspend", do_suspend },
-#endif
- {NULL, NULL},
- };
- static struct shutdown_handler *handler;
+ int idx;
if (shutting_down != SHUTDOWN_INVALID)
return;
@@ -238,13 +240,13 @@ static void shutdown_handler(struct xenbus_watch *watch,
return;
}
- for (handler = &handlers[0]; handler->command; handler++) {
- if (strcmp(str, handler->command) == 0)
+ for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) {
+ if (strcmp(str, shutdown_handlers[idx].command) == 0)
break;
}
/* Only acknowledge commands which we are prepared to handle. */
- if (handler->cb)
+ if (idx < ARRAY_SIZE(shutdown_handlers))
xenbus_write(xbt, "control", "shutdown", "");
err = xenbus_transaction_end(xbt, 0);
@@ -253,8 +255,8 @@ static void shutdown_handler(struct xenbus_watch *watch,
goto again;
}
- if (handler->cb) {
- handler->cb();
+ if (idx < ARRAY_SIZE(shutdown_handlers)) {
+ shutdown_handlers[idx].cb();
} else {
pr_info("Ignoring shutdown request: %s\n", str);
shutting_down = SHUTDOWN_INVALID;
@@ -310,6 +312,9 @@ static struct notifier_block xen_reboot_nb = {
static int setup_shutdown_watcher(void)
{
int err;
+ int idx;
+#define FEATURE_PATH_SIZE (SHUTDOWN_CMD_SIZE + sizeof("feature-"))
+ char node[FEATURE_PATH_SIZE];
err = register_xenbus_watch(&shutdown_watch);
if (err) {
@@ -326,6 +331,14 @@ static int setup_shutdown_watcher(void)
}
#endif
+ for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) {
+ if (!shutdown_handlers[idx].flag)
+ continue;
+ snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
+ shutdown_handlers[idx].command);
+ xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+ }
+
return 0;
}
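With the handler table fixed-size and flagged, setup_shutdown_watcher() can advertise exactly the commands this kernel is prepared to act on. Given the table above, the new loop leaves xenstore nodes of this form for the toolstack to inspect ("halt" is skipped because its flag is false):

control/feature-poweroff = "1"
control/feature-reboot   = "1"
control/feature-suspend  = "1"   (only with CONFIG_HIBERNATE_CALLBACKS)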
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index c1010f018bd8..1e8be12ebb55 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -364,7 +364,7 @@ out:
static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
{
- struct watch_adapter *watch, *tmp_watch;
+ struct watch_adapter *watch;
char *path, *token;
int err, rc;
LIST_HEAD(staging_q);
@@ -399,7 +399,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
}
list_add(&watch->list, &u->watches);
} else {
- list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
+ list_for_each_entry(watch, &u->watches, list) {
if (!strcmp(watch->token, token) &&
!strcmp(watch->watch.node, path)) {
unregister_xenbus_watch(&watch->watch);
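Dropping the _safe iterator here is sound because the loop exits right after removing the matching watch (the removal-and-break tail lies just past this hunk's context); the safe variant's pre-fetched next pointer only matters when iteration continues beyond a freed node. The distinction, sketched with illustrative names:

/* Plain iteration is fine when we stop right after the removal... */
list_for_each_entry(w, &head, list) {
        if (match(w)) {
                list_del(&w->list);
                kfree(w);
                break;          /* never advances from the freed node */
        }
}

/* ...but removing entries and continuing needs the _safe variant. */
list_for_each_entry_safe(w, tmp, &head, list) {
        if (match(w)) {
                list_del(&w->list);
                kfree(w);       /* 'tmp' already points at the next node */
        }
}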
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 611a23119675..6d40a972ffb2 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -335,7 +335,9 @@ static int backend_state;
static void xenbus_reset_backend_state_changed(struct xenbus_watch *w,
const char **v, unsigned int l)
{
- xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state);
+ if (xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i",
+ &backend_state) != 1)
+ backend_state = XenbusStateUnknown;
printk(KERN_DEBUG "XENBUS: backend %s %s\n",
v[XS_WATCH_PATH], xenbus_strstate(backend_state));
wake_up(&backend_state_wq);