aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpica/acnamesp.h4
-rw-r--r--drivers/acpi/acpica/exconfig.c14
-rw-r--r--drivers/acpi/acpica/nsinit.c76
-rw-r--r--drivers/ata/ahci.c1
-rw-r--r--drivers/ata/libata-core.c9
-rw-r--r--drivers/ata/libata-eh.c51
-rw-r--r--drivers/atm/zatm.c4
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/node.c5
-rw-r--r--drivers/base/power/main.c7
-rw-r--r--drivers/bcma/driver_mips.c2
-rw-r--r--drivers/block/DAC960.c11
-rw-r--r--drivers/block/DAC960.h1
-rw-r--r--drivers/block/aoe/aoeblk.c11
-rw-r--r--drivers/block/aoe/aoecmd.c3
-rw-r--r--drivers/block/brd.c10
-rw-r--r--drivers/block/drbd/drbd_bitmap.c5
-rw-r--r--drivers/block/drbd/drbd_debugfs.c20
-rw-r--r--drivers/block/drbd/drbd_int.h10
-rw-r--r--drivers/block/drbd/drbd_main.c74
-rw-r--r--drivers/block/drbd/drbd_receiver.c6
-rw-r--r--drivers/block/drbd/drbd_req.c4
-rw-r--r--drivers/block/drbd/drbd_req.h2
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/loop.c18
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c29
-rw-r--r--drivers/block/nbd.c77
-rw-r--r--drivers/block/null_blk.c36
-rw-r--r--drivers/block/paride/pd.c2
-rw-r--r--drivers/block/pktcdvd.c60
-rw-r--r--drivers/block/ps3disk.c2
-rw-r--r--drivers/block/rbd.c44
-rw-r--r--drivers/block/rsxx/core.c6
-rw-r--r--drivers/block/sx8.c2
-rw-r--r--drivers/block/virtio_blk.c8
-rw-r--r--drivers/block/xen-blkback/blkback.c2
-rw-r--r--drivers/block/xen-blkback/xenbus.c4
-rw-r--r--drivers/block/xen-blkfront.c7
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/clk/Kconfig6
-rw-r--r--drivers/clk/imx/clk-imx6ul.c2
-rw-r--r--drivers/cpufreq/Kconfig.arm2
-rw-r--r--drivers/crypto/inside-secure/safexcel.c4
-rw-r--r--drivers/dma/qcom/bam_dma.c18
-rw-r--r--drivers/firmware/arm_scmi/driver.c1
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c10
-rw-r--r--drivers/firmware/qcom_scm-32.c8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c44
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c15
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c1
-rw-r--r--drivers/gpu/drm/drm_drv.c2
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c7
-rw-r--r--drivers/gpu/drm/drm_file.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c3
-rw-r--r--drivers/gpu/drm/i915/i915_query.c15
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c4
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c1
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c51
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c48
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c2
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/k10temp.c51
-rw-r--r--drivers/hwtracing/intel_th/msu.c6
-rw-r--r--drivers/hwtracing/stm/core.c7
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c5
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c4
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c2
-rw-r--r--drivers/i2c/i2c-core-acpi.c13
-rw-r--r--drivers/ide/ide-atapi.c2
-rw-r--r--drivers/ide/ide-cd.c2
-rw-r--r--drivers/ide/ide-cd_ioctl.c2
-rw-r--r--drivers/ide/ide-devsets.c2
-rw-r--r--drivers/ide/ide-disk.c2
-rw-r--r--drivers/ide/ide-ioctls.c4
-rw-r--r--drivers/ide/ide-park.c4
-rw-r--r--drivers/ide/ide-pm.c5
-rw-r--r--drivers/ide/ide-tape.c4
-rw-r--r--drivers/ide/ide-taskfile.c4
-rw-r--r--drivers/iio/adc/Kconfig1
-rw-r--r--drivers/iio/adc/ad7793.c75
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c41
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c17
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c2
-rw-r--r--drivers/iio/buffer/kfifo_buf.c11
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c8
-rw-r--r--drivers/infiniband/core/cache.c2
-rw-r--r--drivers/infiniband/core/umem.c7
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c55
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c94
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c61
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h3
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c32
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c10
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c7
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c13
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.h1
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c18
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c60
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c10
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c22
-rw-r--r--drivers/input/mouse/synaptics.c6
-rw-r--r--drivers/isdn/hardware/eicon/diva.c22
-rw-r--r--drivers/isdn/hardware/eicon/diva.h5
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c18
-rw-r--r--drivers/lightnvm/core.c10
-rw-r--r--drivers/lightnvm/pblk-cache.c10
-rw-r--r--drivers/lightnvm/pblk-core.c233
-rw-r--r--drivers/lightnvm/pblk-gc.c112
-rw-r--r--drivers/lightnvm/pblk-init.c172
-rw-r--r--drivers/lightnvm/pblk-map.c33
-rw-r--r--drivers/lightnvm/pblk-rb.c48
-rw-r--r--drivers/lightnvm/pblk-read.c146
-rw-r--r--drivers/lightnvm/pblk-recovery.c121
-rw-r--r--drivers/lightnvm/pblk-rl.c29
-rw-r--r--drivers/lightnvm/pblk-sysfs.c15
-rw-r--r--drivers/lightnvm/pblk-write.c269
-rw-r--r--drivers/lightnvm/pblk.h58
-rw-r--r--drivers/md/bcache/bcache.h14
-rw-r--r--drivers/md/bcache/bset.c13
-rw-r--r--drivers/md/bcache/bset.h2
-rw-r--r--drivers/md/bcache/btree.c4
-rw-r--r--drivers/md/bcache/debug.c4
-rw-r--r--drivers/md/bcache/io.c4
-rw-r--r--drivers/md/bcache/request.c18
-rw-r--r--drivers/md/bcache/super.c109
-rw-r--r--drivers/md/bcache/sysfs.c51
-rw-r--r--drivers/md/bcache/util.c35
-rw-r--r--drivers/md/bcache/util.h5
-rw-r--r--drivers/md/dm-bio-prison-v1.c13
-rw-r--r--drivers/md/dm-bio-prison-v2.c13
-rw-r--r--drivers/md/dm-cache-target.c25
-rw-r--r--drivers/md/dm-core.h4
-rw-r--r--drivers/md/dm-crypt.c59
-rw-r--r--drivers/md/dm-integrity.c15
-rw-r--r--drivers/md/dm-io.c29
-rw-r--r--drivers/md/dm-kcopyd.c22
-rw-r--r--drivers/md/dm-log-userspace-base.c19
-rw-r--r--drivers/md/dm-mpath.c3
-rw-r--r--drivers/md/dm-region-hash.c23
-rw-r--r--drivers/md/dm-rq.c4
-rw-r--r--drivers/md/dm-snap.c17
-rw-r--r--drivers/md/dm-thin.c32
-rw-r--r--drivers/md/dm-verity-fec.c55
-rw-r--r--drivers/md/dm-verity-fec.h8
-rw-r--r--drivers/md/dm-zoned-target.c13
-rw-r--r--drivers/md/dm.c55
-rw-r--r--drivers/md/md-faulty.c2
-rw-r--r--drivers/md/md-linear.c2
-rw-r--r--drivers/md/md-multipath.c17
-rw-r--r--drivers/md/md-multipath.h2
-rw-r--r--drivers/md/md.c61
-rw-r--r--drivers/md/md.h4
-rw-r--r--drivers/md/raid0.c5
-rw-r--r--drivers/md/raid1.c76
-rw-r--r--drivers/md/raid1.h6
-rw-r--r--drivers/md/raid10.c60
-rw-r--r--drivers/md/raid10.h6
-rw-r--r--drivers/md/raid5-cache.c43
-rw-r--r--drivers/md/raid5-ppl.c42
-rw-r--r--drivers/md/raid5.c12
-rw-r--r--drivers/md/raid5.h2
-rw-r--r--drivers/memstick/core/ms_block.c6
-rw-r--r--drivers/memstick/core/mspro_block.c6
-rw-r--r--drivers/message/fusion/mptsas.c2
-rw-r--r--drivers/mfd/cros_ec_spi.c24
-rw-r--r--drivers/misc/cxl/cxl.h1
-rw-r--r--drivers/misc/cxl/pci.c12
-rw-r--r--drivers/misc/cxl/sysfs.c10
-rw-r--r--drivers/misc/eeprom/at24.c2
-rw-r--r--drivers/mmc/core/block.c14
-rw-r--r--drivers/mmc/core/queue.c5
-rw-r--r--drivers/mmc/host/sdhci-iproc.c33
-rw-r--r--drivers/mtd/mtd_blkdevs.c20
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c8
-rw-r--r--drivers/net/dsa/b53/b53_common.c13
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c5
-rw-r--r--drivers/net/dsa/b53/b53_priv.h1
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c36
-rw-r--r--drivers/net/ethernet/3com/3c59x.c104
-rw-r--r--drivers/net/ethernet/8390/ne.c4
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c88
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c14
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c50
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c5
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c61
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c3
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c22
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/phy/bcm-cygnus.c6
-rw-r--r--drivers/net/phy/bcm-phy-lib.c2
-rw-r--r--drivers/net/phy/bcm-phy-lib.h7
-rw-r--r--drivers/net/phy/bcm7xxx.c4
-rw-r--r--drivers/net/phy/micrel.c31
-rw-r--r--drivers/net/ppp/ppp_generic.c27
-rw-r--r--drivers/net/tun.c61
-rw-r--r--drivers/net/usb/cdc_mbim.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/virtio_net.c21
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c72
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c7
-rw-r--r--drivers/nvme/host/core.c159
-rw-r--r--drivers/nvme/host/fabrics.c100
-rw-r--r--drivers/nvme/host/fabrics.h4
-rw-r--r--drivers/nvme/host/fc.c15
-rw-r--r--drivers/nvme/host/nvme.h9
-rw-r--r--drivers/nvme/host/pci.c258
-rw-r--r--drivers/nvme/host/rdma.c12
-rw-r--r--drivers/nvme/host/trace.h4
-rw-r--r--drivers/nvme/target/Makefile4
-rw-r--r--drivers/nvme/target/admin-cmd.c143
-rw-r--r--drivers/nvme/target/core.c128
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/fabrics-cmd.c4
-rw-r--r--drivers/nvme/target/fc.c2
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c (renamed from drivers/nvme/target/io-cmd.c)77
-rw-r--r--drivers/nvme/target/io-cmd-file.c304
-rw-r--r--drivers/nvme/target/loop.c52
-rw-r--r--drivers/nvme/target/nvmet.h51
-rw-r--r--drivers/parisc/ccio-dma.c2
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c2
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/asus-wmi.c23
-rw-r--r--drivers/reset/reset-uniphier.c6
-rw-r--r--drivers/s390/block/dasd.c13
-rw-r--r--drivers/s390/cio/qdio_setup.c12
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c13
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c23
-rw-r--r--drivers/s390/scsi/zfcp_ext.h5
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c14
-rw-r--r--drivers/sbus/char/Kconfig7
-rw-r--r--drivers/sbus/char/Makefile1
-rw-r--r--drivers/sbus/char/jsflash.c658
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/aacraid/commsup.c8
-rw-r--r--drivers/scsi/gdth.c2
-rw-r--r--drivers/scsi/libiscsi.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/mvumi.c2
-rw-r--r--drivers/scsi/osd/osd_initiator.c24
-rw-r--r--drivers/scsi/osst.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_error.c10
-rw-r--r--drivers/scsi/scsi_lib.c4
-rw-r--r--drivers/scsi/scsi_transport_fc.c16
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c19
-rw-r--r--drivers/scsi/scsi_transport_srp.c26
-rw-r--r--drivers/scsi/sg.c4
-rw-r--r--drivers/scsi/sr_ioctl.c10
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c6
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/soc/lantiq/gphy.c36
-rw-r--r--drivers/spi/spi-bcm-qspi.c28
-rw-r--r--drivers/spi/spi-bcm2835aux.c5
-rw-r--r--drivers/spi/spi-cadence.c8
-rw-r--r--drivers/spi/spi-imx.c2
-rw-r--r--drivers/spi/spi-pxa2xx.h2
-rw-r--r--drivers/spi/spi-sh-msiof.c1
-rw-r--r--drivers/ssb/Kconfig4
-rw-r--r--drivers/target/target_core_iblock.c16
-rw-r--r--drivers/target/target_core_iblock.h2
-rw-r--r--drivers/target/target_core_pscsi.c3
-rw-r--r--drivers/target/target_core_user.c2
-rw-r--r--drivers/tee/tee_core.c11
-rw-r--r--drivers/tee/tee_shm.c5
-rw-r--r--drivers/thunderbolt/icm.c2
-rw-r--r--drivers/usb/host/xhci-hub.c2
-rw-r--r--drivers/usb/musb/musb_host.c5
-rw-r--r--drivers/usb/musb/musb_host.h7
-rw-r--r--drivers/usb/musb/musb_virthub.c25
-rw-r--r--drivers/usb/usbip/stub.h2
-rw-r--r--drivers/usb/usbip/stub_dev.c43
-rw-r--r--drivers/usb/usbip/stub_main.c105
-rw-r--r--drivers/vfio/vfio_iommu_type1.c25
-rw-r--r--drivers/vhost/net.c37
-rw-r--r--drivers/vhost/vhost.c3
-rw-r--r--drivers/xen/swiotlb-xen.c2
319 files changed, 4140 insertions, 3437 deletions
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 514aaf948ea9..3825df923480 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -56,6 +56,10 @@ acpi_status acpi_ns_initialize_objects(void);
acpi_status acpi_ns_initialize_devices(u32 flags);
+acpi_status
+acpi_ns_init_one_package(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
/*
* nsload - Namespace loading
*/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 99d92cb32803..f85c6f3271f6 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -174,6 +174,13 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
+ /* Complete the initialization/resolution of package objects */
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, 0,
+ acpi_ns_init_one_package, NULL, NULL,
+ NULL);
+
/* Parameter Data (optional) */
if (parameter_node) {
@@ -430,6 +437,13 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(status);
}
+ /* Complete the initialization/resolution of package objects */
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, 0,
+ acpi_ns_init_one_package, NULL, NULL,
+ NULL);
+
/* Store the ddb_handle into the Target operand */
status = acpi_ex_store(ddb_handle, target, walk_state);
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 77f2b5f4948a..d77257d1c827 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -242,6 +242,58 @@ error_exit:
/*******************************************************************************
*
+ * FUNCTION: acpi_ns_init_one_package
+ *
+ * PARAMETERS: obj_handle - Node
+ * level - Current nesting level
+ * context - Not used
+ * return_value - Not used
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every package
+ * within the namespace. Used during dynamic load of an SSDT.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_init_one_package(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node =
+ (struct acpi_namespace_node *)obj_handle;
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ return (AE_OK);
+ }
+
+ /* Exit if package is already initialized */
+
+ if (obj_desc->package.flags & AOPOBJ_DATA_VALID) {
+ return (AE_OK);
+ }
+
+ status = acpi_ds_get_package_arguments(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return (AE_OK);
+ }
+
+ status =
+ acpi_ut_walk_package_tree(obj_desc, NULL,
+ acpi_ds_init_package_element, NULL);
+ if (ACPI_FAILURE(status)) {
+ return (AE_OK);
+ }
+
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ns_init_one_object
*
* PARAMETERS: obj_handle - Node
@@ -360,27 +412,11 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
case ACPI_TYPE_PACKAGE:
- info->package_init++;
- status = acpi_ds_get_package_arguments(obj_desc);
- if (ACPI_FAILURE(status)) {
- break;
- }
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE,
- "%s: Completing resolution of Package elements\n",
- ACPI_GET_FUNCTION_NAME));
+ /* Complete the initialization/resolution of the package object */
- /*
- * Resolve all named references in package objects (and all
- * sub-packages). This action has been deferred until the entire
- * namespace has been loaded, in order to support external and
- * forward references from individual package elements (05/2017).
- */
- status = acpi_ut_walk_package_tree(obj_desc, NULL,
- acpi_ds_init_package_element,
- NULL);
-
- obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+ info->package_init++;
+ status =
+ acpi_ns_init_one_package(obj_handle, level, NULL, NULL);
break;
default:
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6389c88b3500..738fb22978dd 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -334,6 +334,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */
{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 68596bd4cf06..346b163f6e89 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4493,6 +4493,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
{ "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
+ /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
+ SD7SN6S256G and SD8SN8U256G */
+ { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
+
/* devices which puke on READ_NATIVE_MAX */
{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
@@ -4549,13 +4553,16 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM, },
- /* This specific Samsung model/firmware-rev does not handle LPM well */
+ /* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
+ { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
/* Sandisk devices which are known to not handle LPM well */
{ "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
/* devices that don't properly handle queued TRIM commands */
+ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 513b260bcff1..a2398e28c295 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -500,57 +500,6 @@ void ata_eh_release(struct ata_port *ap)
mutex_unlock(&ap->host->eh_mutex);
}
-/**
- * ata_scsi_timed_out - SCSI layer time out callback
- * @cmd: timed out SCSI command
- *
- * Handles SCSI layer timeout. We race with normal completion of
- * the qc for @cmd. If the qc is already gone, we lose and let
- * the scsi command finish (EH_HANDLED). Otherwise, the qc has
- * timed out and EH should be invoked. Prevent ata_qc_complete()
- * from finishing it by setting EH_SCHEDULED and return
- * EH_NOT_HANDLED.
- *
- * TODO: kill this function once old EH is gone.
- *
- * LOCKING:
- * Called from timer context
- *
- * RETURNS:
- * EH_HANDLED or EH_NOT_HANDLED
- */
-enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
-{
- struct Scsi_Host *host = cmd->device->host;
- struct ata_port *ap = ata_shost_to_port(host);
- unsigned long flags;
- struct ata_queued_cmd *qc;
- enum blk_eh_timer_return ret;
-
- DPRINTK("ENTER\n");
-
- if (ap->ops->error_handler) {
- ret = BLK_EH_NOT_HANDLED;
- goto out;
- }
-
- ret = BLK_EH_HANDLED;
- spin_lock_irqsave(ap->lock, flags);
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc) {
- WARN_ON(qc->scsicmd != cmd);
- qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
- qc->err_mask |= AC_ERR_TIMEOUT;
- ret = BLK_EH_NOT_HANDLED;
- }
- spin_unlock_irqrestore(ap->lock, flags);
-
- out:
- DPRINTK("EXIT, ret=%d\n", ret);
- return ret;
-}
-EXPORT_SYMBOL(ata_scsi_timed_out);
-
static void ata_eh_unload(struct ata_port *ap)
{
struct ata_link *link;
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 9c9a22958717..a8d2eb0ceb8d 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1151,8 +1151,8 @@ static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
}
-static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd,
- int offset, int swap)
+static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
+ int swap)
{
unsigned char buf[ZEPROM_SIZE];
struct zatm_dev *zatm_dev;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 2da998baa75c..30cc9c877ebb 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -534,14 +534,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
&dev_attr_spectre_v1.attr,
&dev_attr_spectre_v2.attr,
+ &dev_attr_spec_store_bypass.attr,
NULL
};
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 7a3a580821e0..a5e821d09656 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -490,7 +490,8 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
return 0;
}
-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ bool check_nid)
{
unsigned long end_pfn = start_pfn + nr_pages;
unsigned long pfn;
@@ -514,7 +515,7 @@ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
- ret = register_mem_sect_under_node(mem_blk, nid, true);
+ ret = register_mem_sect_under_node(mem_blk, nid, check_nid);
if (!err)
err = ret;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 02a497e7c785..e5e067091572 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1923,10 +1923,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
dev->power.wakeup_path = false;
- if (dev->power.no_pm_callbacks) {
- ret = 1; /* Let device go direct_complete */
+ if (dev->power.no_pm_callbacks)
goto unlock;
- }
if (dev->pm_domain)
callback = dev->pm_domain->ops.prepare;
@@ -1960,7 +1958,8 @@ unlock:
*/
spin_lock_irq(&dev->power.lock);
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
- pm_runtime_suspended(dev) && ret > 0 &&
+ ((pm_runtime_suspended(dev) && ret > 0) ||
+ dev->power.no_pm_callbacks) &&
!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
spin_unlock_irq(&dev->power.lock);
return 0;
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index f040aba48d50..27e9686b6d3a 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -184,7 +184,7 @@ static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
{
int i;
static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
- char interrupts[20];
+ char interrupts[25];
char *ints = interrupts;
for (i = 0; i < ARRAY_SIZE(irq_name); i++)
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 6918c3d9482e..6ca77d6047d6 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -1179,7 +1179,6 @@ static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T
if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32)))
return DAC960_Failure(Controller, "DMA mask out of range");
- Controller->BounceBufferLimit = DMA_BIT_MASK(32);
if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller)) {
CommandMailboxesSize = 0;
@@ -1380,11 +1379,8 @@ static bool DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T
dma_addr_t CommandMailboxDMA;
DAC960_V2_CommandStatus_T CommandStatus;
- if (!pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(64)))
- Controller->BounceBufferLimit = DMA_BIT_MASK(64);
- else if (!pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32)))
- Controller->BounceBufferLimit = DMA_BIT_MASK(32);
- else
+ if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(64)) &&
+ pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32)))
return DAC960_Failure(Controller, "DMA mask out of range");
/* This is a temporary dma mapping, used only in the scope of this function */
@@ -2540,7 +2536,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
continue;
}
Controller->RequestQueue[n] = RequestQueue;
- blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
RequestQueue->queuedata = Controller;
blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
@@ -6557,7 +6552,7 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
dac960_initial_status_proc_show, Controller);
proc_create_single_data("current_status", 0, ControllerProcEntry,
dac960_current_status_proc_show, Controller);
- proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
+ proc_create_data("user_command", 0600, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
Controller->ControllerProcEntry = ControllerProcEntry;
}
diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h
index 21aff470d268..1439e651928b 100644
--- a/drivers/block/DAC960.h
+++ b/drivers/block/DAC960.h
@@ -2295,7 +2295,6 @@ typedef struct DAC960_Controller
unsigned short MaxBlocksPerCommand;
unsigned short ControllerScatterGatherLimit;
unsigned short DriverScatterGatherLimit;
- u64 BounceBufferLimit;
unsigned int CombinedStatusBufferLength;
unsigned int InitialStatusLength;
unsigned int CurrentStatusLength;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 6797e6c23c8a..429ebb84b592 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -159,14 +159,14 @@ static int aoe_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, aoedisk_debugfs_show, inode->i_private);
}
-static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
-static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
-static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
+static DEVICE_ATTR(state, 0444, aoedisk_show_state, NULL);
+static DEVICE_ATTR(mac, 0444, aoedisk_show_mac, NULL);
+static DEVICE_ATTR(netif, 0444, aoedisk_show_netif, NULL);
static struct device_attribute dev_attr_firmware_version = {
- .attr = { .name = "firmware-version", .mode = S_IRUGO },
+ .attr = { .name = "firmware-version", .mode = 0444 },
.show = aoedisk_show_fwver,
};
-static DEVICE_ATTR(payload, S_IRUGO, aoedisk_show_payload, NULL);
+static DEVICE_ATTR(payload, 0444, aoedisk_show_payload, NULL);
static struct attribute *aoe_attrs[] = {
&dev_attr_state.attr,
@@ -388,7 +388,6 @@ aoeblk_gdalloc(void *vp)
d->aoemajor, d->aoeminor);
goto err_mempool;
}
- blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
spin_lock_irqsave(&d->lock, flags);
WARN_ON(!(d->flags & DEVFL_GD_NOW));
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 540bb60cd071..096882e54095 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1032,8 +1032,9 @@ bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
iter.bi_size = cnt;
__bio_for_each_segment(bv, bio, iter, iter) {
- char *p = page_address(bv.bv_page) + bv.bv_offset;
+ char *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
skb_copy_bits(skb, soff, p, bv.bv_len);
+ kunmap_atomic(p);
soff += bv.bv_len;
}
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 66cb0f857f64..bb976598ee43 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -331,15 +331,15 @@ static const struct block_device_operations brd_fops = {
* And now the modules code and kernel interface.
*/
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
-module_param(rd_nr, int, S_IRUGO);
+module_param(rd_nr, int, 0444);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
-module_param(rd_size, ulong, S_IRUGO);
+module_param(rd_size, ulong, 0444);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
static int max_part = 1;
-module_param(max_part, int, S_IRUGO);
+module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");
MODULE_LICENSE("GPL");
@@ -402,6 +402,10 @@ static struct brd_device *brd_alloc(int i)
set_capacity(disk, rd_size * 2);
disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
+ /* Tell the block layer that this is not a rotational device */
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+
return brd;
out_free_queue:
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 9f4e6f502b84..11a85b740327 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -977,7 +977,7 @@ static void drbd_bm_endio(struct bio *bio)
bm_page_unlock_io(device, idx);
if (ctx->flags & BM_AIO_COPY_PAGES)
- mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
+ mempool_free(bio->bi_io_vec[0].bv_page, &drbd_md_io_page_pool);
bio_put(bio);
@@ -1014,7 +1014,8 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bm_set_page_unchanged(b->bm_pages[page_nr]);
if (ctx->flags & BM_AIO_COPY_PAGES) {
- page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM);
+ page = mempool_alloc(&drbd_md_io_page_pool,
+ GFP_NOIO | __GFP_HIGHMEM);
copy_highpage(page, b->bm_pages[page_nr]);
bm_store_page_idx(page, page_nr);
} else
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index ab21976a87b2..5d5e8d6a8a56 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -481,9 +481,9 @@ void drbd_debugfs_resource_add(struct drbd_resource *resource)
goto fail;
resource->debugfs_res_connections = dentry;
- dentry = debugfs_create_file("in_flight_summary", S_IRUSR|S_IRGRP,
- resource->debugfs_res, resource,
- &in_flight_summary_fops);
+ dentry = debugfs_create_file("in_flight_summary", 0440,
+ resource->debugfs_res, resource,
+ &in_flight_summary_fops);
if (IS_ERR_OR_NULL(dentry))
goto fail;
resource->debugfs_res_in_flight_summary = dentry;
@@ -645,16 +645,16 @@ void drbd_debugfs_connection_add(struct drbd_connection *connection)
goto fail;
connection->debugfs_conn = dentry;
- dentry = debugfs_create_file("callback_history", S_IRUSR|S_IRGRP,
- connection->debugfs_conn, connection,
- &connection_callback_history_fops);
+ dentry = debugfs_create_file("callback_history", 0440,
+ connection->debugfs_conn, connection,
+ &connection_callback_history_fops);
if (IS_ERR_OR_NULL(dentry))
goto fail;
connection->debugfs_conn_callback_history = dentry;
- dentry = debugfs_create_file("oldest_requests", S_IRUSR|S_IRGRP,
- connection->debugfs_conn, connection,
- &connection_oldest_requests_fops);
+ dentry = debugfs_create_file("oldest_requests", 0440,
+ connection->debugfs_conn, connection,
+ &connection_oldest_requests_fops);
if (IS_ERR_OR_NULL(dentry))
goto fail;
connection->debugfs_conn_oldest_requests = dentry;
@@ -824,7 +824,7 @@ void drbd_debugfs_device_add(struct drbd_device *device)
device->debugfs_minor = dentry;
#define DCF(name) do { \
- dentry = debugfs_create_file(#name, S_IRUSR|S_IRGRP, \
+ dentry = debugfs_create_file(#name, 0440, \
device->debugfs_vol, device, \
&device_ ## name ## _fops); \
if (IS_ERR_OR_NULL(dentry)) \
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 461ddec04e7c..bc4ed2ed40a2 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1405,8 +1405,8 @@ extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache; /* peer requests */
extern struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
-extern mempool_t *drbd_request_mempool;
-extern mempool_t *drbd_ee_mempool;
+extern mempool_t drbd_request_mempool;
+extern mempool_t drbd_ee_mempool;
/* drbd's page pool, used to buffer data received from the peer,
* or data requested by the peer.
@@ -1432,16 +1432,16 @@ extern wait_queue_head_t drbd_pp_wait;
* 128 should be plenty, currently we probably can get away with as few as 1.
*/
#define DRBD_MIN_POOL_PAGES 128
-extern mempool_t *drbd_md_io_page_pool;
+extern mempool_t drbd_md_io_page_pool;
/* We also need to make sure we get a bio
* when we need it for housekeeping purposes */
-extern struct bio_set *drbd_md_io_bio_set;
+extern struct bio_set drbd_md_io_bio_set;
/* to allocate from that set */
extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
/* And a bio_set for cloning */
-extern struct bio_set *drbd_io_bio_set;
+extern struct bio_set drbd_io_bio_set;
extern struct mutex resources_mutex;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index c2d154faac02..7655d6133139 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -124,11 +124,11 @@ struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache; /* peer requests */
struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
-mempool_t *drbd_request_mempool;
-mempool_t *drbd_ee_mempool;
-mempool_t *drbd_md_io_page_pool;
-struct bio_set *drbd_md_io_bio_set;
-struct bio_set *drbd_io_bio_set;
+mempool_t drbd_request_mempool;
+mempool_t drbd_ee_mempool;
+mempool_t drbd_md_io_page_pool;
+struct bio_set drbd_md_io_bio_set;
+struct bio_set drbd_io_bio_set;
/* I do not use a standard mempool, because:
1) I want to hand out the pre-allocated objects first.
@@ -153,10 +153,10 @@ struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
struct bio *bio;
- if (!drbd_md_io_bio_set)
+ if (!bioset_initialized(&drbd_md_io_bio_set))
return bio_alloc(gfp_mask, 1);
- bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+ bio = bio_alloc_bioset(gfp_mask, 1, &drbd_md_io_bio_set);
if (!bio)
return NULL;
return bio;
@@ -2097,16 +2097,11 @@ static void drbd_destroy_mempools(void)
/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
- if (drbd_io_bio_set)
- bioset_free(drbd_io_bio_set);
- if (drbd_md_io_bio_set)
- bioset_free(drbd_md_io_bio_set);
- if (drbd_md_io_page_pool)
- mempool_destroy(drbd_md_io_page_pool);
- if (drbd_ee_mempool)
- mempool_destroy(drbd_ee_mempool);
- if (drbd_request_mempool)
- mempool_destroy(drbd_request_mempool);
+ bioset_exit(&drbd_io_bio_set);
+ bioset_exit(&drbd_md_io_bio_set);
+ mempool_exit(&drbd_md_io_page_pool);
+ mempool_exit(&drbd_ee_mempool);
+ mempool_exit(&drbd_request_mempool);
if (drbd_ee_cache)
kmem_cache_destroy(drbd_ee_cache);
if (drbd_request_cache)
@@ -2116,11 +2111,6 @@ static void drbd_destroy_mempools(void)
if (drbd_al_ext_cache)
kmem_cache_destroy(drbd_al_ext_cache);
- drbd_io_bio_set = NULL;
- drbd_md_io_bio_set = NULL;
- drbd_md_io_page_pool = NULL;
- drbd_ee_mempool = NULL;
- drbd_request_mempool = NULL;
drbd_ee_cache = NULL;
drbd_request_cache = NULL;
drbd_bm_ext_cache = NULL;
@@ -2133,18 +2123,7 @@ static int drbd_create_mempools(void)
{
struct page *page;
const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
- int i;
-
- /* prepare our caches and mempools */
- drbd_request_mempool = NULL;
- drbd_ee_cache = NULL;
- drbd_request_cache = NULL;
- drbd_bm_ext_cache = NULL;
- drbd_al_ext_cache = NULL;
- drbd_pp_pool = NULL;
- drbd_md_io_page_pool = NULL;
- drbd_md_io_bio_set = NULL;
- drbd_io_bio_set = NULL;
+ int i, ret;
/* caches */
drbd_request_cache = kmem_cache_create(
@@ -2168,26 +2147,26 @@ static int drbd_create_mempools(void)
goto Enomem;
/* mempools */
- drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
- if (drbd_io_bio_set == NULL)
+ ret = bioset_init(&drbd_io_bio_set, BIO_POOL_SIZE, 0, 0);
+ if (ret)
goto Enomem;
- drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0,
- BIOSET_NEED_BVECS);
- if (drbd_md_io_bio_set == NULL)
+ ret = bioset_init(&drbd_md_io_bio_set, DRBD_MIN_POOL_PAGES, 0,
+ BIOSET_NEED_BVECS);
+ if (ret)
goto Enomem;
- drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
- if (drbd_md_io_page_pool == NULL)
+ ret = mempool_init_page_pool(&drbd_md_io_page_pool, DRBD_MIN_POOL_PAGES, 0);
+ if (ret)
goto Enomem;
- drbd_request_mempool = mempool_create_slab_pool(number,
- drbd_request_cache);
- if (drbd_request_mempool == NULL)
+ ret = mempool_init_slab_pool(&drbd_request_mempool, number,
+ drbd_request_cache);
+ if (ret)
goto Enomem;
- drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache);
- if (drbd_ee_mempool == NULL)
+ ret = mempool_init_slab_pool(&drbd_ee_mempool, number, drbd_ee_cache);
+ if (ret)
goto Enomem;
/* drbd's page pool */
@@ -3010,8 +2989,7 @@ static int __init drbd_init(void)
goto fail;
err = -ENOMEM;
- drbd_proc = proc_create_single("drbd", S_IFREG | S_IRUGO , NULL,
- drbd_seq_show);
+ drbd_proc = proc_create_single("drbd", S_IFREG | 0444 , NULL, drbd_seq_show);
if (!drbd_proc) {
pr_err("unable to register proc file\n");
goto fail;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c72dee0ef083..be9450f5ad1c 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -378,7 +378,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
return NULL;
- peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
+ peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
if (!peer_req) {
if (!(gfp_mask & __GFP_NOWARN))
drbd_err(device, "%s: allocation failed\n", __func__);
@@ -409,7 +409,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
return peer_req;
fail:
- mempool_free(peer_req, drbd_ee_mempool);
+ mempool_free(peer_req, &drbd_ee_mempool);
return NULL;
}
@@ -426,7 +426,7 @@ void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *
peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
drbd_al_complete_io(device, &peer_req->i);
}
- mempool_free(peer_req, drbd_ee_mempool);
+ mempool_free(peer_req, &drbd_ee_mempool);
}
int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index a500e738d929..a47e4987ee46 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -55,7 +55,7 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio
{
struct drbd_request *req;
- req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
+ req = mempool_alloc(&drbd_request_mempool, GFP_NOIO);
if (!req)
return NULL;
memset(req, 0, sizeof(*req));
@@ -184,7 +184,7 @@ void drbd_req_destroy(struct kref *kref)
}
}
- mempool_free(req, drbd_request_mempool);
+ mempool_free(req, &drbd_request_mempool);
}
static void wake_all_senders(struct drbd_connection *connection)
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index cb97b3b30962..94c654020f0f 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -269,7 +269,7 @@ enum drbd_req_state_bits {
static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
{
struct bio *bio;
- bio = bio_clone_fast(bio_src, GFP_NOIO, drbd_io_bio_set);
+ bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
req->private_bio = bio;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 8ec7235fc93b..8871b5044d9e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4450,7 +4450,7 @@ static ssize_t floppy_cmos_show(struct device *dev,
return sprintf(buf, "%X\n", UDP->cmos);
}
-static DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL);
+static DEVICE_ATTR(cmos, 0444, floppy_cmos_show, NULL);
static struct attribute *floppy_dev_attrs[] = {
&dev_attr_cmos.attr,
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 5d4e31655d96..4838b0dbaad3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -732,7 +732,7 @@ static ssize_t loop_attr_do_show_##_name(struct device *d, \
return loop_attr_show(d, b, loop_attr_##_name##_show); \
} \
static struct device_attribute loop_attr_##_name = \
- __ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL);
+ __ATTR(_name, 0444, loop_attr_do_show_##_name, NULL);
static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
{
@@ -809,16 +809,17 @@ static struct attribute_group loop_attribute_group = {
.attrs= loop_attrs,
};
-static int loop_sysfs_init(struct loop_device *lo)
+static void loop_sysfs_init(struct loop_device *lo)
{
- return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
- &loop_attribute_group);
+ lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
+ &loop_attribute_group);
}
static void loop_sysfs_exit(struct loop_device *lo)
{
- sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
- &loop_attribute_group);
+ if (lo->sysfs_inited)
+ sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
+ &loop_attribute_group);
}
static void loop_config_discard(struct loop_device *lo)
@@ -1068,6 +1069,7 @@ static int loop_clr_fd(struct loop_device *lo)
if (bdev) {
bdput(bdev);
invalidate_bdev(bdev);
+ bdev->bd_inode->i_mapping->wb_err = 0;
}
set_capacity(lo->lo_disk, 0);
loop_sysfs_exit(lo);
@@ -1676,9 +1678,9 @@ static const struct block_device_operations lo_fops = {
* And now the modules code and kernel interface.
*/
static int max_loop;
-module_param(max_loop, int, S_IRUGO);
+module_param(max_loop, int, 0444);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
-module_param(max_part, int, S_IRUGO);
+module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index b78de9879f4f..4d42c7af7de7 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -58,6 +58,7 @@ struct loop_device {
struct kthread_worker worker;
struct task_struct *worker_task;
bool use_dio;
+ bool sysfs_inited;
struct request_queue *lo_queue;
struct blk_mq_tag_set tag_set;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 769c551e3d71..c73626decb46 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2285,7 +2285,7 @@ static ssize_t mtip_hw_show_status(struct device *dev,
return size;
}
-static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+static DEVICE_ATTR(status, 0444, mtip_hw_show_status, NULL);
/* debugsfs entries */
@@ -2566,10 +2566,9 @@ static int mtip_hw_debugfs_init(struct driver_data *dd)
return -1;
}
- debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
- &mtip_flags_fops);
- debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
- &mtip_regs_fops);
+ debugfs_create_file("flags", 0444, dd->dfs_node, dd, &mtip_flags_fops);
+ debugfs_create_file("registers", 0444, dd->dfs_node, dd,
+ &mtip_regs_fops);
return 0;
}
@@ -2726,15 +2725,11 @@ static void mtip_softirq_done_fn(struct request *rq)
blk_mq_end_request(rq, cmd->status);
}
-static void mtip_abort_cmd(struct request *req, void *data,
- bool reserved)
+static void mtip_abort_cmd(struct request *req, void *data, bool reserved)
{
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
struct driver_data *dd = data;
- if (!blk_mq_request_started(req))
- return;
-
dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
clear_bit(req->tag, dd->port->cmds_to_issue);
@@ -2742,14 +2737,10 @@ static void mtip_abort_cmd(struct request *req, void *data,
mtip_softirq_done_fn(req);
}
-static void mtip_queue_cmd(struct request *req, void *data,
- bool reserved)
+static void mtip_queue_cmd(struct request *req, void *data, bool reserved)
{
struct driver_data *dd = data;
- if (!blk_mq_request_started(req))
- return;
-
set_bit(req->tag, dd->port->cmds_to_issue);
blk_abort_request(req);
}
@@ -3720,7 +3711,8 @@ static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
cmd->status = BLK_STS_TIMEOUT;
- return BLK_EH_HANDLED;
+ blk_mq_complete_request(req);
+ return BLK_EH_DONE;
}
if (test_bit(req->tag, dd->port->cmds_to_issue))
@@ -3862,7 +3854,6 @@ skip_create_disk:
blk_queue_max_hw_sectors(dd->queue, 0xffff);
blk_queue_max_segment_size(dd->queue, 0x400000);
blk_queue_io_min(dd->queue, 4096);
- blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);
/* Signal trim support */
if (dd->trim_supp == true) {
@@ -4273,7 +4264,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
if (!dd->isr_workq) {
dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
rv = -ENOMEM;
- goto block_initialize_err;
+ goto setmask_err;
}
memset(cpu_list, 0, sizeof(cpu_list));
@@ -4614,7 +4605,7 @@ static int __init mtip_init(void)
}
if (dfs_parent) {
dfs_device_status = debugfs_create_file("device_status",
- S_IRUGO, dfs_parent, NULL,
+ 0444, dfs_parent, NULL,
&mtip_device_status_fops);
if (IS_ERR_OR_NULL(dfs_device_status)) {
pr_err("Error creating device_status node\n");
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index afbc202ca6fd..3ed1ef8ee528 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -166,16 +166,19 @@ static ssize_t pid_show(struct device *dev,
}
static const struct device_attribute pid_attr = {
- .attr = { .name = "pid", .mode = S_IRUGO},
+ .attr = { .name = "pid", .mode = 0444},
.show = pid_show,
};
static void nbd_dev_remove(struct nbd_device *nbd)
{
struct gendisk *disk = nbd->disk;
+ struct request_queue *q;
+
if (disk) {
+ q = disk->queue;
del_gendisk(disk);
- blk_cleanup_queue(disk->queue);
+ blk_cleanup_queue(q);
blk_mq_free_tag_set(&nbd->tag_set);
disk->private_data = NULL;
put_disk(disk);
@@ -213,7 +216,15 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
}
if (!nsock->dead) {
kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
- atomic_dec(&nbd->config->live_connections);
+ if (atomic_dec_return(&nbd->config->live_connections) == 0) {
+ if (test_and_clear_bit(NBD_DISCONNECT_REQUESTED,
+ &nbd->config->runtime_flags)) {
+ set_bit(NBD_DISCONNECTED,
+ &nbd->config->runtime_flags);
+ dev_info(nbd_to_dev(nbd),
+ "Disconnected due to user request.\n");
+ }
+ }
}
nsock->dead = true;
nsock->pending = NULL;
@@ -231,9 +242,22 @@ static void nbd_size_clear(struct nbd_device *nbd)
static void nbd_size_update(struct nbd_device *nbd)
{
struct nbd_config *config = nbd->config;
+ struct block_device *bdev = bdget_disk(nbd->disk, 0);
+
+ if (config->flags & NBD_FLAG_SEND_TRIM) {
+ nbd->disk->queue->limits.discard_granularity = config->blksize;
+ blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
+ }
blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
set_capacity(nbd->disk, config->bytesize >> 9);
+ if (bdev) {
+ if (bdev->bd_disk)
+ bd_set_size(bdev, config->bytesize);
+ else
+ bdev->bd_invalidated = 1;
+ bdput(bdev);
+ }
kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}
@@ -243,6 +267,8 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
struct nbd_config *config = nbd->config;
config->blksize = blocksize;
config->bytesize = blocksize * nr_blocks;
+ if (nbd->task_recv != NULL)
+ nbd_size_update(nbd);
}
static void nbd_complete_rq(struct request *req)
@@ -286,13 +312,15 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
if (!refcount_inc_not_zero(&nbd->config_refs)) {
cmd->status = BLK_STS_TIMEOUT;
- return BLK_EH_HANDLED;
+ goto done;
}
config = nbd->config;
if (config->num_connections > 1) {
dev_err_ratelimited(nbd_to_dev(nbd),
- "Connection timed out, retrying\n");
+ "Connection timed out, retrying (%d/%d alive)\n",
+ atomic_read(&config->live_connections),
+ config->num_connections);
/*
* Hooray we have more connections, requeue this IO, the submit
* path will put it on a real connection.
@@ -314,7 +342,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
blk_mq_requeue_request(req, true);
nbd_config_put(nbd);
- return BLK_EH_NOT_HANDLED;
+ return BLK_EH_DONE;
}
} else {
dev_err_ratelimited(nbd_to_dev(nbd),
@@ -324,8 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
cmd->status = BLK_STS_IOERR;
sock_shutdown(nbd);
nbd_config_put(nbd);
-
- return BLK_EH_HANDLED;
+done:
+ blk_mq_complete_request(req);
+ return BLK_EH_DONE;
}
/*
@@ -647,11 +676,8 @@ static void recv_work(struct work_struct *work)
static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
- struct nbd_cmd *cmd;
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
- if (!blk_mq_request_started(req))
- return;
- cmd = blk_mq_rq_to_pdu(req);
cmd->status = BLK_STS_IOERR;
blk_mq_complete_request(req);
}
@@ -714,10 +740,9 @@ static int wait_for_reconnect(struct nbd_device *nbd)
return 0;
if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
return 0;
- wait_event_timeout(config->conn_wait,
- atomic_read(&config->live_connections),
- config->dead_conn_timeout);
- return atomic_read(&config->live_connections);
+ return wait_event_timeout(config->conn_wait,
+ atomic_read(&config->live_connections) > 0,
+ config->dead_conn_timeout) > 0;
}
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
@@ -950,10 +975,6 @@ static void nbd_bdev_reset(struct block_device *bdev)
if (bdev->bd_openers > 1)
return;
bd_set_size(bdev, 0);
- if (max_part > 0) {
- blkdev_reread_part(bdev);
- bdev->bd_invalidated = 1;
- }
}
static void nbd_parse_flags(struct nbd_device *nbd)
@@ -1040,6 +1061,8 @@ static void nbd_config_put(struct nbd_device *nbd)
nbd->config = NULL;
nbd->tag_set.timeout = 0;
+ nbd->disk->queue->limits.discard_granularity = 0;
+ blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
mutex_unlock(&nbd->config_lock);
@@ -1109,7 +1132,6 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
if (ret)
return ret;
- bd_set_size(bdev, config->bytesize);
if (max_part)
bdev->bd_invalidated = 1;
mutex_unlock(&nbd->config_lock);
@@ -1118,7 +1140,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
if (ret)
sock_shutdown(nbd);
mutex_lock(&nbd->config_lock);
- bd_set_size(bdev, 0);
+ nbd_bdev_reset(bdev);
/* user requested, ignore socket errors */
if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
ret = 0;
@@ -1269,6 +1291,9 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
+ bdev->bd_invalidated = 1;
+ } else if (nbd_disconnected(nbd->config)) {
+ bdev->bd_invalidated = 1;
}
out:
mutex_unlock(&nbd_index_mutex);
@@ -1490,8 +1515,8 @@ static int nbd_dev_add(int index)
*/
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
- disk->queue->limits.discard_granularity = 512;
- blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
+ disk->queue->limits.discard_granularity = 0;
+ blk_queue_max_discard_sectors(disk->queue, 0);
blk_queue_max_segment_size(disk->queue, UINT_MAX);
blk_queue_max_segments(disk->queue, USHRT_MAX);
blk_queue_max_hw_sectors(disk->queue, 65536);
@@ -1755,6 +1780,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
}
mutex_lock(&nbd->config_lock);
nbd_disconnect(nbd);
+ nbd_clear_sock(nbd);
mutex_unlock(&nbd->config_lock);
if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
@@ -2093,7 +2119,8 @@ static int __init nbd_init(void)
if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL;
recv_workqueue = alloc_workqueue("knbd-recv",
- WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ WQ_MEM_RECLAIM | WQ_HIGHPRI |
+ WQ_UNBOUND, 0);
if (!recv_workqueue)
return -ENOMEM;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a76553293a31..2bdadd7f1454 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -157,23 +157,23 @@ enum {
};
static int g_no_sched;
-module_param_named(no_sched, g_no_sched, int, S_IRUGO);
+module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");
static int g_submit_queues = 1;
-module_param_named(submit_queues, g_submit_queues, int, S_IRUGO);
+module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
static int g_home_node = NUMA_NO_NODE;
-module_param_named(home_node, g_home_node, int, S_IRUGO);
+module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static char g_timeout_str[80];
-module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), S_IRUGO);
+module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
static char g_requeue_str[80];
-module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), S_IRUGO);
+module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
#endif
static int g_queue_mode = NULL_Q_MQ;
@@ -203,27 +203,27 @@ static const struct kernel_param_ops null_queue_mode_param_ops = {
.get = param_get_int,
};
-device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO);
+device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
static int g_gb = 250;
-module_param_named(gb, g_gb, int, S_IRUGO);
+module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");
static int g_bs = 512;
-module_param_named(bs, g_bs, int, S_IRUGO);
+module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");
static int nr_devices = 1;
-module_param(nr_devices, int, S_IRUGO);
+module_param(nr_devices, int, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
static bool g_blocking;
-module_param_named(blocking, g_blocking, bool, S_IRUGO);
+module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
static bool shared_tags;
-module_param(shared_tags, bool, S_IRUGO);
+module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
static int g_irqmode = NULL_IRQ_SOFTIRQ;
@@ -239,19 +239,19 @@ static const struct kernel_param_ops null_irqmode_param_ops = {
.get = param_get_int,
};
-device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO);
+device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
static unsigned long g_completion_nsec = 10000;
-module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO);
+module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
static int g_hw_queue_depth = 64;
-module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO);
+module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
static bool g_use_per_node_hctx;
-module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO);
+module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
static struct nullb_device *null_alloc_dev(void);
@@ -1365,7 +1365,8 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
{
pr_info("null: rq %p timed out\n", rq);
- return BLK_EH_HANDLED;
+ blk_mq_complete_request(rq);
+ return BLK_EH_DONE;
}
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
@@ -1427,7 +1428,8 @@ static void null_request_fn(struct request_queue *q)
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
pr_info("null: rq %p timed out\n", rq);
- return BLK_EH_HANDLED;
+ blk_mq_complete_request(rq);
+ return BLK_EH_DONE;
}
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 27a44b97393a..8961b190e256 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -740,7 +740,7 @@ static int pd_special_command(struct pd_unit *disk,
{
struct request *rq;
- rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 8dce5c04dbf2..b3f83cd96f33 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -97,8 +97,8 @@ static int pktdev_major;
static int write_congestion_on = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
-static mempool_t *psd_pool;
-static struct bio_set *pkt_bio_set;
+static mempool_t psd_pool;
+static struct bio_set pkt_bio_set;
static struct class *class_pktcdvd = NULL; /* /sys/class/pktcdvd */
static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
@@ -478,8 +478,8 @@ static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
if (!pd->dfs_d_root)
return;
- pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
- pd->dfs_d_root, pd, &debug_fops);
+ pd->dfs_f_info = debugfs_create_file("info", 0444,
+ pd->dfs_d_root, pd, &debug_fops);
}
static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
@@ -631,7 +631,7 @@ static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
rb_erase(&node->rb_node, &pd->bio_queue);
- mempool_free(node, pd->rb_pool);
+ mempool_free(node, &pd->rb_pool);
pd->bio_queue_size--;
BUG_ON(pd->bio_queue_size < 0);
}
@@ -704,13 +704,13 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
int ret = 0;
rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
if (cgc->buflen) {
ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
- __GFP_RECLAIM);
+ GFP_NOIO);
if (ret)
goto out;
}
@@ -1285,7 +1285,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
* Fill-in bvec with data from orig_bios.
*/
spin_lock(&pkt->lock);
- bio_copy_data(pkt->w_bio, pkt->orig_bios.head);
+ bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
spin_unlock(&pkt->lock);
@@ -2303,14 +2303,14 @@ static void pkt_end_io_read_cloned(struct bio *bio)
psd->bio->bi_status = bio->bi_status;
bio_put(bio);
bio_endio(psd->bio);
- mempool_free(psd, psd_pool);
+ mempool_free(psd, &psd_pool);
pkt_bio_finished(pd);
}
static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
- struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, pkt_bio_set);
- struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
+ struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, &pkt_bio_set);
+ struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
psd->pd = pd;
psd->bio = bio;
@@ -2381,7 +2381,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
/*
* No matching packet found. Store the bio in the work queue.
*/
- node = mempool_alloc(pd->rb_pool, GFP_NOIO);
+ node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
node->bio = bio;
spin_lock(&pd->lock);
BUG_ON(pd->bio_queue_size < 0);
@@ -2451,7 +2451,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
split = bio_split(bio, last_zone -
bio->bi_iter.bi_sector,
- GFP_NOIO, pkt_bio_set);
+ GFP_NOIO, &pkt_bio_set);
bio_chain(split, bio);
} else {
split = bio;
@@ -2695,9 +2695,9 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
if (!pd)
goto out_mutex;
- pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
- sizeof(struct pkt_rb_node));
- if (!pd->rb_pool)
+ ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
+ sizeof(struct pkt_rb_node));
+ if (ret)
goto out_mem;
INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
@@ -2754,7 +2754,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
out_mem2:
put_disk(disk);
out_mem:
- mempool_destroy(pd->rb_pool);
+ mempool_exit(&pd->rb_pool);
kfree(pd);
out_mutex:
mutex_unlock(&ctl_mutex);
@@ -2805,7 +2805,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
blk_cleanup_queue(pd->disk->queue);
put_disk(pd->disk);
- mempool_destroy(pd->rb_pool);
+ mempool_exit(&pd->rb_pool);
kfree(pd);
/* This is safe: open() is still holding a reference. */
@@ -2902,14 +2902,14 @@ static int __init pkt_init(void)
mutex_init(&ctl_mutex);
- psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
- sizeof(struct packet_stacked_data));
- if (!psd_pool)
- return -ENOMEM;
- pkt_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
- if (!pkt_bio_set) {
- mempool_destroy(psd_pool);
- return -ENOMEM;
+ ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
+ sizeof(struct packet_stacked_data));
+ if (ret)
+ return ret;
+ ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
+ if (ret) {
+ mempool_exit(&psd_pool);
+ return ret;
}
ret = register_blkdev(pktdev_major, DRIVER_NAME);
@@ -2942,8 +2942,8 @@ out_misc:
out:
unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
- mempool_destroy(psd_pool);
- bioset_free(pkt_bio_set);
+ mempool_exit(&psd_pool);
+ bioset_exit(&pkt_bio_set);
return ret;
}
@@ -2956,8 +2956,8 @@ static void __exit pkt_exit(void)
pkt_sysfs_cleanup();
unregister_blkdev(pktdev_major, DRIVER_NAME);
- mempool_destroy(psd_pool);
- bioset_free(pkt_bio_set);
+ mempool_exit(&psd_pool);
+ bioset_exit(&pkt_bio_set);
}
MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 075662f2cf46..afe1508d82c6 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -465,8 +465,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
priv->queue = queue;
queue->queuedata = dev;
- blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH);
-
blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
blk_queue_segment_boundary(queue, -1UL);
blk_queue_dma_alignment(queue, dev->blk_size-1);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 33b36fea1d73..af354047ac4b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -424,7 +424,7 @@ static struct workqueue_struct *rbd_wq;
* single-major requires >= 0.75 version of userspace rbd utility.
*/
static bool single_major = true;
-module_param(single_major, bool, S_IRUGO);
+module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
static ssize_t rbd_add(struct bus_type *bus, const char *buf,
@@ -468,11 +468,11 @@ static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}
-static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
-static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
-static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
-static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
-static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL);
+static BUS_ATTR(add, 0200, NULL, rbd_add);
+static BUS_ATTR(remove, 0200, NULL, rbd_remove);
+static BUS_ATTR(add_single_major, 0200, NULL, rbd_add_single_major);
+static BUS_ATTR(remove_single_major, 0200, NULL, rbd_remove_single_major);
+static BUS_ATTR(supported_features, 0444, rbd_supported_features_show, NULL);
static struct attribute *rbd_bus_attrs[] = {
&bus_attr_add.attr,
@@ -4204,22 +4204,22 @@ static ssize_t rbd_image_refresh(struct device *dev,
return size;
}
-static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
-static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
-static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
-static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
-static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL);
-static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
-static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL);
-static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL);
-static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
-static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
-static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
-static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
-static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
-static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
-static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
-static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
+static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
+static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
+static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
+static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
+static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
+static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
+static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
+static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
+static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
+static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
+static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
+static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
+static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
+static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
+static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
+static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 34997df132e2..09537bee387f 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -247,19 +247,19 @@ static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card)
if (IS_ERR_OR_NULL(card->debugfs_dir))
goto failed_debugfs_dir;
- debugfs_stats = debugfs_create_file("stats", S_IRUGO,
+ debugfs_stats = debugfs_create_file("stats", 0444,
card->debugfs_dir, card,
&debugfs_stats_fops);
if (IS_ERR_OR_NULL(debugfs_stats))
goto failed_debugfs_stats;
- debugfs_pci_regs = debugfs_create_file("pci_regs", S_IRUGO,
+ debugfs_pci_regs = debugfs_create_file("pci_regs", 0444,
card->debugfs_dir, card,
&debugfs_pci_regs_fops);
if (IS_ERR_OR_NULL(debugfs_pci_regs))
goto failed_debugfs_pci_regs;
- debugfs_cram = debugfs_create_file("cram", S_IRUGO | S_IWUSR,
+ debugfs_cram = debugfs_create_file("cram", 0644,
card->debugfs_dir, card,
&debugfs_cram_fops);
if (IS_ERR_OR_NULL(debugfs_cram))
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 08586dc14e85..4d90e5eba2f5 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -567,7 +567,7 @@ static struct carm_request *carm_get_special(struct carm_host *host)
if (!crq)
return NULL;
- rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, GFP_KERNEL);
+ rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(rq)) {
spin_lock_irqsave(&host->lock, flags);
carm_put_request(host, crq);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4a07593c2efd..23752dc99b00 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -298,7 +298,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
struct request *req;
int err;
- req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
+ req = blk_get_request(q, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -371,7 +371,7 @@ static ssize_t virtblk_serial_show(struct device *dev,
return err;
}
-static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
+static DEVICE_ATTR(serial, 0444, virtblk_serial_show, NULL);
/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
@@ -576,10 +576,10 @@ virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
}
static const struct device_attribute dev_attr_cache_type_ro =
- __ATTR(cache_type, S_IRUGO,
+ __ATTR(cache_type, 0444,
virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
- __ATTR(cache_type, S_IRUGO|S_IWUSR,
+ __ATTR(cache_type, 0644,
virtblk_cache_type_show, virtblk_cache_type_store);
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 987d665e82de..b55b245e8052 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -98,7 +98,7 @@ MODULE_PARM_DESC(max_queues,
* backend, 4KB page granularity is used.
*/
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
-module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
+module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
/*
* The LRU mechanism to clean the lists of persistent grants needs to
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 21c1be1eb226..66412eededda 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -367,7 +367,7 @@ int __init xen_blkif_interface_init(void)
out: \
return sprintf(buf, format, result); \
} \
- static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+ static DEVICE_ATTR(name, 0444, show_##name, NULL)
VBD_SHOW_ALLRING(oo_req, "%llu\n");
VBD_SHOW_ALLRING(rd_req, "%llu\n");
@@ -403,7 +403,7 @@ static const struct attribute_group xen_vbdstat_group = {
\
return sprintf(buf, format, ##args); \
} \
- static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+ static DEVICE_ATTR(name, 0444, show_##name, NULL)
VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2a8e7813bd1a..ae00a82f350b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -129,13 +129,12 @@ static const struct block_device_operations xlvbd_block_fops;
*/
static unsigned int xen_blkif_max_segments = 32;
-module_param_named(max_indirect_segments, xen_blkif_max_segments, uint,
- S_IRUGO);
+module_param_named(max_indirect_segments, xen_blkif_max_segments, uint, 0444);
MODULE_PARM_DESC(max_indirect_segments,
"Maximum amount of segments in indirect requests (default is 32)");
static unsigned int xen_blkif_max_queues = 4;
-module_param_named(max_queues, xen_blkif_max_queues, uint, S_IRUGO);
+module_param_named(max_queues, xen_blkif_max_queues, uint, 0444);
MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");
/*
@@ -143,7 +142,7 @@ MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per v
* backend, 4KB page granularity is used.
*/
static unsigned int xen_blkif_max_ring_order;
-module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
+module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
#define BLK_RING_SIZE(info) \
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index bfc566d3f31a..9adc8c3eb0fa 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2192,7 +2192,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
len = nr * CD_FRAMESIZE_RAW;
- rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
+ rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
break;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 41492e980ef4..34968a381d0f 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -266,15 +266,13 @@ config COMMON_CLK_STM32MP157
Support for stm32mp157 SoC family clocks
config COMMON_CLK_STM32F
- bool "Clock driver for stm32f4 and stm32f7 SoC families"
- depends on MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746
+ def_bool COMMON_CLK && (MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746)
help
---help---
Support for stm32f4 and stm32f7 SoC families clocks
config COMMON_CLK_STM32H7
- bool "Clock driver for stm32h7 SoC family"
- depends on MACH_STM32H743
+ def_bool COMMON_CLK && MACH_STM32H743
help
---help---
Support for stm32h7 SoC family clocks
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 114ecbb94ec5..12320118f8de 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -464,7 +464,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000);
/* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */
- clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
+ clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_OSC]);
clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]);
clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]);
clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index de55c7d57438..96b35b8b3606 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -20,7 +20,7 @@ config ACPI_CPPC_CPUFREQ
config ARM_ARMADA_37XX_CPUFREQ
tristate "Armada 37xx CPUFreq support"
- depends on ARCH_MVEBU
+ depends on ARCH_MVEBU && CPUFREQ_DT
help
This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
The Armada 37xx PMU supports 4 frequency and VDD levels.
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index d4a81be0d7d2..b6be62025325 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -152,8 +152,8 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
- memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
- EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
+ memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
+ EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index d29275b97e84..4a828c18099a 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -524,6 +524,14 @@ static int bam_alloc_chan(struct dma_chan *chan)
return 0;
}
+static int bam_pm_runtime_get_sync(struct device *dev)
+{
+ if (pm_runtime_enabled(dev))
+ return pm_runtime_get_sync(dev);
+
+ return 0;
+}
+
/**
* bam_free_chan - Frees dma resources associated with specific channel
* @chan: specified channel
@@ -539,7 +547,7 @@ static void bam_free_chan(struct dma_chan *chan)
unsigned long flags;
int ret;
- ret = pm_runtime_get_sync(bdev->dev);
+ ret = bam_pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return;
@@ -720,7 +728,7 @@ static int bam_pause(struct dma_chan *chan)
unsigned long flag;
int ret;
- ret = pm_runtime_get_sync(bdev->dev);
+ ret = bam_pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return ret;
@@ -746,7 +754,7 @@ static int bam_resume(struct dma_chan *chan)
unsigned long flag;
int ret;
- ret = pm_runtime_get_sync(bdev->dev);
+ ret = bam_pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return ret;
@@ -852,7 +860,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
if (srcs & P_IRQ)
tasklet_schedule(&bdev->task);
- ret = pm_runtime_get_sync(bdev->dev);
+ ret = bam_pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return ret;
@@ -969,7 +977,7 @@ static void bam_start_dma(struct bam_chan *bchan)
if (!vd)
return;
- ret = pm_runtime_get_sync(bdev->dev);
+ ret = bam_pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 14b147135a0c..2455be8cbc4f 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -778,6 +778,7 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
+ return;
}
/* setup handle now as the transport is ready */
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index b9bd827caa22..1b4d465cc5d9 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -98,6 +98,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
(phys_seed >> 32) & mask : TEXT_OFFSET;
/*
+ * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not
+ * be a multiple of EFI_KIMG_ALIGN, and we must ensure that
+ * we preserve the misalignment of 'offset' relative to
+ * EFI_KIMG_ALIGN so that statically allocated objects whose
+ * alignment exceeds PAGE_SIZE appear correctly aligned in
+ * memory.
+ */
+ offset |= TEXT_OFFSET % EFI_KIMG_ALIGN;
+
+ /*
* If KASLR is enabled, and we have some randomness available,
* locate the kernel at a randomized offset in physical memory.
*/
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index dfbd894d5bb7..4e24e591ae74 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -147,7 +147,7 @@ static u32 smc(u32 cmd_addr)
"smc #0 @ switch to secure world\n"
: "=r" (r0)
: "r" (r0), "r" (r1), "r" (r2)
- : "r3");
+ : "r3", "r12");
} while (r0 == QCOM_SCM_INTERRUPTED);
return r0;
@@ -263,7 +263,7 @@ static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
"smc #0 @ switch to secure world\n"
: "=r" (r0)
: "r" (r0), "r" (r1), "r" (r2)
- : "r3");
+ : "r3", "r12");
return r0;
}
@@ -298,7 +298,7 @@ static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
"smc #0 @ switch to secure world\n"
: "=r" (r0)
: "r" (r0), "r" (r1), "r" (r2), "r" (r3)
- );
+ : "r12");
return r0;
}
@@ -328,7 +328,7 @@ u32 qcom_scm_get_version(void)
"smc #0 @ switch to secure world\n"
: "=r" (r0), "=r" (r1)
: "r" (r0), "r" (r1)
- : "r2", "r3");
+ : "r2", "r3", "r12");
} while (r0 == QCOM_SCM_INTERRUPTED);
version = r1;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1dd1142246c2..27579443cdc5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4555,8 +4555,8 @@ static int dm_update_crtcs_state(struct dc *dc,
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = NULL;
struct amdgpu_dm_connector *aconnector = NULL;
- struct drm_connector_state *new_con_state = NULL;
- struct dm_connector_state *dm_conn_state = NULL;
+ struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
+ struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
struct drm_plane_state *new_plane_state = NULL;
new_stream = NULL;
@@ -4577,19 +4577,23 @@ static int dm_update_crtcs_state(struct dc *dc,
/* TODO This hack should go away */
if (aconnector && enable) {
// Make sure fake sink is created in plug-in scenario
- new_con_state = drm_atomic_get_connector_state(state,
+ drm_new_conn_state = drm_atomic_get_new_connector_state(state,
&aconnector->base);
+ drm_old_conn_state = drm_atomic_get_old_connector_state(state,
+ &aconnector->base);
- if (IS_ERR(new_con_state)) {
- ret = PTR_ERR_OR_ZERO(new_con_state);
+
+ if (IS_ERR(drm_new_conn_state)) {
+ ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
break;
}
- dm_conn_state = to_dm_connector_state(new_con_state);
+ dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
+ dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
new_stream = create_stream_for_sink(aconnector,
&new_crtc_state->mode,
- dm_conn_state);
+ dm_new_conn_state);
/*
* we can have no stream on ACTION_SET if a display
@@ -4695,20 +4699,30 @@ next_crtc:
* We want to do dc stream updates that do not require a
* full modeset below.
*/
- if (!enable || !aconnector || modereset_required(new_crtc_state))
+ if (!(enable && aconnector && new_crtc_state->enable &&
+ new_crtc_state->active))
continue;
/*
* Given above conditions, the dc state cannot be NULL because:
- * 1. We're attempting to enable a CRTC. Which has a...
- * 2. Valid connector attached, and
- * 3. User does not want to reset it (disable or mark inactive,
- * which can happen on a CRTC that's already disabled).
- * => It currently exists.
+ * 1. We're in the process of enabling CRTCs (just been added
+ * to the dc context, or already is on the context)
+ * 2. Has a valid connector attached, and
+ * 3. Is currently active and enabled.
+ * => The dc stream state currently exists.
*/
BUG_ON(dm_new_crtc_state->stream == NULL);
- /* Color managment settings */
- if (dm_new_crtc_state->base.color_mgmt_changed) {
+ /* Scaling or underscan settings */
+ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
+ update_stream_scaling_settings(
+ &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+ */
+ if (dm_new_crtc_state->base.color_mgmt_changed ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index ec8d0006ef7c..3c136f2b954f 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2077,7 +2077,7 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
return ret;
}
-void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
+void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
{
mutex_lock(&hdmi->mutex);
@@ -2103,13 +2103,6 @@ void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
}
mutex_unlock(&hdmi->mutex);
}
-
-void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense)
-{
- struct dw_hdmi *hdmi = dev_get_drvdata(dev);
-
- __dw_hdmi_setup_rx_sense(hdmi, hpd, rx_sense);
-}
EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense);
static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
@@ -2145,9 +2138,9 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
*/
if (intr_stat &
(HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) {
- __dw_hdmi_setup_rx_sense(hdmi,
- phy_stat & HDMI_PHY_HPD,
- phy_stat & HDMI_PHY_RX_SENSE);
+ dw_hdmi_setup_rx_sense(hdmi,
+ phy_stat & HDMI_PHY_HPD,
+ phy_stat & HDMI_PHY_RX_SENSE);
if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0)
cec_notifier_set_phys_addr(hdmi->cec_notifier,
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index ffe14ec3e7f2..70ae1f232331 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1145,6 +1145,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
static const u16 psr_setup_time_us[] = {
PSR_SETUP_TIME(330),
PSR_SETUP_TIME(275),
+ PSR_SETUP_TIME(220),
PSR_SETUP_TIME(165),
PSR_SETUP_TIME(110),
PSR_SETUP_TIME(55),
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a1b9338736e3..c2c21d839727 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -716,7 +716,7 @@ static void remove_compat_control_link(struct drm_device *dev)
if (!minor)
return;
- name = kasprintf(GFP_KERNEL, "controlD%d", minor->index);
+ name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
if (!name)
return;
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 39ac15ce4702..9e2ae02f31e0 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -65,12 +65,13 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
return -EINVAL;
/* overflow checks for 32bit size calculations */
- /* NOTE: DIV_ROUND_UP() can overflow */
+ if (args->bpp > U32_MAX - 8)
+ return -EINVAL;
cpp = DIV_ROUND_UP(args->bpp, 8);
- if (!cpp || cpp > 0xffffffffU / args->width)
+ if (cpp > U32_MAX / args->width)
return -EINVAL;
stride = cpp * args->width;
- if (args->height > 0xffffffffU / stride)
+ if (args->height > U32_MAX / stride)
return -EINVAL;
/* test for wrap-around */
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index e394799979a6..6d9b9453707c 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -212,6 +212,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
return -ENOMEM;
filp->private_data = priv;
+ filp->f_mode |= FMODE_UNSIGNED_OFFSET;
priv->filp = filp;
priv->pid = get_pid(task_pid(current));
priv->minor = minor;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d596a8302ca3..854bd51b9478 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -778,6 +778,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
+ if (!args->user_size)
+ return -EINVAL;
+
if (offset_in_page(args->user_ptr | args->user_size))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 3ace929dd90f..3f502eef2431 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -4,6 +4,8 @@
* Copyright © 2018 Intel Corporation
*/
+#include <linux/nospec.h>
+
#include "i915_drv.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
@@ -100,7 +102,7 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; i < args->num_items; i++, user_item_ptr++) {
struct drm_i915_query_item item;
- u64 func_idx;
+ unsigned long func_idx;
int ret;
if (copy_from_user(&item, user_item_ptr, sizeof(item)))
@@ -109,12 +111,17 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (item.query_id == 0)
return -EINVAL;
+ if (overflows_type(item.query_id - 1, unsigned long))
+ return -EINVAL;
+
func_idx = item.query_id - 1;
- if (func_idx < ARRAY_SIZE(i915_query_funcs))
+ ret = -EINVAL;
+ if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
+ func_idx = array_index_nospec(func_idx,
+ ARRAY_SIZE(i915_query_funcs));
ret = i915_query_funcs[func_idx](dev_priv, &item);
- else
- ret = -EINVAL;
+ }
/* Only write the length back to userspace if they differ. */
if (ret != item.length && put_user(ret, &user_item_ptr->length))
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e6a8c0ee7df1..8a69a9275e28 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7326,6 +7326,9 @@ enum {
#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
+#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
+#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
+
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 4ba139c27fba..f7c25828d3bb 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1149,6 +1149,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+ /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
+ if (IS_GEN9_LP(dev_priv))
+ WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
+
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
if (ret)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e3a5f673ff67..8704f7f8d072 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -884,6 +884,7 @@ static void execlists_submission_tasklet(unsigned long data)
head = execlists->csb_head;
tail = READ_ONCE(buf[write_idx]);
+ rmb(); /* Hopefully paired with a wmb() in HW */
}
GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
engine->name,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8691c86f579c..e125d16a1aa7 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -574,6 +574,36 @@ exit:
return NOTIFY_OK;
}
+static int
+intel_lvds_connector_register(struct drm_connector *connector)
+{
+ struct intel_lvds_connector *lvds = to_lvds_connector(connector);
+ int ret;
+
+ ret = intel_connector_register(connector);
+ if (ret)
+ return ret;
+
+ lvds->lid_notifier.notifier_call = intel_lid_notify;
+ if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
+ DRM_DEBUG_KMS("lid notifier registration failed\n");
+ lvds->lid_notifier.notifier_call = NULL;
+ }
+
+ return 0;
+}
+
+static void
+intel_lvds_connector_unregister(struct drm_connector *connector)
+{
+ struct intel_lvds_connector *lvds = to_lvds_connector(connector);
+
+ if (lvds->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&lvds->lid_notifier);
+
+ intel_connector_unregister(connector);
+}
+
/**
* intel_lvds_destroy - unregister and free LVDS structures
* @connector: connector to free
@@ -586,9 +616,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
struct intel_lvds_connector *lvds_connector =
to_lvds_connector(connector);
- if (lvds_connector->lid_notifier.notifier_call)
- acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
-
if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
kfree(lvds_connector->base.edid);
@@ -609,8 +636,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_connector_register,
- .early_unregister = intel_connector_unregister,
+ .late_register = intel_lvds_connector_register,
+ .early_unregister = intel_lvds_connector_unregister,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -827,6 +854,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Radiant P845",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
+ },
+ },
{ } /* terminating entry */
};
@@ -1150,12 +1185,6 @@ out:
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
- lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
- if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
- DRM_DEBUG_KMS("lid notifier registration failed\n");
- lvds_connector->lid_notifier.notifier_call = NULL;
- }
-
return;
failed:
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index a393095aac1a..c9ad45686e7a 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -529,7 +529,7 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id)
if (stat & HDMITX_TOP_INTR_HPD_RISE)
hpd_connected = true;
- dw_hdmi_setup_rx_sense(dw_hdmi->dev, hpd_connected,
+ dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected,
hpd_connected);
drm_helper_hpd_irq_event(dw_hdmi->encoder.dev);
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 68a40ae26f5b..1e2c931f6acf 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -82,7 +82,7 @@ static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk,
struct dispc_clock_info *dispc_cinfo)
{
int i;
- struct sdi_clk_calc_ctx ctx = { .sdi = sdi };
+ struct sdi_clk_calc_ctx ctx;
/*
* DSS fclk gives us very few possibilities, so finding a good pixel
@@ -95,6 +95,9 @@ static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk,
bool ok;
memset(&ctx, 0, sizeof(ctx));
+
+ ctx.sdi = sdi;
+
if (pclk > 1000 * i * i * i)
ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu);
else
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 3d2d3bbd1342..155ad840f3c5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -88,6 +88,9 @@ static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
const struct drm_display_mode *panel_mode;
struct drm_crtc_state *crtc_state;
+ if (!state->crtc)
+ return 0;
+
if (list_empty(&connector->modes)) {
dev_dbg(lvds->dev, "connector: empty modes list\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 94b99c90425a..7c95ed5c5cac 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -130,6 +130,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
struct vc4_file *vc4file = file->driver_priv;
vc4_perfmon_close_file(vc4file);
+ kfree(vc4file);
}
static const struct vm_operations_struct vc4_vm_ops = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 70e1a8820a7c..8b770a8e02cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1278,8 +1278,6 @@ static void vmw_master_drop(struct drm_device *dev,
dev_priv->active_master = &dev_priv->fbdev_master;
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
ttm_vt_unlock(&dev_priv->fbdev_master.lock);
-
- vmw_fb_refresh(dev_priv);
}
/**
@@ -1483,7 +1481,6 @@ static int vmw_pm_freeze(struct device *kdev)
vmw_kms_resume(dev);
if (dev_priv->enable_fb)
vmw_fb_on(dev_priv);
- vmw_fb_refresh(dev_priv);
return -EBUSY;
}
@@ -1523,8 +1520,6 @@ static int vmw_pm_restore(struct device *kdev)
if (dev_priv->enable_fb)
vmw_fb_on(dev_priv);
- vmw_fb_refresh(dev_priv);
-
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index f34f368c1a2e..5fcbe1620d50 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -910,7 +910,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);
-void vmw_fb_refresh(struct vmw_private *vmw_priv);
/**
* Kernel modesetting - vmwgfx_kms.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index ba0cdb743c3e..54e300365a5c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -866,21 +866,13 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.active = true;
spin_unlock_irqrestore(&par->dirty.lock, flags);
-
- return 0;
-}
-/**
- * vmw_fb_refresh - Refresh fb display
- *
- * @vmw_priv: Pointer to device private
- *
- * Call into kms to show the fbdev display(s).
- */
-void vmw_fb_refresh(struct vmw_private *vmw_priv)
-{
- if (!vmw_priv->fb_info)
- return;
+ /*
+ * Need to reschedule a dirty update, because otherwise that's
+ * only done in dirty_mark() if the previous coalesced
+ * dirty region was empty.
+ */
+ schedule_delayed_work(&par->local_work, 0);
- vmw_fb_set_par(vmw_priv->fb_info);
+ return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index cdff99211602..21d746bdc922 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -329,8 +329,6 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
struct rpc_channel channel;
char *msg, *reply = NULL;
size_t reply_len = 0;
- int ret = 0;
-
if (!vmw_msg_enabled)
return -ENODEV;
@@ -344,15 +342,14 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
return -ENOMEM;
}
- if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
- vmw_send_msg(&channel, msg) ||
- vmw_recv_msg(&channel, (void *) &reply, &reply_len) ||
- vmw_close_channel(&channel)) {
- DRM_ERROR("Failed to get %s", guest_info_param);
+ if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
+ goto out_open;
- ret = -EINVAL;
- }
+ if (vmw_send_msg(&channel, msg) ||
+ vmw_recv_msg(&channel, (void *) &reply, &reply_len))
+ goto out_msg;
+ vmw_close_channel(&channel);
if (buffer && reply && reply_len > 0) {
/* Remove reply code, which are the first 2 characters of
* the reply
@@ -369,7 +366,17 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
kfree(reply);
kfree(msg);
- return ret;
+ return 0;
+
+out_msg:
+ vmw_close_channel(&channel);
+ kfree(reply);
+out_open:
+ *length = 0;
+ kfree(msg);
+ DRM_ERROR("Failed to get %s", guest_info_param);
+
+ return -EINVAL;
}
@@ -400,15 +407,22 @@ int vmw_host_log(const char *log)
return -ENOMEM;
}
- if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
- vmw_send_msg(&channel, msg) ||
- vmw_close_channel(&channel)) {
- DRM_ERROR("Failed to send log\n");
+ if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
+ goto out_open;
- ret = -EINVAL;
- }
+ if (vmw_send_msg(&channel, msg))
+ goto out_msg;
+ vmw_close_channel(&channel);
kfree(msg);
- return ret;
+ return 0;
+
+out_msg:
+ vmw_close_channel(&channel);
+out_open:
+ kfree(msg);
+ DRM_ERROR("Failed to send log\n");
+
+ return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
index 557a033fb610..8545488aa0cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -135,17 +135,24 @@
#else
-/* In the 32-bit version of this macro, we use "m" because there is no
- * more register left for bp
+/*
+ * In the 32-bit version of this macro, we store bp in a memory location
+ * because we've ran out of registers.
+ * Now we can't reference that memory location while we've modified
+ * %esp or %ebp, so we first push it on the stack, just before we push
+ * %ebp, and then when we need it we read it from the stack where we
+ * just pushed it.
*/
#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
port_num, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
- asm volatile ("push %%ebp;" \
- "mov %12, %%ebp;" \
+ asm volatile ("push %12;" \
+ "push %%ebp;" \
+ "mov 0x04(%%esp), %%ebp;" \
"rep outsb;" \
- "pop %%ebp;" : \
+ "pop %%ebp;" \
+ "add $0x04, %%esp;" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
@@ -167,10 +174,12 @@
port_num, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
- asm volatile ("push %%ebp;" \
- "mov %12, %%ebp;" \
+ asm volatile ("push %12;" \
+ "push %%ebp;" \
+ "mov 0x04(%%esp), %%ebp;" \
"rep insb;" \
- "pop %%ebp" : \
+ "pop %%ebp;" \
+ "add $0x04, %%esp;" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 648f8127f65a..3d667e903beb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -482,6 +482,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
return ret;
}
+ vps->dmabuf_size = size;
+
/*
* TTM already thinks the buffer is pinned, but make sure the
* pin_count is upped.
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f249a4428458..6ec307c93ece 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -272,7 +272,7 @@ config SENSORS_K8TEMP
config SENSORS_K10TEMP
tristate "AMD Family 10h+ temperature sensor"
- depends on X86 && PCI
+ depends on X86 && PCI && AMD_NB
help
If you say yes here you get support for the temperature
sensor(s) inside your CPU. Supported are later revisions of
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index d2cc55e21374..3b73dee6fdc6 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <asm/amd_nb.h>
#include <asm/processor.h>
MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
@@ -40,8 +41,8 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
#endif
-#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB
-#define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0
+#ifndef PCI_DEVICE_ID_AMD_17H_M10H_DF_F3
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#endif
/* CPUID function 0x80000001, ebx */
@@ -63,10 +64,12 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#define NB_CAP_HTC 0x00000400
/*
- * For F15h M60h, functionality of REG_REPORTED_TEMPERATURE
- * has been moved to D0F0xBC_xD820_0CA4 [Reported Temperature
- * Control]
+ * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
+ * and REG_REPORTED_TEMPERATURE have been moved to
+ * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
+ * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
*/
+#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET 0xd8200c64
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
/* F17h M01h Access througn SMN */
@@ -74,6 +77,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
struct k10temp_data {
struct pci_dev *pdev;
+ void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
int temp_offset;
u32 temp_adjust_mask;
@@ -98,6 +102,11 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen Threadripper 1910", 10000 },
};
+static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
+{
+ pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
+}
+
static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
{
pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
@@ -114,6 +123,12 @@ static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
mutex_unlock(&nb_smu_ind_mutex);
}
+static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
+{
+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
+ F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
+}
+
static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
@@ -122,8 +137,8 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
{
- amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60,
- F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
+ amd_smn_read(amd_pci_dev_to_node_id(pdev),
+ F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
}
static ssize_t temp1_input_show(struct device *dev,
@@ -160,8 +175,7 @@ static ssize_t show_temp_crit(struct device *dev,
u32 regval;
int value;
- pci_read_config_dword(data->pdev,
- REG_HARDWARE_THERMAL_CONTROL, &regval);
+ data->read_htcreg(data->pdev, &regval);
value = ((regval >> 16) & 0x7f) * 500 + 52000;
if (show_hyst)
value -= ((regval >> 24) & 0xf) * 500;
@@ -181,13 +195,18 @@ static umode_t k10temp_is_visible(struct kobject *kobj,
struct pci_dev *pdev = data->pdev;
if (index >= 2) {
- u32 reg_caps, reg_htc;
+ u32 reg;
+
+ if (!data->read_htcreg)
+ return 0;
pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
- &reg_caps);
- pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL,
- &reg_htc);
- if (!(reg_caps & NB_CAP_HTC) || !(reg_htc & HTC_ENABLE))
+ &reg);
+ if (!(reg & NB_CAP_HTC))
+ return 0;
+
+ data->read_htcreg(data->pdev, &reg);
+ if (!(reg & HTC_ENABLE))
return 0;
}
return attr->mode;
@@ -268,11 +287,13 @@ static int k10temp_probe(struct pci_dev *pdev,
if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
boot_cpu_data.x86_model == 0x70)) {
+ data->read_htcreg = read_htcreg_nb_f15;
data->read_tempreg = read_tempreg_nb_f15;
} else if (boot_cpu_data.x86 == 0x17) {
data->temp_adjust_mask = 0x80000;
data->read_tempreg = read_tempreg_nb_f17;
} else {
+ data->read_htcreg = read_htcreg_pci;
data->read_tempreg = read_tempreg_pci;
}
@@ -302,7 +323,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index ede388309376..634f58042c77 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -733,8 +733,8 @@ err_nomem:
/* Reset the page to write-back before releasing */
set_memory_wb((unsigned long)win->block[i].bdesc, 1);
#endif
- dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc,
- win->block[i].addr);
+ dma_free_coherent(msc_dev(msc)->parent->parent, size,
+ win->block[i].bdesc, win->block[i].addr);
}
kfree(win);
@@ -769,7 +769,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
/* Reset the page to write-back before releasing */
set_memory_wb((unsigned long)win->block[i].bdesc, 1);
#endif
- dma_free_coherent(msc_dev(win->msc), PAGE_SIZE,
+ dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
win->block[i].bdesc, win->block[i].addr);
}
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 05386b76465e..10bcb5d73f90 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -19,6 +19,7 @@
#include <linux/stm.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/vmalloc.h>
#include "stm.h"
#include <uapi/linux/stm.h>
@@ -674,7 +675,7 @@ static void stm_device_release(struct device *dev)
{
struct stm_device *stm = to_stm_device(dev);
- kfree(stm);
+ vfree(stm);
}
int stm_register_device(struct device *parent, struct stm_data *stm_data,
@@ -691,7 +692,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
return -EINVAL;
nmasters = stm_data->sw_end - stm_data->sw_start + 1;
- stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
+ stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
if (!stm)
return -ENOMEM;
@@ -744,7 +745,7 @@ err_device:
/* matches device_initialize() above */
put_device(&stm->dev);
err_free:
- kfree(stm);
+ vfree(stm);
return err;
}
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index fd36c39ddf4e..0cdba29ae0a9 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -209,7 +209,10 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
i2c_dw_disable_int(dev);
/* Enable the adapter */
- __i2c_dw_enable_and_wait(dev, true);
+ __i2c_dw_enable(dev, true);
+
+ /* Dummy read to avoid the register getting stuck on Bay Trail */
+ dw_readl(dev, DW_IC_ENABLE_STATUS);
/* Clear and enable interrupts */
dw_readl(dev, DW_IC_CLR_INTR);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 8c42ca7107b2..45ae3c025bf6 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -1,6 +1,6 @@
/*
* i2c-ocores.c: I2C bus driver for OpenCores I2C controller
- * (http://www.opencores.org/projects.cgi/web/i2c/overview).
+ * (https://opencores.org/project/i2c/overview)
*
* Peter Korsgaard <jacmet@sunsite.dk>
*
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 2aa0e83174c5..dae8ac618a52 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -564,10 +564,10 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap,
* TODO: We could potentially loop and retry in the case
* of MSP_TWI_XFER_TIMEOUT.
*/
- return -1;
+ return -EIO;
}
- return 0;
+ return num;
}
static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter)
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index e4be86b3de9a..7235c7302bb7 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -337,7 +337,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
}
mutex_unlock(&vb->lock);
}
- return 0;
+ return num;
error:
mutex_unlock(&vb->lock);
return error;
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index a9126b3cda61..7c3b4740b94b 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -445,10 +445,17 @@ static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
msgs[1].buf = buffer;
ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret < 0)
- dev_err(&client->adapter->dev, "i2c read failed\n");
- else
+ if (ret < 0) {
+ /* Getting a NACK is unfortunately normal with some DSTDs */
+ if (ret == -EREMOTEIO)
+ dev_dbg(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
+ data_len, client->addr, cmd, ret);
+ else
+ dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
+ data_len, client->addr, cmd, ret);
+ } else {
memcpy(data, buffer, data_len);
+ }
kfree(buffer);
return ret;
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 0e6bc631a1ca..8b2b72b93885 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -92,7 +92,7 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
struct request *rq;
int error;
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_MISC;
rq->special = (char *)pc;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index b52a7bdace52..5f178384876f 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -437,7 +437,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
bool delay = false;
rq = blk_get_request(drive->queue,
- write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
+ write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB);
ide_req(rq)->type = ATA_PRIV_PC;
rq->rq_flags |= rq_flags;
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 2acca12b9c94..b1322400887b 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -304,7 +304,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
struct request *rq;
int ret;
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_MISC;
rq->rq_flags = RQF_QUIET;
blk_execute_rq(drive->queue, cd->disk, rq, 0);
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 4e20747af32e..f4f8afdf8bbe 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -166,7 +166,7 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
if (!(setting->flags & DS_SYNC))
return setting->set(drive, arg);
- rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_MISC;
scsi_req(rq)->cmd_len = 5;
scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index f1a7c58fe418..e3b4e659082d 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -478,7 +478,7 @@ static int set_multcount(ide_drive_t *drive, int arg)
if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
return -EBUSY;
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_TASKFILE;
drive->mult_req = arg;
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 3661abb16a5f..af5119a73689 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -125,7 +125,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
if (NULL == (void *) arg) {
struct request *rq;
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_TASKFILE;
blk_execute_rq(drive->queue, NULL, rq, 0);
err = scsi_req(rq)->result ? -EIO : 0;
@@ -222,7 +222,7 @@ static int generic_drive_reset(ide_drive_t *drive)
struct request *rq;
int ret = 0;
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_MISC;
scsi_req(rq)->cmd_len = 1;
scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 6465bcc7cea6..622f0edb3945 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -32,7 +32,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
}
spin_unlock_irq(&hwif->lock);
- rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
scsi_req(rq)->cmd[0] = REQ_PARK_HEADS;
scsi_req(rq)->cmd_len = 1;
ide_req(rq)->type = ATA_PRIV_MISC;
@@ -47,7 +47,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
* Make sure that *some* command is sent to the drive after the
* timeout has expired, so power management will be reenabled.
*/
- rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_NOWAIT);
+ rq = blk_get_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
if (IS_ERR(rq))
goto out;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index ad8a125defdd..59217aa1d1fb 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -19,7 +19,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_SUSPEND;
@@ -90,8 +90,7 @@ int generic_ide_resume(struct device *dev)
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request_flags(drive->queue, REQ_OP_DRV_IN,
- BLK_MQ_REQ_PREEMPT);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
ide_req(rq)->type = ATA_PRIV_PM_RESUME;
rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index ac2c6820c65a..aee7b46d2330 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -854,7 +854,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
BUG_ON(size < 0 || size % tape->blk_size);
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_MISC;
scsi_req(rq)->cmd[13] = cmd;
rq->rq_disk = tape->disk;
@@ -862,7 +862,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
if (size) {
ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
- __GFP_RECLAIM);
+ GFP_NOIO);
if (ret)
goto out_put;
}
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index abe0822dd429..c034cd965831 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -431,7 +431,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
rq = blk_get_request(drive->queue,
(cmd->tf_flags & IDE_TFLAG_WRITE) ?
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_TASKFILE;
/*
@@ -442,7 +442,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
*/
if (nsect) {
error = blk_rq_map_kern(drive->queue, rq, buf,
- nsect * SECTOR_SIZE, __GFP_RECLAIM);
+ nsect * SECTOR_SIZE, GFP_NOIO);
if (error)
goto put_req;
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 15606f237480..9da79070357c 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC
depends on ARCH_AT91 || COMPILE_TEST
depends on HAS_IOMEM
depends on HAS_DMA
+ select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Atmel SAMA5D2 ADC which is
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 801afb61310b..d4bbe5b53318 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -348,55 +348,6 @@ static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39,
static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0,
33, 0, 17, 16, 12, 10, 8, 6, 4};
-static ssize_t ad7793_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7793_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n",
- st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
-}
-
-static ssize_t ad7793_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7793_state *st = iio_priv(indio_dev);
- long lval;
- int i, ret;
-
- ret = kstrtol(buf, 10, &lval);
- if (ret)
- return ret;
-
- if (lval == 0)
- return -EINVAL;
-
- for (i = 0; i < 16; i++)
- if (lval == st->chip_info->sample_freq_avail[i])
- break;
- if (i == 16)
- return -EINVAL;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- st->mode &= ~AD7793_MODE_RATE(-1);
- st->mode |= AD7793_MODE_RATE(i);
- ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode);
- iio_device_release_direct_mode(indio_dev);
-
- return len;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- ad7793_read_frequency,
- ad7793_write_frequency);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
"470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");
@@ -424,7 +375,6 @@ static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
ad7793_show_scale_available, NULL, 0);
static struct attribute *ad7793_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
NULL
@@ -435,7 +385,6 @@ static const struct attribute_group ad7793_attribute_group = {
};
static struct attribute *ad7797_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr,
NULL
};
@@ -505,6 +454,10 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
*val -= offset;
}
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->chip_info
+ ->sample_freq_avail[AD7793_MODE_RATE(st->mode)];
+ return IIO_VAL_INT;
}
return -EINVAL;
}
@@ -542,6 +495,26 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
break;
}
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!val) {
+ ret = -EINVAL;
+ break;
+ }
+
+ for (i = 0; i < 16; i++)
+ if (val == st->chip_info->sample_freq_avail[i])
+ break;
+
+ if (i == 16) {
+ ret = -EINVAL;
+ break;
+ }
+
+ st->mode &= ~AD7793_MODE_RATE(-1);
+ st->mode |= AD7793_MODE_RATE(i);
+ ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode),
+ st->mode);
+ break;
default:
ret = -EINVAL;
}
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 4eff8351ce29..8729d6524b4d 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -333,6 +333,27 @@ static const struct iio_chan_spec at91_adc_channels[] = {
+ AT91_SAMA5D2_DIFF_CHAN_CNT + 1),
};
+static int at91_adc_chan_xlate(struct iio_dev *indio_dev, int chan)
+{
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ if (indio_dev->channels[i].scan_index == chan)
+ return i;
+ }
+ return -EINVAL;
+}
+
+static inline struct iio_chan_spec const *
+at91_adc_chan_get(struct iio_dev *indio_dev, int chan)
+{
+ int index = at91_adc_chan_xlate(indio_dev, chan);
+
+ if (index < 0)
+ return NULL;
+ return indio_dev->channels + index;
+}
+
static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
{
struct iio_dev *indio = iio_trigger_get_drvdata(trig);
@@ -350,8 +371,10 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
at91_adc_writel(st, AT91_SAMA5D2_TRGR, status);
for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
- struct iio_chan_spec const *chan = indio->channels + bit;
+ struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
+ if (!chan)
+ continue;
if (state) {
at91_adc_writel(st, AT91_SAMA5D2_CHER,
BIT(chan->channel));
@@ -448,7 +471,11 @@ static int at91_adc_dma_start(struct iio_dev *indio_dev)
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->num_channels) {
- struct iio_chan_spec const *chan = indio_dev->channels + bit;
+ struct iio_chan_spec const *chan =
+ at91_adc_chan_get(indio_dev, bit);
+
+ if (!chan)
+ continue;
st->dma_st.rx_buf_sz += chan->scan_type.storagebits / 8;
}
@@ -526,8 +553,11 @@ static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
*/
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->num_channels) {
- struct iio_chan_spec const *chan = indio_dev->channels + bit;
+ struct iio_chan_spec const *chan =
+ at91_adc_chan_get(indio_dev, bit);
+ if (!chan)
+ continue;
if (st->dma_st.dma_chan)
at91_adc_readl(st, chan->address);
}
@@ -587,8 +617,11 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->num_channels) {
- struct iio_chan_spec const *chan = indio_dev->channels + bit;
+ struct iio_chan_spec const *chan =
+ at91_adc_chan_get(indio_dev, bit);
+ if (!chan)
+ continue;
st->buffer[i] = at91_adc_readl(st, chan->address);
i++;
}
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 01422d11753c..b28a716a23b2 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -144,6 +144,7 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
* Leave as soon as if exact resolution if reached.
* Otherwise the higher resolution below 32 bits is kept.
*/
+ fl->res = 0;
for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
if (fast)
@@ -193,7 +194,7 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
}
}
- if (!fl->fosr)
+ if (!fl->res)
return -EINVAL;
return 0;
@@ -770,7 +771,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
- unsigned int spi_freq = adc->spi_freq;
+ unsigned int spi_freq;
int ret = -EINVAL;
switch (mask) {
@@ -784,8 +785,18 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SAMP_FREQ:
if (!val)
return -EINVAL;
- if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+
+ switch (ch->src) {
+ case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL:
spi_freq = adc->dfsdm->spi_master_freq;
+ break;
+ case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING:
+ case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING:
+ spi_freq = adc->dfsdm->spi_master_freq / 2;
+ break;
+ default:
+ spi_freq = adc->spi_freq;
+ }
if (spi_freq % val)
dev_warn(&indio_dev->dev,
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 05e0c353e089..b32bf57910ca 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -587,7 +587,7 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
* Should be used as the set_length callback for iio_buffer_access_ops
* struct for DMA buffers.
*/
-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length)
+int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
/* Avoid an invalid state */
if (length < 2)
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index 047fe757ab97..70c302a93d7f 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -22,11 +22,18 @@ struct iio_kfifo {
#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
- int bytes_per_datum, int length)
+ size_t bytes_per_datum, unsigned int length)
{
if ((length == 0) || (bytes_per_datum == 0))
return -EINVAL;
+ /*
+ * Make sure we don't overflow an unsigned int after kfifo rounds up to
+ * the next power of 2.
+ */
+ if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
+ return -EINVAL;
+
return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
bytes_per_datum, GFP_KERNEL);
}
@@ -67,7 +74,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
return 0;
}
-static int iio_set_length_kfifo(struct iio_buffer *r, int length)
+static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
{
/* Avoid an invalid state */
if (length < 2)
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index cfb6588565ba..4905a997a7ec 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -178,14 +178,14 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
#ifdef CONFIG_PM
int ret;
- atomic_set(&st->user_requested_state, state);
-
if (atomic_add_unless(&st->runtime_pm_enable, 1, 1))
pm_runtime_enable(&st->pdev->dev);
- if (state)
+ if (state) {
+ atomic_inc(&st->user_requested_state);
ret = pm_runtime_get_sync(&st->pdev->dev);
- else {
+ } else {
+ atomic_dec(&st->user_requested_state);
pm_runtime_mark_last_busy(&st->pdev->dev);
pm_runtime_use_autosuspend(&st->pdev->dev);
ret = pm_runtime_put_autosuspend(&st->pdev->dev);
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index fb2d347f760f..ecc55e98ddd3 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -502,7 +502,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
return -EINVAL;
if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
- return -EAGAIN;
+ return -EINVAL;
memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
if (attr) {
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 9a4e899d94b3..2b6c9b516070 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -119,7 +119,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->length = size;
umem->address = addr;
umem->page_shift = PAGE_SHIFT;
- umem->pid = get_task_pid(current, PIDTYPE_PID);
/*
* We ask for writable memory if any of the following
* access flags are set. "Local write" and "remote write"
@@ -132,7 +131,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
if (access & IB_ACCESS_ON_DEMAND) {
- put_pid(umem->pid);
ret = ib_umem_odp_get(context, umem, access);
if (ret) {
kfree(umem);
@@ -148,7 +146,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list) {
- put_pid(umem->pid);
kfree(umem);
return ERR_PTR(-ENOMEM);
}
@@ -231,7 +228,6 @@ out:
if (ret < 0) {
if (need_release)
__ib_umem_release(context->device, umem, 0);
- put_pid(umem->pid);
kfree(umem);
} else
current->mm->pinned_vm = locked;
@@ -274,8 +270,7 @@ void ib_umem_release(struct ib_umem *umem)
__ib_umem_release(umem->context->device, umem, 1);
- task = get_pid_task(umem->pid, PIDTYPE_PID);
- put_pid(umem->pid);
+ task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
if (!task)
goto out;
mm = get_task_mm(task);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index f6c739ec8b62..20b9f31052bf 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -185,12 +185,65 @@ static void bnxt_re_shutdown(void *p)
bnxt_re_ib_unreg(rdev, false);
}
+static void bnxt_re_stop_irq(void *handle)
+{
+ struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+ struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+ struct bnxt_qplib_nq *nq;
+ int indx;
+
+ for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
+ nq = &rdev->nq[indx - 1];
+ bnxt_qplib_nq_stop_irq(nq, false);
+ }
+
+ bnxt_qplib_rcfw_stop_irq(rcfw, false);
+}
+
+static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+{
+ struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+ struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
+ struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+ struct bnxt_qplib_nq *nq;
+ int indx, rc;
+
+ if (!ent) {
+ /* Not setting the f/w timeout bit in rcfw.
+ * During the driver unload the first command
+ * to f/w will timeout and that will set the
+ * timeout bit.
+ */
+ dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
+ return;
+ }
+
+ /* Vectors may change after restart, so update with new vectors
+ * in device sctructure.
+ */
+ for (indx = 0; indx < rdev->num_msix; indx++)
+ rdev->msix_entries[indx].vector = ent[indx].vector;
+
+ bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
+ false);
+ for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
+ nq = &rdev->nq[indx - 1];
+ rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
+ msix_ent[indx].vector, false);
+ if (rc)
+ dev_warn(rdev_to_dev(rdev),
+ "Failed to reinit NQ index %d\n", indx - 1);
+ }
+}
+
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
.ulp_async_notifier = NULL,
.ulp_stop = bnxt_re_stop,
.ulp_start = bnxt_re_start,
.ulp_sriov_config = bnxt_re_sriov_config,
- .ulp_shutdown = bnxt_re_shutdown
+ .ulp_shutdown = bnxt_re_shutdown,
+ .ulp_irq_stop = bnxt_re_stop_irq,
+ .ulp_irq_restart = bnxt_re_start_irq
};
/* RoCE -> Net driver */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 3a78faba8d91..50d8f1fc98d5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -336,22 +336,32 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
return IRQ_HANDLED;
}
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
+{
+ tasklet_disable(&nq->worker);
+ /* Mask h/w interrupt */
+ NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+ /* Sync with last running IRQ handler */
+ synchronize_irq(nq->vector);
+ if (kill)
+ tasklet_kill(&nq->worker);
+ if (nq->requested) {
+ irq_set_affinity_hint(nq->vector, NULL);
+ free_irq(nq->vector, nq);
+ nq->requested = false;
+ }
+}
+
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
if (nq->cqn_wq) {
destroy_workqueue(nq->cqn_wq);
nq->cqn_wq = NULL;
}
+
/* Make sure the HW is stopped! */
- synchronize_irq(nq->vector);
- tasklet_disable(&nq->worker);
- tasklet_kill(&nq->worker);
+ bnxt_qplib_nq_stop_irq(nq, true);
- if (nq->requested) {
- irq_set_affinity_hint(nq->vector, NULL);
- free_irq(nq->vector, nq);
- nq->requested = false;
- }
if (nq->bar_reg_iomem)
iounmap(nq->bar_reg_iomem);
nq->bar_reg_iomem = NULL;
@@ -361,6 +371,40 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
nq->vector = 0;
}
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ int msix_vector, bool need_init)
+{
+ int rc;
+
+ if (nq->requested)
+ return -EFAULT;
+
+ nq->vector = msix_vector;
+ if (need_init)
+ tasklet_init(&nq->worker, bnxt_qplib_service_nq,
+ (unsigned long)nq);
+ else
+ tasklet_enable(&nq->worker);
+
+ snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
+ rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
+ if (rc)
+ return rc;
+
+ cpumask_clear(&nq->mask);
+ cpumask_set_cpu(nq_indx, &nq->mask);
+ rc = irq_set_affinity_hint(nq->vector, &nq->mask);
+ if (rc) {
+ dev_warn(&nq->pdev->dev,
+ "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
+ nq->vector, nq_indx);
+ }
+ nq->requested = true;
+ NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+
+ return rc;
+}
+
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int nq_idx, int msix_vector, int bar_reg_offset,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
@@ -372,41 +416,17 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
resource_size_t nq_base;
int rc = -1;
- nq->pdev = pdev;
- nq->vector = msix_vector;
if (cqn_handler)
nq->cqn_handler = cqn_handler;
if (srqn_handler)
nq->srqn_handler = srqn_handler;
- tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
-
/* Have a task to schedule CQ notifiers in post send case */
nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
if (!nq->cqn_wq)
- goto fail;
-
- nq->requested = false;
- memset(nq->name, 0, 32);
- sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
- rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
- if (rc) {
- dev_err(&nq->pdev->dev,
- "Failed to request IRQ for NQ: %#x", rc);
- goto fail;
- }
-
- cpumask_clear(&nq->mask);
- cpumask_set_cpu(nq_idx, &nq->mask);
- rc = irq_set_affinity_hint(nq->vector, &nq->mask);
- if (rc) {
- dev_warn(&nq->pdev->dev,
- "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
- nq->vector, nq_idx);
- }
+ return -ENOMEM;
- nq->requested = true;
nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
nq->bar_reg_off = bar_reg_offset;
nq_base = pci_resource_start(pdev, nq->bar_reg);
@@ -419,7 +439,13 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
rc = -ENOMEM;
goto fail;
}
- NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+
+ rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
+ if (rc) {
+ dev_err(&nq->pdev->dev,
+ "QPLIB: Failed to request irq for nq-idx %d", nq_idx);
+ goto fail;
+ }
return 0;
fail:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ade9f13c0fd1..72352ca80ace 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -467,7 +467,10 @@ struct bnxt_qplib_nq_work {
struct bnxt_qplib_cq *cq;
};
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ int msix_vector, bool need_init);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int nq_idx, int msix_vector, int bar_reg_offset,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 80027a494730..2852d350ada1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -582,19 +582,29 @@ fail:
return -ENOMEM;
}
-void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
- unsigned long indx;
-
- /* Make sure the HW channel is stopped! */
- synchronize_irq(rcfw->vector);
tasklet_disable(&rcfw->worker);
- tasklet_kill(&rcfw->worker);
+ /* Mask h/w interrupts */
+ CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
+ rcfw->creq.max_elements);
+ /* Sync with last running IRQ-handler */
+ synchronize_irq(rcfw->vector);
+ if (kill)
+ tasklet_kill(&rcfw->worker);
if (rcfw->requested) {
free_irq(rcfw->vector, rcfw);
rcfw->requested = false;
}
+}
+
+void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+{
+ unsigned long indx;
+
+ bnxt_qplib_rcfw_stop_irq(rcfw, true);
+
if (rcfw->cmdq_bar_reg_iomem)
iounmap(rcfw->cmdq_bar_reg_iomem);
rcfw->cmdq_bar_reg_iomem = NULL;
@@ -614,6 +624,31 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
rcfw->vector = 0;
}
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ bool need_init)
+{
+ int rc;
+
+ if (rcfw->requested)
+ return -EFAULT;
+
+ rcfw->vector = msix_vector;
+ if (need_init)
+ tasklet_init(&rcfw->worker,
+ bnxt_qplib_service_creq, (unsigned long)rcfw);
+ else
+ tasklet_enable(&rcfw->worker);
+ rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
+ "bnxt_qplib_creq", rcfw);
+ if (rc)
+ return rc;
+ rcfw->requested = true;
+ CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
+ rcfw->creq.max_elements);
+
+ return 0;
+}
+
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
@@ -675,27 +710,17 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
rcfw->creq_qp_event_processed = 0;
rcfw->creq_func_event_processed = 0;
- rcfw->vector = msix_vector;
if (aeq_handler)
rcfw->aeq_handler = aeq_handler;
+ init_waitqueue_head(&rcfw->waitq);
- tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
- (unsigned long)rcfw);
-
- rcfw->requested = false;
- rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
- "bnxt_qplib_creq", rcfw);
+ rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
if (rc) {
dev_err(&rcfw->pdev->dev,
"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
bnxt_qplib_disable_rcfw_channel(rcfw);
return rc;
}
- rcfw->requested = true;
-
- init_waitqueue_head(&rcfw->waitq);
-
- CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
init.cmdq_size_cmdq_lvl = cpu_to_le16(
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index c7cce2e4185e..46416dfe8830 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -195,7 +195,10 @@ struct bnxt_qplib_rcfw {
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ bool need_init);
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index e90f2fd8dc16..1445918e3239 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -489,10 +489,10 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
err_dereg_mem:
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
-err_free_wr_wait:
- c4iw_put_wr_wait(mhp->wr_waitp);
err_free_skb:
kfree_skb(mhp->dereg_skb);
+err_free_wr_wait:
+ c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
kfree(mhp);
return ERR_PTR(ret);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index e6a60fa59f2b..e6bdd0c1e80a 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -5944,6 +5944,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
u64 status;
u32 sw_index;
int i = 0;
+ unsigned long irq_flags;
sw_index = dd->hw_to_sw[hw_context];
if (sw_index >= dd->num_send_contexts) {
@@ -5953,10 +5954,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
return;
}
sci = &dd->send_contexts[sw_index];
+ spin_lock_irqsave(&dd->sc_lock, irq_flags);
sc = sci->sc;
if (!sc) {
dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
sw_index, hw_context);
+ spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
return;
}
@@ -5978,6 +5981,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
*/
if (sc->type != SC_USER)
queue_work(dd->pport->hfi1_wq, &sc->halt_work);
+ spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
/*
* Update the counters for the corresponding status bits.
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 14734d0d0b76..3a485f50fede 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -377,6 +377,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
hr_cq->set_ci_db = hr_cq->db.db_record;
*hr_cq->set_ci_db = 0;
+ hr_cq->db_en = 1;
}
/* Init mmt table and write buff address to mtt table */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 47e1b6ac1e1a..8013d69c5ac4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -722,6 +722,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
free_mr->mr_free_pd = to_hr_pd(pd);
free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
free_mr->mr_free_pd->ibpd.uobject = NULL;
+ free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
@@ -1036,7 +1037,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
do {
ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
- if (ret < 0) {
+ if (ret < 0 && hr_qp) {
dev_err(dev,
"(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
hr_qp->qpn, ret, hr_mr->key, ne);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 25916e8522ed..1f0965bb64ee 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -142,8 +142,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
unsigned long flags;
unsigned int ind;
void *wqe = NULL;
- u32 tmp_len = 0;
bool loopback;
+ u32 tmp_len;
int ret = 0;
u8 *smac;
int nreq;
@@ -189,6 +189,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
owner_bit =
~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+ tmp_len = 0;
/* Corresponding to the QP type, wqe process separately */
if (ibqp->qp_type == IB_QPT_GSI) {
@@ -547,16 +548,20 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}
if (i < hr_qp->rq.max_gs) {
- dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
- dseg[i].addr = 0;
+ dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+ dseg->addr = 0;
}
/* rq support inline data */
- sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
- hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
- for (i = 0; i < wr->num_sge; i++) {
- sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
- sge_list[i].len = wr->sg_list[i].length;
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
+ hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
+ (u32)wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ sge_list[i].addr =
+ (void *)(u64)wr->sg_list[i].addr;
+ sge_list[i].len = wr->sg_list[i].length;
+ }
}
hr_qp->rq.wrid[ind] = wr->wr_id;
@@ -613,6 +618,8 @@ static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
ring->desc_num * sizeof(struct hns_roce_cmq_desc),
DMA_BIDIRECTIONAL);
+
+ ring->desc_dma_addr = 0;
kfree(ring->desc);
}
@@ -1081,6 +1088,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
if (ret) {
dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
ret);
+ return ret;
}
/* Get pf resource owned by every pf */
@@ -1372,6 +1380,8 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
mr->type == MR_TYPE_MR ? 0 : 1);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
+ 1);
mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
@@ -2169,6 +2179,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
/*
@@ -2281,7 +2292,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
qpc_mask->rq_db_record_addr = 0;
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
+ (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
@@ -4703,6 +4715,8 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
{0, }
};
+MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
+
static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 9d48bc07a9e6..96fb6a9ed93c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -199,7 +199,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
memset(props, 0, sizeof(*props));
- props->sys_image_guid = cpu_to_be32(hr_dev->sys_image_guid);
+ props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
props->max_mr_size = (u64)(~(0ULL));
props->page_size_cap = hr_dev->caps.page_size_cap;
props->vendor_id = hr_dev->vendor_id;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index d4aad34c21e2..baaf906f7c2e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -660,6 +660,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_rq_sge_list;
}
*hr_qp->rdb.db_record = 0;
+ hr_qp->rdb_en = 1;
}
/* Allocate QP buf */
@@ -955,7 +956,14 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
- ret = 0;
+ if (hr_dev->caps.min_wqes) {
+ ret = -EPERM;
+ dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
+ new_state);
+ } else {
+ ret = 0;
+ }
+
goto out;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index d5d8c1be345a..2f2b4426ded7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -207,6 +207,7 @@ struct i40iw_msix_vector {
u32 irq;
u32 cpu_affinity;
u32 ceq_id;
+ cpumask_t mask;
};
struct l2params_work {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 4cfa8f4647e2..f7c6fd9ff6e2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -2093,7 +2093,7 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
if (netif_is_bond_slave(netdev))
netdev = netdev_master_upper_dev_get(netdev);
- neigh = dst_neigh_lookup(dst, &dst_addr);
+ neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
rcu_read_lock();
if (neigh) {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 6139836fb533..c9f62ca7643c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -331,7 +331,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
switch (info->ae_id) {
case I40IW_AE_LLP_FIN_RECEIVED:
if (qp->term_flags)
- continue;
+ break;
if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
@@ -360,7 +360,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
break;
case I40IW_AE_LLP_CONNECTION_RESET:
if (atomic_read(&iwqp->close_timer_started))
- continue;
+ break;
i40iw_cm_disconn(iwqp);
break;
case I40IW_AE_QP_SUSPEND_COMPLETE:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 9cd0d3ef9057..05001e6da1f8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -687,7 +687,6 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
struct i40iw_msix_vector *msix_vec)
{
enum i40iw_status_code status;
- cpumask_t mask;
if (iwdev->msix_shared && !ceq_id) {
tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
@@ -697,9 +696,9 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
}
- cpumask_clear(&mask);
- cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
- irq_set_affinity_hint(msix_vec->irq, &mask);
+ cpumask_clear(&msix_vec->mask);
+ cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
+ irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
if (status) {
i40iw_pr_err("ceq irq config fail\n");
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 40e4f5ab2b46..68679ad4c6da 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -394,6 +394,7 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
list_for_each_entry(iwpbl, pbl_list, list) {
if (iwpbl->user_base == va) {
+ iwpbl->on_list = false;
list_del(&iwpbl->list);
return iwpbl;
}
@@ -614,6 +615,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
return ERR_PTR(-ENOMEM);
iwqp = (struct i40iw_qp *)mem;
+ iwqp->allocated_buffer = mem;
qp = &iwqp->sc_qp;
qp->back_qp = (void *)iwqp;
qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
@@ -642,7 +644,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
goto error;
}
- iwqp->allocated_buffer = mem;
iwqp->iwdev = iwdev;
iwqp->iwpd = iwpd;
iwqp->ibqp.qp_num = qp_num;
@@ -1898,6 +1899,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
goto error;
spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+ iwpbl->on_list = true;
spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
break;
case IW_MEMREG_TYPE_CQ:
@@ -1908,6 +1910,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+ iwpbl->on_list = true;
spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
break;
case IW_MEMREG_TYPE_MEM:
@@ -2045,14 +2048,18 @@ static void i40iw_del_memlist(struct i40iw_mr *iwmr,
switch (iwmr->type) {
case IW_MEMREG_TYPE_CQ:
spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
- if (!list_empty(&ucontext->cq_reg_mem_list))
+ if (iwpbl->on_list) {
+ iwpbl->on_list = false;
list_del(&iwpbl->list);
+ }
spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
break;
case IW_MEMREG_TYPE_QP:
spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
- if (!list_empty(&ucontext->qp_reg_mem_list))
+ if (iwpbl->on_list) {
+ iwpbl->on_list = false;
list_del(&iwpbl->list);
+ }
spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
break;
default:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 9067443cd311..76cf173377ab 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -78,6 +78,7 @@ struct i40iw_pbl {
};
bool pbl_allocated;
+ bool on_list;
u64 user_base;
struct i40iw_pble_alloc pble_alloc;
struct i40iw_mr *iwmr;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b4d8ff8ab807..69716a7ea993 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2416,7 +2416,7 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
}
-static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
+static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
bool inner)
{
if (inner) {
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 87b7c1be2a11..2193dc1765fb 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -484,11 +484,6 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
return 1;
}
-static int first_med_bfreg(void)
-{
- return 1;
-}
-
enum {
/* this is the first blue flame register in the array of bfregs assigned
* to a processes. Since we do not use it for blue flame but rather
@@ -514,6 +509,12 @@ static int num_med_bfreg(struct mlx5_ib_dev *dev,
return n >= 0 ? n : 0;
}
+static int first_med_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
+{
+ return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
+}
+
static int first_hi_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
@@ -541,10 +542,13 @@ static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
- int minidx = first_med_bfreg();
+ int minidx = first_med_bfreg(dev, bfregi);
int i;
- for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) {
+ if (minidx < 0)
+ return minidx;
+
+ for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
if (bfregi->count[i] < bfregi->count[minidx])
minidx = i;
if (!bfregi->count[minidx])
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 7d3763b2e01c..3f9afc02d166 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -401,49 +401,47 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
struct qedr_dev *dev = get_qedr_dev(context->device);
- unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
- u64 unmapped_db = dev->db_phys_addr;
+ unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
unsigned long len = (vma->vm_end - vma->vm_start);
- int rc = 0;
- bool found;
+ unsigned long dpi_start;
+
+ dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
DP_DEBUG(dev, QEDR_MSG_INIT,
- "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
- vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
- if (vma->vm_start & (PAGE_SIZE - 1)) {
- DP_ERR(dev, "Vma_start not page aligned = %ld\n",
- vma->vm_start);
+ "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
+ (void *)vma->vm_start, (void *)vma->vm_end,
+ (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
+
+ if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
+ DP_ERR(dev,
+ "failed mmap, adrresses must be page aligned: start=0x%pK, end=0x%pK\n",
+ (void *)vma->vm_start, (void *)vma->vm_end);
return -EINVAL;
}
- found = qedr_search_mmap(ucontext, vm_page, len);
- if (!found) {
- DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
+ if (!qedr_search_mmap(ucontext, phys_addr, len)) {
+ DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
vma->vm_pgoff);
return -EINVAL;
}
- DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
-
- if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
- dev->db_size))) {
- DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
- if (vma->vm_flags & VM_READ) {
- DP_ERR(dev, "Trying to map doorbell bar for read\n");
- return -EPERM;
- }
-
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ if (phys_addr < dpi_start ||
+ ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
+ DP_ERR(dev,
+ "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
+ (void *)phys_addr, (void *)dpi_start,
+ ucontext->dpi_size);
+ return -EINVAL;
+ }
- rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- PAGE_SIZE, vma->vm_page_prot);
- } else {
- DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
- rc = remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff, len, vma->vm_page_prot);
+ if (vma->vm_flags & VM_READ) {
+ DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
+ return -EINVAL;
}
- DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
- return rc;
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
+ vma->vm_page_prot);
}
struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 2cb52fd48cf1..73a00a1c06f6 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -761,7 +761,6 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
unsigned int mask;
unsigned int length = 0;
int i;
- int must_sched;
while (wr) {
mask = wr_opcode_mask(wr->opcode, qp);
@@ -791,14 +790,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
wr = wr->next;
}
- /*
- * Must sched in case of GSI QP because ib_send_mad() hold irq lock,
- * and the requester call ip_local_out_sk() that takes spin_lock_bh.
- */
- must_sched = (qp_type(qp) == IB_QPT_GSI) ||
- (queue_count(qp->sq.queue) > 1);
-
- rxe_run_task(&qp->req.task, must_sched);
+ rxe_run_task(&qp->req.task, 1);
if (unlikely(qp->req.state == QP_STATE_ERROR))
rxe_run_task(&qp->comp.task, 1);
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index 29f99529b187..cfcb32559925 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -130,7 +130,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
bool max_baseline, u8 *value)
{
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
max_baseline ?
@@ -149,7 +149,7 @@ static int elan_smbus_get_version(struct i2c_client *client,
bool iap, u8 *version)
{
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
iap ? ETP_SMBUS_IAP_VERSION_CMD :
@@ -170,7 +170,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
u8 *clickpad)
{
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
ETP_SMBUS_SM_VERSION_CMD, val);
@@ -188,7 +188,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
{
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
ETP_SMBUS_UNIQUEID_CMD, val);
@@ -205,7 +205,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client,
bool iap, u16 *csum)
{
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
@@ -226,7 +226,7 @@ static int elan_smbus_get_max(struct i2c_client *client,
{
int ret;
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
if (ret != 3) {
@@ -246,7 +246,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client,
{
int ret;
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val);
if (ret != 3) {
@@ -267,7 +267,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
{
int ret;
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val);
if (ret != 3) {
@@ -294,7 +294,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client,
{
int error;
u16 constant;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
if (error < 0) {
@@ -345,7 +345,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
int len;
int error;
enum tp_mode mode;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
u16 password;
@@ -419,7 +419,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
struct device *dev = &client->dev;
int error;
u16 result;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
/*
* Due to the limitation of smbus protocol limiting
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 60f2c463d1cc..a9591d278145 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -172,6 +172,12 @@ static const char * const smbus_pnp_ids[] = {
"LEN0048", /* X1 Carbon 3 */
"LEN0046", /* X250 */
"LEN004a", /* W541 */
+ "LEN0071", /* T480 */
+ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
+ "LEN0073", /* X1 Carbon G5 (Elantech) */
+ "LEN0092", /* X1 Carbon 6 */
+ "LEN0096", /* X280 */
+ "LEN0097", /* X280 -> ALPS trackpoint */
"LEN200f", /* T450s */
NULL
};
diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
index 944a7f338099..1b25d8bc153a 100644
--- a/drivers/isdn/hardware/eicon/diva.c
+++ b/drivers/isdn/hardware/eicon/diva.c
@@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void)
** Receive and process command from user mode utility
*/
void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
- int length,
+ int length, void *mptr,
divas_xdi_copy_from_user_fn_t cp_fn)
{
- diva_xdi_um_cfg_cmd_t msg;
+ diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
diva_os_xdi_adapter_t *a = NULL;
diva_os_spin_lock_magic_t old_irql;
struct list_head *tmp;
@@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
length, sizeof(diva_xdi_um_cfg_cmd_t)))
return NULL;
}
- if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) {
+ if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
DBG_ERR(("A: A(?) open, write error"))
return NULL;
}
diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
list_for_each(tmp, &adapter_queue) {
a = list_entry(tmp, diva_os_xdi_adapter_t, link);
- if (a->controller == (int)msg.adapter)
+ if (a->controller == (int)msg->adapter)
break;
a = NULL;
}
diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
if (!a) {
- DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter))
+ DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
}
return (a);
@@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle)
int
diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
- int length, divas_xdi_copy_from_user_fn_t cp_fn)
+ int length, void *mptr,
+ divas_xdi_copy_from_user_fn_t cp_fn)
{
+ diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
void *data;
@@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
return (-2);
}
- length = (*cp_fn) (os_handle, data, src, length);
+ if (msg) {
+ *(diva_xdi_um_cfg_cmd_t *)data = *msg;
+ length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
+ src + sizeof(*msg), length - sizeof(*msg));
+ } else {
+ length = (*cp_fn) (os_handle, data, src, length);
+ }
if (length > 0) {
if ((*(a->interface.cmd_proc))
(a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h
index b067032093a8..1ad76650fbf9 100644
--- a/drivers/isdn/hardware/eicon/diva.h
+++ b/drivers/isdn/hardware/eicon/diva.h
@@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
int max_length, divas_xdi_copy_to_user_fn_t cp_fn);
int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
- int length, divas_xdi_copy_from_user_fn_t cp_fn);
+ int length, void *msg,
+ divas_xdi_copy_from_user_fn_t cp_fn);
void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
- int length,
+ int length, void *msg,
divas_xdi_copy_from_user_fn_t cp_fn);
void diva_xdi_close_adapter(void *adapter, void *os_handle);
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index b9980e84f9db..b6a3950b2564 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file)
static ssize_t divas_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
+ diva_xdi_um_cfg_cmd_t msg;
int ret = -EINVAL;
if (!file->private_data) {
file->private_data = diva_xdi_open_adapter(file, buf,
- count,
+ count, &msg,
xdi_copy_from_user);
- }
- if (!file->private_data) {
- return (-ENODEV);
+ if (!file->private_data)
+ return (-ENODEV);
+ ret = diva_xdi_write(file->private_data, file,
+ buf, count, &msg, xdi_copy_from_user);
+ } else {
+ ret = diva_xdi_write(file->private_data, file,
+ buf, count, NULL, xdi_copy_from_user);
}
- ret = diva_xdi_write(file->private_data, file,
- buf, count, xdi_copy_from_user);
switch (ret) {
case -1: /* Message should be removed from rx mailbox first */
ret = -EBUSY;
@@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf,
static ssize_t divas_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
+ diva_xdi_um_cfg_cmd_t msg;
int ret = -EINVAL;
if (!file->private_data) {
file->private_data = diva_xdi_open_adapter(file, buf,
- count,
+ count, &msg,
xdi_copy_from_user);
}
if (!file->private_data) {
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 63171cdce270..60aa7bc5a630 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -431,7 +431,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
return 0;
err_sysfs:
if (tt->exit)
- tt->exit(targetdata);
+ tt->exit(targetdata, true);
err_init:
blk_cleanup_queue(tqueue);
tdisk->queue = NULL;
@@ -446,7 +446,7 @@ err_reserve:
return ret;
}
-static void __nvm_remove_target(struct nvm_target *t)
+static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
struct nvm_tgt_type *tt = t->type;
struct gendisk *tdisk = t->disk;
@@ -459,7 +459,7 @@ static void __nvm_remove_target(struct nvm_target *t)
tt->sysfs_exit(tdisk);
if (tt->exit)
- tt->exit(tdisk->private_data);
+ tt->exit(tdisk->private_data, graceful);
nvm_remove_tgt_dev(t->dev, 1);
put_disk(tdisk);
@@ -489,7 +489,7 @@ static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
mutex_unlock(&dev->mlock);
return 1;
}
- __nvm_remove_target(t);
+ __nvm_remove_target(t, true);
mutex_unlock(&dev->mlock);
return 0;
@@ -963,7 +963,7 @@ void nvm_unregister(struct nvm_dev *dev)
list_for_each_entry_safe(t, tmp, &dev->targets, list) {
if (t->dev->parent != dev)
continue;
- __nvm_remove_target(t);
+ __nvm_remove_target(t, false);
}
mutex_unlock(&dev->mlock);
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index 29a23111b31c..b1c6d7eb6115 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -44,13 +44,15 @@ retry:
goto out;
}
- if (unlikely(!bio_has_data(bio)))
- goto out;
-
pblk_ppa_set_empty(&w_ctx.ppa);
w_ctx.flags = flags;
- if (bio->bi_opf & REQ_PREFLUSH)
+ if (bio->bi_opf & REQ_PREFLUSH) {
w_ctx.flags |= PBLK_FLUSH_ENTRY;
+ pblk_write_kick(pblk);
+ }
+
+ if (unlikely(!bio_has_data(bio)))
+ goto out;
for (i = 0; i < nr_entries; i++) {
void *data = bio_data(bio);
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 94d5d97c9d8a..ed9cc977c8b3 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -40,7 +40,7 @@ static void pblk_line_mark_bb(struct work_struct *work)
}
kfree(ppa);
- mempool_free(line_ws, pblk->gen_ws_pool);
+ mempool_free(line_ws, &pblk->gen_ws_pool);
}
static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
@@ -102,7 +102,7 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
struct pblk *pblk = rqd->private;
__pblk_end_io_erase(pblk, rqd);
- mempool_free(rqd, pblk->e_rq_pool);
+ mempool_free(rqd, &pblk->e_rq_pool);
}
/*
@@ -237,15 +237,15 @@ struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
switch (type) {
case PBLK_WRITE:
case PBLK_WRITE_INT:
- pool = pblk->w_rq_pool;
+ pool = &pblk->w_rq_pool;
rq_size = pblk_w_rq_size;
break;
case PBLK_READ:
- pool = pblk->r_rq_pool;
+ pool = &pblk->r_rq_pool;
rq_size = pblk_g_rq_size;
break;
default:
- pool = pblk->e_rq_pool;
+ pool = &pblk->e_rq_pool;
rq_size = pblk_g_rq_size;
}
@@ -265,20 +265,22 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
case PBLK_WRITE:
kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
case PBLK_WRITE_INT:
- pool = pblk->w_rq_pool;
+ pool = &pblk->w_rq_pool;
break;
case PBLK_READ:
- pool = pblk->r_rq_pool;
+ pool = &pblk->r_rq_pool;
break;
case PBLK_ERASE:
- pool = pblk->e_rq_pool;
+ pool = &pblk->e_rq_pool;
break;
default:
pr_err("pblk: trying to free unknown rqd type\n");
return;
}
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
+ if (rqd->meta_list)
+ nvm_dev_dma_free(dev->parent, rqd->meta_list,
+ rqd->dma_meta_list);
mempool_free(rqd, pool);
}
@@ -292,7 +294,7 @@ void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
for (i = off; i < nr_pages + off; i++) {
bv = bio->bi_io_vec[i];
- mempool_free(bv.bv_page, pblk->page_bio_pool);
+ mempool_free(bv.bv_page, &pblk->page_bio_pool);
}
}
@@ -304,23 +306,23 @@ int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
int i, ret;
for (i = 0; i < nr_pages; i++) {
- page = mempool_alloc(pblk->page_bio_pool, flags);
+ page = mempool_alloc(&pblk->page_bio_pool, flags);
ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
if (ret != PBLK_EXPOSED_PAGE_SIZE) {
pr_err("pblk: could not add page to bio\n");
- mempool_free(page, pblk->page_bio_pool);
+ mempool_free(page, &pblk->page_bio_pool);
goto err;
}
}
return 0;
err:
- pblk_bio_free_pages(pblk, bio, 0, i - 1);
+ pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
return -1;
}
-static void pblk_write_kick(struct pblk *pblk)
+void pblk_write_kick(struct pblk *pblk)
{
wake_up_process(pblk->writer_ts);
mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
@@ -342,13 +344,6 @@ void pblk_write_should_kick(struct pblk *pblk)
pblk_write_kick(pblk);
}
-void pblk_end_io_sync(struct nvm_rq *rqd)
-{
- struct completion *waiting = rqd->private;
-
- complete(waiting);
-}
-
static void pblk_wait_for_meta(struct pblk *pblk)
{
do {
@@ -380,7 +375,13 @@ struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
lockdep_assert_held(&line->lock);
- if (!vsc) {
+ if (line->w_err_gc->has_write_err) {
+ if (line->gc_group != PBLK_LINEGC_WERR) {
+ line->gc_group = PBLK_LINEGC_WERR;
+ move_list = &l_mg->gc_werr_list;
+ pblk_rl_werr_line_in(&pblk->rl);
+ }
+ } else if (!vsc) {
if (line->gc_group != PBLK_LINEGC_FULL) {
line->gc_group = PBLK_LINEGC_FULL;
move_list = &l_mg->gc_full_list;
@@ -467,16 +468,13 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
struct nvm_tgt_dev *dev = pblk->dev;
-#ifdef CONFIG_NVM_DEBUG
- int ret;
+ atomic_inc(&pblk->inflight_io);
- ret = pblk_check_io(pblk, rqd);
- if (ret)
- return ret;
+#ifdef CONFIG_NVM_DEBUG
+ if (pblk_check_io(pblk, rqd))
+ return NVM_IO_ERR;
#endif
- atomic_inc(&pblk->inflight_io);
-
return nvm_submit_io(dev, rqd);
}
@@ -484,16 +482,13 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
struct nvm_tgt_dev *dev = pblk->dev;
-#ifdef CONFIG_NVM_DEBUG
- int ret;
+ atomic_inc(&pblk->inflight_io);
- ret = pblk_check_io(pblk, rqd);
- if (ret)
- return ret;
+#ifdef CONFIG_NVM_DEBUG
+ if (pblk_check_io(pblk, rqd))
+ return NVM_IO_ERR;
#endif
- atomic_inc(&pblk->inflight_io);
-
return nvm_submit_io_sync(dev, rqd);
}
@@ -856,9 +851,10 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
atomic_dec(&pblk->inflight_io);
if (rqd.error) {
- if (dir == PBLK_WRITE)
+ if (dir == PBLK_WRITE) {
pblk_log_write_err(pblk, &rqd);
- else if (dir == PBLK_READ)
+ ret = 1;
+ } else if (dir == PBLK_READ)
pblk_log_read_err(pblk, &rqd);
}
@@ -1071,6 +1067,25 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
return 1;
}
+static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
+{
+ struct pblk_line_meta *lm = &pblk->lm;
+
+ line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
+ if (!line->map_bitmap)
+ return -ENOMEM;
+
+ /* will be initialized using bb info from map_bitmap */
+ line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
+ if (!line->invalid_bitmap) {
+ kfree(line->map_bitmap);
+ line->map_bitmap = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
/* For now lines are always assumed full lines. Thus, smeta former and current
* lun bitmaps are omitted.
*/
@@ -1108,7 +1123,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
pr_debug("pblk: line smeta I/O failed. Retry\n");
- return 1;
+ return 0;
}
bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
@@ -1174,19 +1189,9 @@ static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
struct pblk_line_meta *lm = &pblk->lm;
+ int blk_in_line = atomic_read(&line->blk_in_line);
int blk_to_erase;
- line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_ATOMIC);
- if (!line->map_bitmap)
- return -ENOMEM;
-
- /* will be initialized using bb info from map_bitmap */
- line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_ATOMIC);
- if (!line->invalid_bitmap) {
- kfree(line->map_bitmap);
- return -ENOMEM;
- }
-
/* Bad blocks do not need to be erased */
bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
@@ -1199,16 +1204,19 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
blk_to_erase = pblk_prepare_new_line(pblk, line);
line->state = PBLK_LINESTATE_FREE;
} else {
- blk_to_erase = atomic_read(&line->blk_in_line);
+ blk_to_erase = blk_in_line;
}
- if (line->state != PBLK_LINESTATE_FREE) {
- kfree(line->map_bitmap);
- kfree(line->invalid_bitmap);
+ if (blk_in_line < lm->min_blk_line) {
spin_unlock(&line->lock);
+ return -EAGAIN;
+ }
+
+ if (line->state != PBLK_LINESTATE_FREE) {
WARN(1, "pblk: corrupted line %d, state %d\n",
line->id, line->state);
- return -EAGAIN;
+ spin_unlock(&line->lock);
+ return -EINTR;
}
line->state = PBLK_LINESTATE_OPEN;
@@ -1241,13 +1249,16 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
}
spin_unlock(&l_mg->free_lock);
- pblk_rl_free_lines_dec(&pblk->rl, line, true);
+ ret = pblk_line_alloc_bitmaps(pblk, line);
+ if (ret)
+ return ret;
if (!pblk_line_init_bb(pblk, line, 0)) {
list_add(&line->list, &l_mg->free_list);
return -EINTR;
}
+ pblk_rl_free_lines_dec(&pblk->rl, line, true);
return 0;
}
@@ -1259,6 +1270,24 @@ void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
line->emeta = NULL;
}
+static void pblk_line_reinit(struct pblk_line *line)
+{
+ *line->vsc = cpu_to_le32(EMPTY_ENTRY);
+
+ line->map_bitmap = NULL;
+ line->invalid_bitmap = NULL;
+ line->smeta = NULL;
+ line->emeta = NULL;
+}
+
+void pblk_line_free(struct pblk_line *line)
+{
+ kfree(line->map_bitmap);
+ kfree(line->invalid_bitmap);
+
+ pblk_line_reinit(line);
+}
+
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
@@ -1292,10 +1321,14 @@ retry:
ret = pblk_line_prepare(pblk, line);
if (ret) {
- if (ret == -EAGAIN) {
+ switch (ret) {
+ case -EAGAIN:
+ list_add(&line->list, &l_mg->bad_list);
+ goto retry;
+ case -EINTR:
list_add(&line->list, &l_mg->corrupt_list);
goto retry;
- } else {
+ default:
pr_err("pblk: failed to prepare line %d\n", line->id);
list_add(&line->list, &l_mg->free_list);
l_mg->nr_free_lines++;
@@ -1321,11 +1354,14 @@ retry:
return NULL;
}
+ retry_line->map_bitmap = line->map_bitmap;
+ retry_line->invalid_bitmap = line->invalid_bitmap;
retry_line->smeta = line->smeta;
retry_line->emeta = line->emeta;
retry_line->meta_line = line->meta_line;
- pblk_line_free(pblk, line);
+ pblk_line_reinit(line);
+
l_mg->data_line = retry_line;
spin_unlock(&l_mg->free_lock);
@@ -1378,6 +1414,9 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
}
spin_unlock(&l_mg->free_lock);
+ if (pblk_line_alloc_bitmaps(pblk, line))
+ return NULL;
+
if (pblk_line_erase(pblk, line)) {
line = pblk_line_retry(pblk, line);
if (!line)
@@ -1449,7 +1488,7 @@ static void pblk_line_close_meta_sync(struct pblk *pblk)
flush_workqueue(pblk->close_wq);
}
-void pblk_pipeline_stop(struct pblk *pblk)
+void __pblk_pipeline_flush(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
int ret;
@@ -1474,6 +1513,11 @@ void pblk_pipeline_stop(struct pblk *pblk)
flush_workqueue(pblk->bb_wq);
pblk_line_close_meta_sync(pblk);
+}
+
+void __pblk_pipeline_stop(struct pblk *pblk)
+{
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
spin_lock(&l_mg->free_lock);
pblk->state = PBLK_STATE_STOPPED;
@@ -1482,6 +1526,12 @@ void pblk_pipeline_stop(struct pblk *pblk)
spin_unlock(&l_mg->free_lock);
}
+void pblk_pipeline_stop(struct pblk *pblk)
+{
+ __pblk_pipeline_flush(pblk);
+ __pblk_pipeline_stop(pblk);
+}
+
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
@@ -1511,6 +1561,9 @@ retry_erase:
goto retry_erase;
}
+ if (pblk_line_alloc_bitmaps(pblk, new))
+ return NULL;
+
retry_setup:
if (!pblk_line_init_metadata(pblk, new, cur)) {
new = pblk_line_retry(pblk, new);
@@ -1550,19 +1603,6 @@ out:
return new;
}
-void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
-{
- kfree(line->map_bitmap);
- kfree(line->invalid_bitmap);
-
- *line->vsc = cpu_to_le32(EMPTY_ENTRY);
-
- line->map_bitmap = NULL;
- line->invalid_bitmap = NULL;
- line->smeta = NULL;
- line->emeta = NULL;
-}
-
static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
@@ -1572,9 +1612,14 @@ static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
WARN_ON(line->state != PBLK_LINESTATE_GC);
line->state = PBLK_LINESTATE_FREE;
line->gc_group = PBLK_LINEGC_NONE;
- pblk_line_free(pblk, line);
- spin_unlock(&line->lock);
+ pblk_line_free(line);
+
+ if (line->w_err_gc->has_write_err) {
+ pblk_rl_werr_line_out(&pblk->rl);
+ line->w_err_gc->has_write_err = 0;
+ }
+ spin_unlock(&line->lock);
atomic_dec(&gc->pipeline_gc);
spin_lock(&l_mg->free_lock);
@@ -1593,7 +1638,7 @@ static void pblk_line_put_ws(struct work_struct *work)
struct pblk_line *line = line_put_ws->line;
__pblk_line_put(pblk, line);
- mempool_free(line_put_ws, pblk->gen_ws_pool);
+ mempool_free(line_put_ws, &pblk->gen_ws_pool);
}
void pblk_line_put(struct kref *ref)
@@ -1610,7 +1655,7 @@ void pblk_line_put_wq(struct kref *ref)
struct pblk *pblk = line->pblk;
struct pblk_line_ws *line_put_ws;
- line_put_ws = mempool_alloc(pblk->gen_ws_pool, GFP_ATOMIC);
+ line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
if (!line_put_ws)
return;
@@ -1737,11 +1782,34 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
spin_lock(&l_mg->close_lock);
spin_lock(&line->lock);
+
+ /* Update the in-memory start address for emeta, in case it has
+ * shifted due to write errors
+ */
+ if (line->emeta_ssec != line->cur_sec)
+ line->emeta_ssec = line->cur_sec;
+
list_add_tail(&line->list, &l_mg->emeta_list);
spin_unlock(&line->lock);
spin_unlock(&l_mg->close_lock);
pblk_line_should_sync_meta(pblk);
+
+
+}
+
+static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
+{
+ struct pblk_line_meta *lm = &pblk->lm;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ unsigned int lba_list_size = lm->emeta_len[2];
+ struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
+ struct pblk_emeta *emeta = line->emeta;
+
+ w_err_gc->lba_list = pblk_malloc(lba_list_size,
+ l_mg->emeta_alloc_type, GFP_KERNEL);
+ memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
+ lba_list_size);
}
void pblk_line_close_ws(struct work_struct *work)
@@ -1750,9 +1818,16 @@ void pblk_line_close_ws(struct work_struct *work)
ws);
struct pblk *pblk = line_ws->pblk;
struct pblk_line *line = line_ws->line;
+ struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
+
+ /* Write errors makes the emeta start address stored in smeta invalid,
+ * so keep a copy of the lba list until we've gc'd the line
+ */
+ if (w_err_gc->has_write_err)
+ pblk_save_lba_list(pblk, line);
pblk_line_close(pblk, line);
- mempool_free(line_ws, pblk->gen_ws_pool);
+ mempool_free(line_ws, &pblk->gen_ws_pool);
}
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
@@ -1761,7 +1836,7 @@ void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
{
struct pblk_line_ws *line_ws;
- line_ws = mempool_alloc(pblk->gen_ws_pool, gfp_mask);
+ line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
line_ws->pblk = pblk;
line_ws->line = line;
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 6851a5c67189..df88f1bdd921 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -129,6 +129,53 @@ out:
kfree(gc_rq_ws);
}
+static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
+ struct pblk_line *line)
+{
+ struct line_emeta *emeta_buf;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_line_meta *lm = &pblk->lm;
+ unsigned int lba_list_size = lm->emeta_len[2];
+ __le64 *lba_list;
+ int ret;
+
+ emeta_buf = pblk_malloc(lm->emeta_len[0],
+ l_mg->emeta_alloc_type, GFP_KERNEL);
+ if (!emeta_buf)
+ return NULL;
+
+ ret = pblk_line_read_emeta(pblk, line, emeta_buf);
+ if (ret) {
+ pr_err("pblk: line %d read emeta failed (%d)\n",
+ line->id, ret);
+ pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
+ return NULL;
+ }
+
+ /* If this read fails, it means that emeta is corrupted.
+ * For now, leave the line untouched.
+ * TODO: Implement a recovery routine that scans and moves
+ * all sectors on the line.
+ */
+
+ ret = pblk_recov_check_emeta(pblk, emeta_buf);
+ if (ret) {
+ pr_err("pblk: inconsistent emeta (line %d)\n",
+ line->id);
+ pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
+ return NULL;
+ }
+
+ lba_list = pblk_malloc(lba_list_size,
+ l_mg->emeta_alloc_type, GFP_KERNEL);
+ if (lba_list)
+ memcpy(lba_list, emeta_to_lbas(pblk, emeta_buf), lba_list_size);
+
+ pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
+
+ return lba_list;
+}
+
static void pblk_gc_line_prepare_ws(struct work_struct *work)
{
struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
@@ -138,46 +185,26 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
struct pblk_gc *gc = &pblk->gc;
- struct line_emeta *emeta_buf;
struct pblk_line_ws *gc_rq_ws;
struct pblk_gc_rq *gc_rq;
__le64 *lba_list;
unsigned long *invalid_bitmap;
int sec_left, nr_secs, bit;
- int ret;
invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
if (!invalid_bitmap)
goto fail_free_ws;
- emeta_buf = pblk_malloc(lm->emeta_len[0], l_mg->emeta_alloc_type,
- GFP_KERNEL);
- if (!emeta_buf) {
- pr_err("pblk: cannot use GC emeta\n");
- goto fail_free_bitmap;
- }
-
- ret = pblk_line_read_emeta(pblk, line, emeta_buf);
- if (ret) {
- pr_err("pblk: line %d read emeta failed (%d)\n", line->id, ret);
- goto fail_free_emeta;
- }
-
- /* If this read fails, it means that emeta is corrupted. For now, leave
- * the line untouched. TODO: Implement a recovery routine that scans and
- * moves all sectors on the line.
- */
-
- ret = pblk_recov_check_emeta(pblk, emeta_buf);
- if (ret) {
- pr_err("pblk: inconsistent emeta (line %d)\n", line->id);
- goto fail_free_emeta;
- }
-
- lba_list = emeta_to_lbas(pblk, emeta_buf);
- if (!lba_list) {
- pr_err("pblk: could not interpret emeta (line %d)\n", line->id);
- goto fail_free_emeta;
+ if (line->w_err_gc->has_write_err) {
+ lba_list = line->w_err_gc->lba_list;
+ line->w_err_gc->lba_list = NULL;
+ } else {
+ lba_list = get_lba_list_from_emeta(pblk, line);
+ if (!lba_list) {
+ pr_err("pblk: could not interpret emeta (line %d)\n",
+ line->id);
+ goto fail_free_ws;
+ }
}
spin_lock(&line->lock);
@@ -187,14 +214,14 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
if (sec_left < 0) {
pr_err("pblk: corrupted GC line (%d)\n", line->id);
- goto fail_free_emeta;
+ goto fail_free_lba_list;
}
bit = -1;
next_rq:
gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
if (!gc_rq)
- goto fail_free_emeta;
+ goto fail_free_lba_list;
nr_secs = 0;
do {
@@ -240,7 +267,7 @@ next_rq:
goto next_rq;
out:
- pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
+ pblk_mfree(lba_list, l_mg->emeta_alloc_type);
kfree(line_ws);
kfree(invalid_bitmap);
@@ -251,9 +278,8 @@ out:
fail_free_gc_rq:
kfree(gc_rq);
-fail_free_emeta:
- pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
-fail_free_bitmap:
+fail_free_lba_list:
+ pblk_mfree(lba_list, l_mg->emeta_alloc_type);
kfree(invalid_bitmap);
fail_free_ws:
kfree(line_ws);
@@ -349,12 +375,14 @@ static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
{
unsigned int nr_blocks_free, nr_blocks_need;
+ unsigned int werr_lines = atomic_read(&rl->werr_lines);
nr_blocks_need = pblk_rl_high_thrs(rl);
nr_blocks_free = pblk_rl_nr_free_blks(rl);
/* This is not critical, no need to take lock here */
- return ((gc->gc_active) && (nr_blocks_need > nr_blocks_free));
+ return ((werr_lines > 0) ||
+ ((gc->gc_active) && (nr_blocks_need > nr_blocks_free)));
}
void pblk_gc_free_full_lines(struct pblk *pblk)
@@ -649,7 +677,7 @@ fail_free_main_kthread:
return ret;
}
-void pblk_gc_exit(struct pblk *pblk)
+void pblk_gc_exit(struct pblk *pblk, bool graceful)
{
struct pblk_gc *gc = &pblk->gc;
@@ -663,10 +691,12 @@ void pblk_gc_exit(struct pblk *pblk)
if (gc->gc_reader_ts)
kthread_stop(gc->gc_reader_ts);
- flush_workqueue(gc->gc_reader_wq);
- destroy_workqueue(gc->gc_reader_wq);
+ if (graceful) {
+ flush_workqueue(gc->gc_reader_wq);
+ flush_workqueue(gc->gc_line_reader_wq);
+ }
- flush_workqueue(gc->gc_line_reader_wq);
+ destroy_workqueue(gc->gc_reader_wq);
destroy_workqueue(gc->gc_line_reader_wq);
if (gc->gc_writer_ts)
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 91a5bc2556a3..ce561f5d48ce 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -20,10 +20,15 @@
#include "pblk.h"
+unsigned int write_buffer_size;
+
+module_param(write_buffer_size, uint, 0644);
+MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");
+
static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
*pblk_w_rq_cache;
static DECLARE_RWSEM(pblk_lock);
-struct bio_set *pblk_bio_set;
+struct bio_set pblk_bio_set;
static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
struct bio *bio)
@@ -127,10 +132,8 @@ static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
if (!line) {
/* Configure next line for user data */
line = pblk_line_get_first_data(pblk);
- if (!line) {
- pr_err("pblk: line list corrupted\n");
+ if (!line)
return -EFAULT;
- }
}
return 0;
@@ -141,6 +144,7 @@ static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
sector_t i;
struct ppa_addr ppa;
size_t map_size;
+ int ret = 0;
map_size = pblk_trans_map_size(pblk);
pblk->trans_map = vmalloc(map_size);
@@ -152,7 +156,11 @@ static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
for (i = 0; i < pblk->rl.nr_secs; i++)
pblk_trans_map_set(pblk, i, ppa);
- return pblk_l2p_recover(pblk, factory_init);
+ ret = pblk_l2p_recover(pblk, factory_init);
+ if (ret)
+ vfree(pblk->trans_map);
+
+ return ret;
}
static void pblk_rwb_free(struct pblk *pblk)
@@ -169,10 +177,15 @@ static int pblk_rwb_init(struct pblk *pblk)
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct pblk_rb_entry *entries;
- unsigned long nr_entries;
+ unsigned long nr_entries, buffer_size;
unsigned int power_size, power_seg_sz;
- nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);
+ if (write_buffer_size && (write_buffer_size > pblk->pgs_in_buffer))
+ buffer_size = write_buffer_size;
+ else
+ buffer_size = pblk->pgs_in_buffer;
+
+ nr_entries = pblk_rb_calculate_size(buffer_size);
entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
if (!entries)
@@ -341,7 +354,7 @@ static int pblk_core_init(struct pblk *pblk)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- int max_write_ppas;
+ int ret, max_write_ppas;
atomic64_set(&pblk->user_wa, 0);
atomic64_set(&pblk->pad_wa, 0);
@@ -375,33 +388,33 @@ static int pblk_core_init(struct pblk *pblk)
goto fail_free_pad_dist;
/* Internal bios can be at most the sectors signaled by the device. */
- pblk->page_bio_pool = mempool_create_page_pool(NVM_MAX_VLBA, 0);
- if (!pblk->page_bio_pool)
+ ret = mempool_init_page_pool(&pblk->page_bio_pool, NVM_MAX_VLBA, 0);
+ if (ret)
goto free_global_caches;
- pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
- pblk_ws_cache);
- if (!pblk->gen_ws_pool)
+ ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
+ pblk_ws_cache);
+ if (ret)
goto free_page_bio_pool;
- pblk->rec_pool = mempool_create_slab_pool(geo->all_luns,
- pblk_rec_cache);
- if (!pblk->rec_pool)
+ ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
+ pblk_rec_cache);
+ if (ret)
goto free_gen_ws_pool;
- pblk->r_rq_pool = mempool_create_slab_pool(geo->all_luns,
- pblk_g_rq_cache);
- if (!pblk->r_rq_pool)
+ ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
+ pblk_g_rq_cache);
+ if (ret)
goto free_rec_pool;
- pblk->e_rq_pool = mempool_create_slab_pool(geo->all_luns,
- pblk_g_rq_cache);
- if (!pblk->e_rq_pool)
+ ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
+ pblk_g_rq_cache);
+ if (ret)
goto free_r_rq_pool;
- pblk->w_rq_pool = mempool_create_slab_pool(geo->all_luns,
- pblk_w_rq_cache);
- if (!pblk->w_rq_pool)
+ ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
+ pblk_w_rq_cache);
+ if (ret)
goto free_e_rq_pool;
pblk->close_wq = alloc_workqueue("pblk-close-wq",
@@ -423,6 +436,7 @@ static int pblk_core_init(struct pblk *pblk)
goto free_r_end_wq;
INIT_LIST_HEAD(&pblk->compl_list);
+ INIT_LIST_HEAD(&pblk->resubmit_list);
return 0;
@@ -433,17 +447,17 @@ free_bb_wq:
free_close_wq:
destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
- mempool_destroy(pblk->w_rq_pool);
+ mempool_exit(&pblk->w_rq_pool);
free_e_rq_pool:
- mempool_destroy(pblk->e_rq_pool);
+ mempool_exit(&pblk->e_rq_pool);
free_r_rq_pool:
- mempool_destroy(pblk->r_rq_pool);
+ mempool_exit(&pblk->r_rq_pool);
free_rec_pool:
- mempool_destroy(pblk->rec_pool);
+ mempool_exit(&pblk->rec_pool);
free_gen_ws_pool:
- mempool_destroy(pblk->gen_ws_pool);
+ mempool_exit(&pblk->gen_ws_pool);
free_page_bio_pool:
- mempool_destroy(pblk->page_bio_pool);
+ mempool_exit(&pblk->page_bio_pool);
free_global_caches:
pblk_free_global_caches(pblk);
fail_free_pad_dist:
@@ -462,12 +476,12 @@ static void pblk_core_free(struct pblk *pblk)
if (pblk->bb_wq)
destroy_workqueue(pblk->bb_wq);
- mempool_destroy(pblk->page_bio_pool);
- mempool_destroy(pblk->gen_ws_pool);
- mempool_destroy(pblk->rec_pool);
- mempool_destroy(pblk->r_rq_pool);
- mempool_destroy(pblk->e_rq_pool);
- mempool_destroy(pblk->w_rq_pool);
+ mempool_exit(&pblk->page_bio_pool);
+ mempool_exit(&pblk->gen_ws_pool);
+ mempool_exit(&pblk->rec_pool);
+ mempool_exit(&pblk->r_rq_pool);
+ mempool_exit(&pblk->e_rq_pool);
+ mempool_exit(&pblk->w_rq_pool);
pblk_free_global_caches(pblk);
kfree(pblk->pad_dist);
@@ -489,11 +503,17 @@ static void pblk_line_mg_free(struct pblk *pblk)
}
}
-static void pblk_line_meta_free(struct pblk_line *line)
+static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
+ struct pblk_line *line)
{
+ struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
+
kfree(line->blk_bitmap);
kfree(line->erase_bitmap);
kfree(line->chks);
+
+ pblk_mfree(w_err_gc->lba_list, l_mg->emeta_alloc_type);
+ kfree(w_err_gc);
}
static void pblk_lines_free(struct pblk *pblk)
@@ -506,8 +526,8 @@ static void pblk_lines_free(struct pblk *pblk)
for (i = 0; i < l_mg->nr_lines; i++) {
line = &pblk->lines[i];
- pblk_line_free(pblk, line);
- pblk_line_meta_free(line);
+ pblk_line_free(line);
+ pblk_line_meta_free(l_mg, line);
}
spin_unlock(&l_mg->free_lock);
@@ -748,14 +768,14 @@ static int pblk_setup_line_meta_20(struct pblk *pblk, struct pblk_line *line,
chunk->cnlb = chunk_meta->cnlb;
chunk->wp = chunk_meta->wp;
- if (!(chunk->state & NVM_CHK_ST_OFFLINE))
- continue;
-
if (chunk->type & NVM_CHK_TP_SZ_SPEC) {
WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n");
continue;
}
+ if (!(chunk->state & NVM_CHK_ST_OFFLINE))
+ continue;
+
set_bit(pos, line->blk_bitmap);
nr_bad_chks++;
}
@@ -809,20 +829,28 @@ static int pblk_alloc_line_meta(struct pblk *pblk, struct pblk_line *line)
return -ENOMEM;
line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
- if (!line->erase_bitmap) {
- kfree(line->blk_bitmap);
- return -ENOMEM;
- }
+ if (!line->erase_bitmap)
+ goto free_blk_bitmap;
+
line->chks = kmalloc(lm->blk_per_line * sizeof(struct nvm_chk_meta),
GFP_KERNEL);
- if (!line->chks) {
- kfree(line->erase_bitmap);
- kfree(line->blk_bitmap);
- return -ENOMEM;
- }
+ if (!line->chks)
+ goto free_erase_bitmap;
+
+ line->w_err_gc = kzalloc(sizeof(struct pblk_w_err_gc), GFP_KERNEL);
+ if (!line->w_err_gc)
+ goto free_chks;
return 0;
+
+free_chks:
+ kfree(line->chks);
+free_erase_bitmap:
+ kfree(line->erase_bitmap);
+free_blk_bitmap:
+ kfree(line->blk_bitmap);
+ return -ENOMEM;
}
static int pblk_line_mg_init(struct pblk *pblk)
@@ -847,12 +875,14 @@ static int pblk_line_mg_init(struct pblk *pblk)
INIT_LIST_HEAD(&l_mg->gc_mid_list);
INIT_LIST_HEAD(&l_mg->gc_low_list);
INIT_LIST_HEAD(&l_mg->gc_empty_list);
+ INIT_LIST_HEAD(&l_mg->gc_werr_list);
INIT_LIST_HEAD(&l_mg->emeta_list);
- l_mg->gc_lists[0] = &l_mg->gc_high_list;
- l_mg->gc_lists[1] = &l_mg->gc_mid_list;
- l_mg->gc_lists[2] = &l_mg->gc_low_list;
+ l_mg->gc_lists[0] = &l_mg->gc_werr_list;
+ l_mg->gc_lists[1] = &l_mg->gc_high_list;
+ l_mg->gc_lists[2] = &l_mg->gc_mid_list;
+ l_mg->gc_lists[3] = &l_mg->gc_low_list;
spin_lock_init(&l_mg->free_lock);
spin_lock_init(&l_mg->close_lock);
@@ -1047,6 +1077,11 @@ static int pblk_lines_init(struct pblk *pblk)
nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i);
}
+ if (!nr_free_chks) {
+ pr_err("pblk: too many bad blocks prevent for sane instance\n");
+ return -EINTR;
+ }
+
pblk_set_provision(pblk, nr_free_chks);
kfree(chunk_meta);
@@ -1054,7 +1089,7 @@ static int pblk_lines_init(struct pblk *pblk)
fail_free_lines:
while (--i >= 0)
- pblk_line_meta_free(&pblk->lines[i]);
+ pblk_line_meta_free(l_mg, &pblk->lines[i]);
kfree(pblk->lines);
fail_free_chunk_meta:
kfree(chunk_meta);
@@ -1110,23 +1145,25 @@ static void pblk_free(struct pblk *pblk)
kfree(pblk);
}
-static void pblk_tear_down(struct pblk *pblk)
+static void pblk_tear_down(struct pblk *pblk, bool graceful)
{
- pblk_pipeline_stop(pblk);
+ if (graceful)
+ __pblk_pipeline_flush(pblk);
+ __pblk_pipeline_stop(pblk);
pblk_writer_stop(pblk);
pblk_rb_sync_l2p(&pblk->rwb);
pblk_rl_free(&pblk->rl);
- pr_debug("pblk: consistent tear down\n");
+ pr_debug("pblk: consistent tear down (graceful:%d)\n", graceful);
}
-static void pblk_exit(void *private)
+static void pblk_exit(void *private, bool graceful)
{
struct pblk *pblk = private;
down_write(&pblk_lock);
- pblk_gc_exit(pblk);
- pblk_tear_down(pblk);
+ pblk_gc_exit(pblk, graceful);
+ pblk_tear_down(pblk, graceful);
#ifdef CONFIG_NVM_DEBUG
pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
@@ -1175,6 +1212,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
pblk->state = PBLK_STATE_RUNNING;
pblk->gc.gc_enabled = 0;
+ spin_lock_init(&pblk->resubmit_lock);
spin_lock_init(&pblk->trans_lock);
spin_lock_init(&pblk->lock);
@@ -1297,18 +1335,18 @@ static int __init pblk_module_init(void)
{
int ret;
- pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
- if (!pblk_bio_set)
- return -ENOMEM;
+ ret = bioset_init(&pblk_bio_set, BIO_POOL_SIZE, 0, 0);
+ if (ret)
+ return ret;
ret = nvm_register_tgt_type(&tt_pblk);
if (ret)
- bioset_free(pblk_bio_set);
+ bioset_exit(&pblk_bio_set);
return ret;
}
static void pblk_module_exit(void)
{
- bioset_free(pblk_bio_set);
+ bioset_exit(&pblk_bio_set);
nvm_unregister_tgt_type(&tt_pblk);
}
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 20dbaa89c9df..953ca31dda68 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -18,11 +18,11 @@
#include "pblk.h"
-static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
- struct ppa_addr *ppa_list,
- unsigned long *lun_bitmap,
- struct pblk_sec_meta *meta_list,
- unsigned int valid_secs)
+static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
+ struct ppa_addr *ppa_list,
+ unsigned long *lun_bitmap,
+ struct pblk_sec_meta *meta_list,
+ unsigned int valid_secs)
{
struct pblk_line *line = pblk_line_get_data(pblk);
struct pblk_emeta *emeta;
@@ -35,8 +35,14 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
if (pblk_line_is_full(line)) {
struct pblk_line *prev_line = line;
+ /* If we cannot allocate a new line, make sure to store metadata
+ * on current line and then fail
+ */
line = pblk_line_replace_data(pblk);
pblk_line_close_meta(pblk, prev_line);
+
+ if (!line)
+ return -EINTR;
}
emeta = line->emeta;
@@ -74,6 +80,7 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
}
pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap);
+ return 0;
}
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
@@ -87,8 +94,12 @@ void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
for (i = off; i < rqd->nr_ppas; i += min) {
map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
- pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
- lun_bitmap, &meta_list[i], map_secs);
+ if (pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
+ lun_bitmap, &meta_list[i], map_secs)) {
+ bio_put(rqd->bio);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE);
+ pblk_pipeline_stop(pblk);
+ }
}
}
@@ -108,8 +119,12 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
for (i = 0; i < rqd->nr_ppas; i += min) {
map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
- pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
- lun_bitmap, &meta_list[i], map_secs);
+ if (pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
+ lun_bitmap, &meta_list[i], map_secs)) {
+ bio_put(rqd->bio);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE);
+ pblk_pipeline_stop(pblk);
+ }
erase_lun = pblk_ppa_to_pos(geo, rqd->ppa_list[i]);
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 52fdd85dbc97..00cd1f20a196 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -142,10 +142,9 @@ static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
int flags;
-try:
flags = READ_ONCE(w_ctx->flags);
- if (!(flags & PBLK_SUBMITTED_ENTRY))
- goto try;
+ WARN_ONCE(!(flags & PBLK_SUBMITTED_ENTRY),
+ "pblk: overwriting unsubmitted data\n");
/* Release flags on context. Protect from writes and reads */
smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
@@ -350,7 +349,7 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
}
static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
- unsigned int pos)
+ unsigned int pos)
{
struct pblk_rb_entry *entry;
unsigned int sync, flush_point;
@@ -420,7 +419,7 @@ void pblk_rb_flush(struct pblk_rb *rb)
if (pblk_rb_flush_point_set(rb, NULL, mem))
return;
- pblk_write_should_kick(pblk);
+ pblk_write_kick(pblk);
}
static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
@@ -504,45 +503,6 @@ int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
}
/*
- * The caller of this function must ensure that the backpointer will not
- * overwrite the entries passed on the list.
- */
-unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
- struct list_head *list,
- unsigned int max)
-{
- struct pblk_rb_entry *entry, *tentry;
- struct page *page;
- unsigned int read = 0;
- int ret;
-
- list_for_each_entry_safe(entry, tentry, list, index) {
- if (read > max) {
- pr_err("pblk: too many entries on list\n");
- goto out;
- }
-
- page = virt_to_page(entry->data);
- if (!page) {
- pr_err("pblk: could not allocate write bio page\n");
- goto out;
- }
-
- ret = bio_add_page(bio, page, rb->seg_size, 0);
- if (ret != rb->seg_size) {
- pr_err("pblk: could not add page to write bio\n");
- goto out;
- }
-
- list_del(&entry->index);
- read++;
- }
-
-out:
- return read;
-}
-
-/*
* Read available entries on rb and add them to the given bio. To avoid a memory
* copy, a page reference to the write buffer is used to be added to the bio.
*
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 9eee10f69df0..18694694e5f0 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -39,10 +39,10 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
}
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
- sector_t blba, unsigned long *read_bitmap)
+ struct bio *bio, sector_t blba,
+ unsigned long *read_bitmap)
{
struct pblk_sec_meta *meta_list = rqd->meta_list;
- struct bio *bio = rqd->bio;
struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
int nr_secs = rqd->nr_ppas;
bool advanced_bio = false;
@@ -102,32 +102,69 @@ next:
#endif
}
-static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
+
+static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
+ sector_t blba)
{
- int err;
+ struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
+ int nr_lbas = rqd->nr_ppas;
+ int i;
- err = pblk_submit_io(pblk, rqd);
- if (err)
- return NVM_IO_ERR;
+ for (i = 0; i < nr_lbas; i++) {
+ u64 lba = le64_to_cpu(meta_lba_list[i].lba);
+
+ if (lba == ADDR_EMPTY)
+ continue;
+
+ if (lba != blba + i) {
+#ifdef CONFIG_NVM_DEBUG
+ struct ppa_addr *p;
- return NVM_IO_OK;
+ p = (nr_lbas == 1) ? &rqd->ppa_list[i] : &rqd->ppa_addr;
+ print_ppa(&pblk->dev->geo, p, "seq", i);
+#endif
+ pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
+ lba, (u64)blba + i);
+ WARN_ON(1);
+ }
+ }
}
-static void pblk_read_check(struct pblk *pblk, struct nvm_rq *rqd,
- sector_t blba)
+/*
+ * There can be holes in the lba list.
+ */
+static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
+ u64 *lba_list, int nr_lbas)
{
- struct pblk_sec_meta *meta_list = rqd->meta_list;
- int nr_lbas = rqd->nr_ppas;
- int i;
+ struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
+ int i, j;
- for (i = 0; i < nr_lbas; i++) {
- u64 lba = le64_to_cpu(meta_list[i].lba);
+ for (i = 0, j = 0; i < nr_lbas; i++) {
+ u64 lba = lba_list[i];
+ u64 meta_lba;
if (lba == ADDR_EMPTY)
continue;
- WARN(lba != blba + i, "pblk: corrupted read LBA\n");
+ meta_lba = le64_to_cpu(meta_lba_list[j].lba);
+
+ if (lba != meta_lba) {
+#ifdef CONFIG_NVM_DEBUG
+ struct ppa_addr *p;
+ int nr_ppas = rqd->nr_ppas;
+
+ p = (nr_ppas == 1) ? &rqd->ppa_list[j] : &rqd->ppa_addr;
+ print_ppa(&pblk->dev->geo, p, "seq", j);
+#endif
+ pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
+ lba, meta_lba);
+ WARN_ON(1);
+ }
+
+ j++;
}
+
+ WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}
static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
@@ -152,7 +189,6 @@ static void pblk_end_user_read(struct bio *bio)
WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
bio_endio(bio);
- bio_put(bio);
}
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
@@ -160,23 +196,18 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
{
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
- struct bio *bio = rqd->bio;
+ struct bio *int_bio = rqd->bio;
unsigned long start_time = r_ctx->start_time;
generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);
if (rqd->error)
pblk_log_read_err(pblk, rqd);
-#ifdef CONFIG_NVM_DEBUG
- else
- WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
-#endif
- pblk_read_check(pblk, rqd, r_ctx->lba);
+ pblk_read_check_seq(pblk, rqd, r_ctx->lba);
- bio_put(bio);
- if (r_ctx->private)
- pblk_end_user_read((struct bio *)r_ctx->private);
+ if (int_bio)
+ bio_put(int_bio);
if (put_line)
pblk_read_put_rqd_kref(pblk, rqd);
@@ -193,16 +224,19 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
static void pblk_end_io_read(struct nvm_rq *rqd)
{
struct pblk *pblk = rqd->private;
+ struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
+ struct bio *bio = (struct bio *)r_ctx->private;
+ pblk_end_user_read(bio);
__pblk_end_io_read(pblk, rqd, true);
}
-static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int bio_init_idx,
- unsigned long *read_bitmap)
+static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
+ struct bio *orig_bio, unsigned int bio_init_idx,
+ unsigned long *read_bitmap)
{
- struct bio *new_bio, *bio = rqd->bio;
struct pblk_sec_meta *meta_list = rqd->meta_list;
+ struct bio *new_bio;
struct bio_vec src_bv, dst_bv;
void *ppa_ptr = NULL;
void *src_p, *dst_p;
@@ -219,11 +253,11 @@ static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
new_bio = bio_alloc(GFP_KERNEL, nr_holes);
if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
- goto err;
+ goto fail_add_pages;
if (nr_holes != new_bio->bi_vcnt) {
pr_err("pblk: malformed bio\n");
- goto err;
+ goto fail;
}
for (i = 0; i < nr_secs; i++)
@@ -246,7 +280,7 @@ static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
if (ret) {
bio_put(rqd->bio);
pr_err("pblk: sync read IO submission failed\n");
- goto err;
+ goto fail;
}
if (rqd->error) {
@@ -282,7 +316,7 @@ static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
meta_list[hole].lba = lba_list_media[i];
src_bv = new_bio->bi_io_vec[i++];
- dst_bv = bio->bi_io_vec[bio_init_idx + hole];
+ dst_bv = orig_bio->bi_io_vec[bio_init_idx + hole];
src_p = kmap_atomic(src_bv.bv_page);
dst_p = kmap_atomic(dst_bv.bv_page);
@@ -294,35 +328,33 @@ static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
kunmap_atomic(src_p);
kunmap_atomic(dst_p);
- mempool_free(src_bv.bv_page, pblk->page_bio_pool);
+ mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
} while (hole < nr_secs);
bio_put(new_bio);
- /* Complete the original bio and associated request */
- bio_endio(bio);
- rqd->bio = bio;
+ /* restore original request */
+ rqd->bio = NULL;
rqd->nr_ppas = nr_secs;
__pblk_end_io_read(pblk, rqd, false);
- return NVM_IO_OK;
-
-err:
- pr_err("pblk: failed to perform partial read\n");
+ return NVM_IO_DONE;
+fail:
/* Free allocated pages in new bio */
- pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
+ pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
+fail_add_pages:
+ pr_err("pblk: failed to perform partial read\n");
__pblk_end_io_read(pblk, rqd, false);
return NVM_IO_ERR;
}
-static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
+static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
sector_t lba, unsigned long *read_bitmap)
{
struct pblk_sec_meta *meta_list = rqd->meta_list;
- struct bio *bio = rqd->bio;
struct ppa_addr ppa;
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
@@ -386,14 +418,15 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd = pblk_alloc_rqd(pblk, PBLK_READ);
rqd->opcode = NVM_OP_PREAD;
- rqd->bio = bio;
rqd->nr_ppas = nr_secs;
+ rqd->bio = NULL; /* cloned bio if needed */
rqd->private = pblk;
rqd->end_io = pblk_end_io_read;
r_ctx = nvm_rq_to_pdu(rqd);
r_ctx->start_time = jiffies;
r_ctx->lba = blba;
+ r_ctx->private = bio; /* original bio */
/* Save the index for this bio's start. This is needed in case
* we need to fill a partial read.
@@ -411,17 +444,15 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
- pblk_read_ppalist_rq(pblk, rqd, blba, &read_bitmap);
+ pblk_read_ppalist_rq(pblk, rqd, bio, blba, &read_bitmap);
} else {
- pblk_read_rq(pblk, rqd, blba, &read_bitmap);
+ pblk_read_rq(pblk, rqd, bio, blba, &read_bitmap);
}
- bio_get(bio);
if (bitmap_full(&read_bitmap, nr_secs)) {
- bio_endio(bio);
atomic_inc(&pblk->inflight_io);
__pblk_end_io_read(pblk, rqd, false);
- return NVM_IO_OK;
+ return NVM_IO_DONE;
}
/* All sectors are to be read from the device */
@@ -429,20 +460,17 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
struct bio *int_bio = NULL;
/* Clone read bio to deal with read errors internally */
- int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
+ int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
if (!int_bio) {
pr_err("pblk: could not clone read bio\n");
goto fail_end_io;
}
rqd->bio = int_bio;
- r_ctx->private = bio;
- ret = pblk_submit_read_io(pblk, rqd);
- if (ret) {
+ if (pblk_submit_io(pblk, rqd)) {
pr_err("pblk: read IO submission failed\n");
- if (int_bio)
- bio_put(int_bio);
+ ret = NVM_IO_ERR;
goto fail_end_io;
}
@@ -452,7 +480,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
/* The read bio request could be partially filled by the write buffer,
* but there are some holes that need to be read from the drive.
*/
- return pblk_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
+ return pblk_partial_read(pblk, rqd, bio, bio_init_idx, &read_bitmap);
fail_rqd_free:
pblk_free_rqd(pblk, rqd, PBLK_READ);
@@ -585,6 +613,8 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
goto err_free_bio;
}
+ pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);
+
atomic_dec(&pblk->inflight_io);
if (rqd.error) {
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index 3e079c2afa6e..598342833d0d 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -16,97 +16,6 @@
#include "pblk.h"
-void pblk_submit_rec(struct work_struct *work)
-{
- struct pblk_rec_ctx *recovery =
- container_of(work, struct pblk_rec_ctx, ws_rec);
- struct pblk *pblk = recovery->pblk;
- struct nvm_rq *rqd = recovery->rqd;
- struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
- struct bio *bio;
- unsigned int nr_rec_secs;
- unsigned int pgs_read;
- int ret;
-
- nr_rec_secs = bitmap_weight((unsigned long int *)&rqd->ppa_status,
- NVM_MAX_VLBA);
-
- bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
-
- bio->bi_iter.bi_sector = 0;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- rqd->bio = bio;
- rqd->nr_ppas = nr_rec_secs;
-
- pgs_read = pblk_rb_read_to_bio_list(&pblk->rwb, bio, &recovery->failed,
- nr_rec_secs);
- if (pgs_read != nr_rec_secs) {
- pr_err("pblk: could not read recovery entries\n");
- goto err;
- }
-
- if (pblk_setup_w_rec_rq(pblk, rqd, c_ctx)) {
- pr_err("pblk: could not setup recovery request\n");
- goto err;
- }
-
-#ifdef CONFIG_NVM_DEBUG
- atomic_long_add(nr_rec_secs, &pblk->recov_writes);
-#endif
-
- ret = pblk_submit_io(pblk, rqd);
- if (ret) {
- pr_err("pblk: I/O submission failed: %d\n", ret);
- goto err;
- }
-
- mempool_free(recovery, pblk->rec_pool);
- return;
-
-err:
- bio_put(bio);
- pblk_free_rqd(pblk, rqd, PBLK_WRITE);
-}
-
-int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
- struct pblk_rec_ctx *recovery, u64 *comp_bits,
- unsigned int comp)
-{
- struct nvm_rq *rec_rqd;
- struct pblk_c_ctx *rec_ctx;
- int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;
-
- rec_rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
- rec_ctx = nvm_rq_to_pdu(rec_rqd);
-
- /* Copy completion bitmap, but exclude the first X completed entries */
- bitmap_shift_right((unsigned long int *)&rec_rqd->ppa_status,
- (unsigned long int *)comp_bits,
- comp, NVM_MAX_VLBA);
-
- /* Save the context for the entries that need to be re-written and
- * update current context with the completed entries.
- */
- rec_ctx->sentry = pblk_rb_wrap_pos(&pblk->rwb, c_ctx->sentry + comp);
- if (comp >= c_ctx->nr_valid) {
- rec_ctx->nr_valid = 0;
- rec_ctx->nr_padded = nr_entries - comp;
-
- c_ctx->nr_padded = comp - c_ctx->nr_valid;
- } else {
- rec_ctx->nr_valid = c_ctx->nr_valid - comp;
- rec_ctx->nr_padded = c_ctx->nr_padded;
-
- c_ctx->nr_valid = comp;
- c_ctx->nr_padded = 0;
- }
-
- recovery->rqd = rec_rqd;
- recovery->pblk = pblk;
-
- return 0;
-}
-
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
u32 crc;
@@ -865,18 +774,30 @@ static void pblk_recov_wa_counters(struct pblk *pblk,
}
static int pblk_line_was_written(struct pblk_line *line,
- struct pblk_line_meta *lm)
+ struct pblk *pblk)
{
- int i;
- int state_mask = NVM_CHK_ST_OFFLINE | NVM_CHK_ST_FREE;
+ struct pblk_line_meta *lm = &pblk->lm;
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct nvm_chk_meta *chunk;
+ struct ppa_addr bppa;
+ int smeta_blk;
- for (i = 0; i < lm->blk_per_line; i++) {
- if (!(line->chks[i].state & state_mask))
- return 1;
- }
+ if (line->state == PBLK_LINESTATE_BAD)
+ return 0;
- return 0;
+ smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
+ if (smeta_blk >= lm->blk_per_line)
+ return 0;
+
+ bppa = pblk->luns[smeta_blk].bppa;
+ chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];
+
+ if (chunk->state & NVM_CHK_ST_FREE)
+ return 0;
+
+ return 1;
}
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
@@ -915,7 +836,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
line->lun_bitmap = ((void *)(smeta_buf)) +
sizeof(struct line_smeta);
- if (!pblk_line_was_written(line, lm))
+ if (!pblk_line_was_written(line, pblk))
continue;
/* Lines that cannot be read are assumed as not written here */
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index 883a7113b19d..6a0616a6fcaf 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -73,6 +73,16 @@ void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
pblk_rl_kick_u_timer(rl);
}
+void pblk_rl_werr_line_in(struct pblk_rl *rl)
+{
+ atomic_inc(&rl->werr_lines);
+}
+
+void pblk_rl_werr_line_out(struct pblk_rl *rl)
+{
+ atomic_dec(&rl->werr_lines);
+}
+
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
{
atomic_add(nr_entries, &rl->rb_gc_cnt);
@@ -99,11 +109,21 @@ static void __pblk_rl_update_rates(struct pblk_rl *rl,
{
struct pblk *pblk = container_of(rl, struct pblk, rl);
int max = rl->rb_budget;
+ int werr_gc_needed = atomic_read(&rl->werr_lines);
if (free_blocks >= rl->high) {
- rl->rb_user_max = max;
- rl->rb_gc_max = 0;
- rl->rb_state = PBLK_RL_HIGH;
+ if (werr_gc_needed) {
+ /* Allocate a small budget for recovering
+ * lines with write errors
+ */
+ rl->rb_gc_max = 1 << rl->rb_windows_pw;
+ rl->rb_user_max = max - rl->rb_gc_max;
+ rl->rb_state = PBLK_RL_WERR;
+ } else {
+ rl->rb_user_max = max;
+ rl->rb_gc_max = 0;
+ rl->rb_state = PBLK_RL_OFF;
+ }
} else if (free_blocks < rl->high) {
int shift = rl->high_pw - rl->rb_windows_pw;
int user_windows = free_blocks >> shift;
@@ -124,7 +144,7 @@ static void __pblk_rl_update_rates(struct pblk_rl *rl,
rl->rb_state = PBLK_RL_LOW;
}
- if (rl->rb_state == (PBLK_RL_MID | PBLK_RL_LOW))
+ if (rl->rb_state != PBLK_RL_OFF)
pblk_gc_should_start(pblk);
else
pblk_gc_should_stop(pblk);
@@ -221,6 +241,7 @@ void pblk_rl_init(struct pblk_rl *rl, int budget)
atomic_set(&rl->rb_user_cnt, 0);
atomic_set(&rl->rb_gc_cnt, 0);
atomic_set(&rl->rb_space, -1);
+ atomic_set(&rl->werr_lines, 0);
timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index e61909af23a5..88a0a7c407aa 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -173,6 +173,8 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
int free_line_cnt = 0, closed_line_cnt = 0, emeta_line_cnt = 0;
int d_line_cnt = 0, l_line_cnt = 0;
int gc_full = 0, gc_high = 0, gc_mid = 0, gc_low = 0, gc_empty = 0;
+ int gc_werr = 0;
+
int bad = 0, cor = 0;
int msecs = 0, cur_sec = 0, vsc = 0, sec_in_line = 0;
int map_weight = 0, meta_weight = 0;
@@ -237,6 +239,15 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
gc_empty++;
}
+ list_for_each_entry(line, &l_mg->gc_werr_list, list) {
+ if (line->type == PBLK_LINETYPE_DATA)
+ d_line_cnt++;
+ else if (line->type == PBLK_LINETYPE_LOG)
+ l_line_cnt++;
+ closed_line_cnt++;
+ gc_werr++;
+ }
+
list_for_each_entry(line, &l_mg->bad_list, list)
bad++;
list_for_each_entry(line, &l_mg->corrupt_list, list)
@@ -275,8 +286,8 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
l_mg->nr_lines);
sz += snprintf(page + sz, PAGE_SIZE - sz,
- "GC: full:%d, high:%d, mid:%d, low:%d, empty:%d, queue:%d\n",
- gc_full, gc_high, gc_mid, gc_low, gc_empty,
+ "GC: full:%d, high:%d, mid:%d, low:%d, empty:%d, werr: %d, queue:%d\n",
+ gc_full, gc_high, gc_mid, gc_low, gc_empty, gc_werr,
atomic_read(&pblk->gc.read_inflight_gc));
sz += snprintf(page + sz, PAGE_SIZE - sz,
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 3e6f1ebd743a..f353e52941f5 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -103,68 +103,150 @@ retry:
pblk_rb_sync_end(&pblk->rwb, &flags);
}
-/* When a write fails, we are not sure whether the block has grown bad or a page
- * range is more susceptible to write errors. If a high number of pages fail, we
- * assume that the block is bad and we mark it accordingly. In all cases, we
- * remap and resubmit the failed entries as fast as possible; if a flush is
- * waiting on a completion, the whole stack would stall otherwise.
- */
-static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
+/* Map remaining sectors in chunk, starting from ppa */
+static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
{
- void *comp_bits = &rqd->ppa_status;
- struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
- struct pblk_rec_ctx *recovery;
- struct ppa_addr *ppa_list = rqd->ppa_list;
- int nr_ppas = rqd->nr_ppas;
- unsigned int c_entries;
- int bit, ret;
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_line *line;
+ struct ppa_addr map_ppa = *ppa;
+ u64 paddr;
+ int done = 0;
- if (unlikely(nr_ppas == 1))
- ppa_list = &rqd->ppa_addr;
+ line = &pblk->lines[pblk_ppa_to_line(*ppa)];
+ spin_lock(&line->lock);
- recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
+ while (!done) {
+ paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);
- INIT_LIST_HEAD(&recovery->failed);
+ if (!test_and_set_bit(paddr, line->map_bitmap))
+ line->left_msecs--;
- bit = -1;
- while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
- struct pblk_rb_entry *entry;
- struct ppa_addr ppa;
+ if (!test_and_set_bit(paddr, line->invalid_bitmap))
+ le32_add_cpu(line->vsc, -1);
- /* Logic error */
- if (bit > c_ctx->nr_valid) {
- WARN_ONCE(1, "pblk: corrupted write request\n");
- mempool_free(recovery, pblk->rec_pool);
- goto out;
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ map_ppa.ppa++;
+ if (map_ppa.g.pg == geo->num_pg)
+ done = 1;
+ } else {
+ map_ppa.m.sec++;
+ if (map_ppa.m.sec == geo->clba)
+ done = 1;
}
+ }
- ppa = ppa_list[bit];
- entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
- if (!entry) {
- pr_err("pblk: could not scan entry on write failure\n");
- mempool_free(recovery, pblk->rec_pool);
- goto out;
- }
+ line->w_err_gc->has_write_err = 1;
+ spin_unlock(&line->lock);
+}
- /* The list is filled first and emptied afterwards. No need for
- * protecting it with a lock
+static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
+ unsigned int nr_entries)
+{
+ struct pblk_rb *rb = &pblk->rwb;
+ struct pblk_rb_entry *entry;
+ struct pblk_line *line;
+ struct pblk_w_ctx *w_ctx;
+ struct ppa_addr ppa_l2p;
+ int flags;
+ unsigned int pos, i;
+
+ spin_lock(&pblk->trans_lock);
+ pos = sentry;
+ for (i = 0; i < nr_entries; i++) {
+ entry = &rb->entries[pos];
+ w_ctx = &entry->w_ctx;
+
+ /* Check if the lba has been overwritten */
+ ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
+ if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
+ w_ctx->lba = ADDR_EMPTY;
+
+ /* Mark up the entry as submittable again */
+ flags = READ_ONCE(w_ctx->flags);
+ flags |= PBLK_WRITTEN_DATA;
+ /* Release flags on write context. Protect from writes */
+ smp_store_release(&w_ctx->flags, flags);
+
+ /* Decrese the reference count to the line as we will
+ * re-map these entries
*/
- list_add_tail(&entry->index, &recovery->failed);
+ line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
+ kref_put(&line->ref, pblk_line_put);
+
+ pos = (pos + 1) & (rb->nr_entries - 1);
}
+ spin_unlock(&pblk->trans_lock);
+}
- c_entries = find_first_bit(comp_bits, nr_ppas);
- ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
- if (ret) {
- pr_err("pblk: could not recover from write failure\n");
- mempool_free(recovery, pblk->rec_pool);
- goto out;
+static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
+{
+ struct pblk_c_ctx *r_ctx;
+
+ r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
+ if (!r_ctx)
+ return;
+
+ r_ctx->lun_bitmap = NULL;
+ r_ctx->sentry = c_ctx->sentry;
+ r_ctx->nr_valid = c_ctx->nr_valid;
+ r_ctx->nr_padded = c_ctx->nr_padded;
+
+ spin_lock(&pblk->resubmit_lock);
+ list_add_tail(&r_ctx->list, &pblk->resubmit_list);
+ spin_unlock(&pblk->resubmit_lock);
+
+#ifdef CONFIG_NVM_DEBUG
+ atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
+#endif
+}
+
+static void pblk_submit_rec(struct work_struct *work)
+{
+ struct pblk_rec_ctx *recovery =
+ container_of(work, struct pblk_rec_ctx, ws_rec);
+ struct pblk *pblk = recovery->pblk;
+ struct nvm_rq *rqd = recovery->rqd;
+ struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
+ struct ppa_addr *ppa_list;
+
+ pblk_log_write_err(pblk, rqd);
+
+ if (rqd->nr_ppas == 1)
+ ppa_list = &rqd->ppa_addr;
+ else
+ ppa_list = rqd->ppa_list;
+
+ pblk_map_remaining(pblk, ppa_list);
+ pblk_queue_resubmit(pblk, c_ctx);
+
+ pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);
+ if (c_ctx->nr_padded)
+ pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
+ c_ctx->nr_padded);
+ bio_put(rqd->bio);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE);
+ mempool_free(recovery, &pblk->rec_pool);
+
+ atomic_dec(&pblk->inflight_io);
+}
+
+
+static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct pblk_rec_ctx *recovery;
+
+ recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
+ if (!recovery) {
+ pr_err("pblk: could not allocate recovery work\n");
+ return;
}
+ recovery->pblk = pblk;
+ recovery->rqd = rqd;
+
INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
queue_work(pblk->close_wq, &recovery->ws_rec);
-
-out:
- pblk_complete_write(pblk, rqd, c_ctx);
}
static void pblk_end_io_write(struct nvm_rq *rqd)
@@ -173,8 +255,8 @@ static void pblk_end_io_write(struct nvm_rq *rqd)
struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
if (rqd->error) {
- pblk_log_write_err(pblk, rqd);
- return pblk_end_w_fail(pblk, rqd);
+ pblk_end_w_fail(pblk, rqd);
+ return;
}
#ifdef CONFIG_NVM_DEBUG
else
@@ -198,6 +280,7 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
if (rqd->error) {
pblk_log_write_err(pblk, rqd);
pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
+ line->w_err_gc->has_write_err = 1;
}
sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
@@ -266,31 +349,6 @@ static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
return 0;
}
-int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
- struct pblk_c_ctx *c_ctx)
-{
- struct pblk_line_meta *lm = &pblk->lm;
- unsigned long *lun_bitmap;
- int ret;
-
- lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
- if (!lun_bitmap)
- return -ENOMEM;
-
- c_ctx->lun_bitmap = lun_bitmap;
-
- ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
- if (ret)
- return ret;
-
- pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);
-
- rqd->ppa_status = (u64)0;
- rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
-
- return ret;
-}
-
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
unsigned int secs_to_flush)
{
@@ -339,6 +397,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
l_mg->emeta_alloc_type, GFP_KERNEL);
if (IS_ERR(bio)) {
+ pr_err("pblk: failed to map emeta io");
ret = PTR_ERR(bio);
goto fail_free_rqd;
}
@@ -515,26 +574,54 @@ static int pblk_submit_write(struct pblk *pblk)
unsigned int secs_avail, secs_to_sync, secs_to_com;
unsigned int secs_to_flush;
unsigned long pos;
+ unsigned int resubmit;
- /* If there are no sectors in the cache, flushes (bios without data)
- * will be cleared on the cache threads
- */
- secs_avail = pblk_rb_read_count(&pblk->rwb);
- if (!secs_avail)
- return 1;
-
- secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
- if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
- return 1;
-
- secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
- if (secs_to_sync > pblk->max_write_pgs) {
- pr_err("pblk: bad buffer sync calculation\n");
- return 1;
- }
+ spin_lock(&pblk->resubmit_lock);
+ resubmit = !list_empty(&pblk->resubmit_list);
+ spin_unlock(&pblk->resubmit_lock);
+
+ /* Resubmit failed writes first */
+ if (resubmit) {
+ struct pblk_c_ctx *r_ctx;
+
+ spin_lock(&pblk->resubmit_lock);
+ r_ctx = list_first_entry(&pblk->resubmit_list,
+ struct pblk_c_ctx, list);
+ list_del(&r_ctx->list);
+ spin_unlock(&pblk->resubmit_lock);
+
+ secs_avail = r_ctx->nr_valid;
+ pos = r_ctx->sentry;
+
+ pblk_prepare_resubmit(pblk, pos, secs_avail);
+ secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
+ secs_avail);
- secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
- pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
+ kfree(r_ctx);
+ } else {
+ /* If there are no sectors in the cache,
+ * flushes (bios without data) will be cleared on
+ * the cache threads
+ */
+ secs_avail = pblk_rb_read_count(&pblk->rwb);
+ if (!secs_avail)
+ return 1;
+
+ secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
+ if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
+ return 1;
+
+ secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
+ secs_to_flush);
+ if (secs_to_sync > pblk->max_write_pgs) {
+ pr_err("pblk: bad buffer sync calculation\n");
+ return 1;
+ }
+
+ secs_to_com = (secs_to_sync > secs_avail) ?
+ secs_avail : secs_to_sync;
+ pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
+ }
bio = bio_alloc(GFP_KERNEL, secs_to_sync);
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 9c682acfc5d1..34cc1d64a9d4 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -89,12 +89,14 @@ struct pblk_sec_meta {
/* The number of GC lists and the rate-limiter states go together. This way the
* rate-limiter can dictate how much GC is needed based on resource utilization.
*/
-#define PBLK_GC_NR_LISTS 3
+#define PBLK_GC_NR_LISTS 4
enum {
- PBLK_RL_HIGH = 1,
- PBLK_RL_MID = 2,
- PBLK_RL_LOW = 3,
+ PBLK_RL_OFF = 0,
+ PBLK_RL_WERR = 1,
+ PBLK_RL_HIGH = 2,
+ PBLK_RL_MID = 3,
+ PBLK_RL_LOW = 4
};
#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
@@ -128,7 +130,6 @@ struct pblk_pad_rq {
struct pblk_rec_ctx {
struct pblk *pblk;
struct nvm_rq *rqd;
- struct list_head failed;
struct work_struct ws_rec;
};
@@ -279,6 +280,8 @@ struct pblk_rl {
int rb_user_active;
int rb_gc_active;
+ atomic_t werr_lines; /* Number of write error lines that needs gc */
+
struct timer_list u_timer;
unsigned long long nr_secs;
@@ -312,6 +315,7 @@ enum {
PBLK_LINEGC_MID = 23,
PBLK_LINEGC_HIGH = 24,
PBLK_LINEGC_FULL = 25,
+ PBLK_LINEGC_WERR = 26
};
#define PBLK_MAGIC 0x70626c6b /*pblk*/
@@ -413,6 +417,11 @@ struct pblk_smeta {
struct line_smeta *buf; /* smeta buffer in persistent format */
};
+struct pblk_w_err_gc {
+ int has_write_err;
+ __le64 *lba_list;
+};
+
struct pblk_line {
struct pblk *pblk;
unsigned int id; /* Line number corresponds to the
@@ -458,6 +467,8 @@ struct pblk_line {
struct kref ref; /* Write buffer L2P references */
+ struct pblk_w_err_gc *w_err_gc; /* Write error gc recovery metadata */
+
spinlock_t lock; /* Necessary for invalid_bitmap only */
};
@@ -489,6 +500,8 @@ struct pblk_line_mgmt {
struct list_head gc_mid_list; /* Full lines ready to GC, mid isc */
struct list_head gc_low_list; /* Full lines ready to GC, low isc */
+ struct list_head gc_werr_list; /* Write err recovery list */
+
struct list_head gc_full_list; /* Full lines ready to GC, no valid */
struct list_head gc_empty_list; /* Full lines close, all valid */
@@ -664,12 +677,15 @@ struct pblk {
struct list_head compl_list;
- mempool_t *page_bio_pool;
- mempool_t *gen_ws_pool;
- mempool_t *rec_pool;
- mempool_t *r_rq_pool;
- mempool_t *w_rq_pool;
- mempool_t *e_rq_pool;
+ spinlock_t resubmit_lock; /* Resubmit list lock */
+ struct list_head resubmit_list; /* Resubmit list for failed writes*/
+
+ mempool_t page_bio_pool;
+ mempool_t gen_ws_pool;
+ mempool_t rec_pool;
+ mempool_t r_rq_pool;
+ mempool_t w_rq_pool;
+ mempool_t e_rq_pool;
struct workqueue_struct *close_wq;
struct workqueue_struct *bb_wq;
@@ -713,9 +729,6 @@ void pblk_rb_sync_l2p(struct pblk_rb *rb);
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
unsigned int pos, unsigned int nr_entries,
unsigned int count);
-unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
- struct list_head *list,
- unsigned int max);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
@@ -766,11 +779,13 @@ struct pblk_line *pblk_line_get_data(struct pblk *pblk);
struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
int pblk_line_is_full(struct pblk_line *line);
-void pblk_line_free(struct pblk *pblk, struct pblk_line *line);
+void pblk_line_free(struct pblk_line *line);
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_ws(struct work_struct *work);
void pblk_pipeline_stop(struct pblk *pblk);
+void __pblk_pipeline_stop(struct pblk *pblk);
+void __pblk_pipeline_flush(struct pblk *pblk);
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
void (*work)(struct work_struct *), gfp_t gfp_mask,
struct workqueue_struct *wq);
@@ -794,7 +809,6 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
unsigned long *lun_bitmap);
-void pblk_end_io_sync(struct nvm_rq *rqd);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
int nr_pages);
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
@@ -837,23 +851,20 @@ void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
int pblk_write_ts(void *data);
void pblk_write_timer_fn(struct timer_list *t);
void pblk_write_should_kick(struct pblk *pblk);
+void pblk_write_kick(struct pblk *pblk);
/*
* pblk read path
*/
-extern struct bio_set *pblk_bio_set;
+extern struct bio_set pblk_bio_set;
int pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
/*
* pblk recovery
*/
-void pblk_submit_rec(struct work_struct *work);
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
-int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
- struct pblk_rec_ctx *recovery, u64 *comp_bits,
- unsigned int comp);
/*
* pblk gc
@@ -864,7 +875,7 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
#define PBLK_GC_RSV_LINE 1 /* Reserved lines for GC */
int pblk_gc_init(struct pblk *pblk);
-void pblk_gc_exit(struct pblk *pblk);
+void pblk_gc_exit(struct pblk *pblk, bool graceful);
void pblk_gc_should_start(struct pblk *pblk);
void pblk_gc_should_stop(struct pblk *pblk);
void pblk_gc_should_kick(struct pblk *pblk);
@@ -894,6 +905,9 @@ void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
bool used);
int pblk_rl_is_limit(struct pblk_rl *rl);
+void pblk_rl_werr_line_in(struct pblk_rl *rl);
+void pblk_rl_werr_line_out(struct pblk_rl *rl);
+
/*
* pblk sysfs
*/
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 3a0cfb237af9..d6bf294f3907 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -269,7 +269,7 @@ struct bcache_device {
atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes;
- struct bio_set *bio_split;
+ struct bio_set bio_split;
unsigned data_csum:1;
@@ -345,6 +345,7 @@ struct cached_dev {
struct keybuf writeback_keys;
+ struct task_struct *status_update_thread;
/*
* Order the write-half of writeback operations strongly in dispatch
* order. (Maintain LBA order; don't allow reads completing out of
@@ -392,6 +393,7 @@ struct cached_dev {
#define DEFAULT_CACHED_DEV_ERROR_LIMIT 64
atomic_t io_errors;
unsigned error_limit;
+ unsigned offline_seconds;
char backing_dev_name[BDEVNAME_SIZE];
};
@@ -528,9 +530,9 @@ struct cache_set {
struct closure sb_write;
struct semaphore sb_write_mutex;
- mempool_t *search;
- mempool_t *bio_meta;
- struct bio_set *bio_split;
+ mempool_t search;
+ mempool_t bio_meta;
+ struct bio_set bio_split;
/* For the btree cache */
struct shrinker shrink;
@@ -655,7 +657,7 @@ struct cache_set {
* A btree node on disk could have too many bsets for an iterator to fit
* on the stack - have to dynamically allocate them
*/
- mempool_t *fill_iter;
+ mempool_t fill_iter;
struct bset_sort_state sort;
@@ -956,8 +958,6 @@ void bch_prio_write(struct cache *);
void bch_write_bdev_super(struct cached_dev *, struct closure *);
extern struct workqueue_struct *bcache_wq;
-extern const char * const bch_cache_modes[];
-extern const char * const bch_stop_on_failure_modes[];
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 579c696a5fe0..f3403b45bc28 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -1118,8 +1118,7 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
void bch_bset_sort_state_free(struct bset_sort_state *state)
{
- if (state->pool)
- mempool_destroy(state->pool);
+ mempool_exit(&state->pool);
}
int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
@@ -1129,11 +1128,7 @@ int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
state->page_order = page_order;
state->crit_factor = int_sqrt(1 << page_order);
- state->pool = mempool_create_page_pool(1, page_order);
- if (!state->pool)
- return -ENOMEM;
-
- return 0;
+ return mempool_init_page_pool(&state->pool, 1, page_order);
}
EXPORT_SYMBOL(bch_bset_sort_state_init);
@@ -1191,7 +1186,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
BUG_ON(order > state->page_order);
- outp = mempool_alloc(state->pool, GFP_NOIO);
+ outp = mempool_alloc(&state->pool, GFP_NOIO);
out = page_address(outp);
used_mempool = true;
order = state->page_order;
@@ -1220,7 +1215,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
}
if (used_mempool)
- mempool_free(virt_to_page(out), state->pool);
+ mempool_free(virt_to_page(out), &state->pool);
else
free_pages((unsigned long) out, order);
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 0c24280f3b98..b867f2200495 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -347,7 +347,7 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
/* Sorting */
struct bset_sort_state {
- mempool_t *pool;
+ mempool_t pool;
unsigned page_order;
unsigned crit_factor;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 17936b2dc7d6..2a0968c04e21 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -204,7 +204,7 @@ void bch_btree_node_read_done(struct btree *b)
struct bset *i = btree_bset_first(b);
struct btree_iter *iter;
- iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
+ iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
iter->used = 0;
@@ -271,7 +271,7 @@ void bch_btree_node_read_done(struct btree *b)
bch_bset_init_next(&b->keys, write_block(b),
bset_magic(&b->c->sb));
out:
- mempool_free(iter, b->c->fill_iter);
+ mempool_free(iter, &b->c->fill_iter);
return;
err:
set_btree_node_io_error(b);
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 4e63c6f6c04d..d030ce3025a6 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -250,7 +250,9 @@ void bch_debug_exit(void)
int __init bch_debug_init(struct kobject *kobj)
{
- bcache_debug = debugfs_create_dir("bcache", NULL);
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+ bcache_debug = debugfs_create_dir("bcache", NULL);
return IS_ERR_OR_NULL(bcache_debug);
}
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 2ddf8515e6a5..9612873afee2 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -17,12 +17,12 @@
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
struct bbio *b = container_of(bio, struct bbio, bio);
- mempool_free(b, c->bio_meta);
+ mempool_free(b, &c->bio_meta);
}
struct bio *bch_bbio_alloc(struct cache_set *c)
{
- struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
+ struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 8e3e8655ed63..ae67f5fa8047 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -213,7 +213,7 @@ static void bch_data_insert_start(struct closure *cl)
do {
unsigned i;
struct bkey *k;
- struct bio_set *split = op->c->bio_split;
+ struct bio_set *split = &op->c->bio_split;
/* 1 for the device pointer and 1 for the chksum */
if (bch_keylist_realloc(&op->insert_keys,
@@ -548,7 +548,7 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
KEY_OFFSET(k) - bio->bi_iter.bi_sector),
- GFP_NOIO, s->d->bio_split);
+ GFP_NOIO, &s->d->bio_split);
bio_key = &container_of(n, struct bbio, bio)->key;
bch_bkey_copy_single_ptr(bio_key, k, ptr);
@@ -707,7 +707,7 @@ static void search_free(struct closure *cl)
bio_complete(s);
closure_debug_destroy(cl);
- mempool_free(s, s->d->c->search);
+ mempool_free(s, &s->d->c->search);
}
static inline struct search *search_alloc(struct bio *bio,
@@ -715,7 +715,7 @@ static inline struct search *search_alloc(struct bio *bio,
{
struct search *s;
- s = mempool_alloc(d->c->search, GFP_NOIO);
+ s = mempool_alloc(&d->c->search, GFP_NOIO);
closure_init(&s->cl, NULL);
do_bio_hook(s, bio, request_endio);
@@ -864,7 +864,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->cache_missed = 1;
if (s->cache_miss || s->iop.bypass) {
- miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
goto out_submit;
}
@@ -887,14 +887,14 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->iop.replace = true;
- miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
/* btree_search_recurse()'s btree iterator is no good anymore */
ret = miss == bio ? MAP_DONE : -EINTR;
cache_bio = bio_alloc_bioset(GFP_NOWAIT,
DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
- dc->disk.bio_split);
+ &dc->disk.bio_split);
if (!cache_bio)
goto out_submit;
@@ -1008,7 +1008,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
struct bio *flush;
flush = bio_alloc_bioset(GFP_NOIO, 0,
- dc->disk.bio_split);
+ &dc->disk.bio_split);
if (!flush) {
s->iop.status = BLK_STS_RESOURCE;
goto insert_data;
@@ -1021,7 +1021,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
closure_bio_submit(s->iop.c, flush, cl);
}
} else {
- s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
+ s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
/* I/O request sent to backing device */
bio->bi_end_io = backing_request_endio;
closure_bio_submit(s->iop.c, bio, cl);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3dea06b41d43..a31e55bcc4e5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -37,24 +37,6 @@ static const char invalid_uuid[] = {
0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};
-/* Default is -1; we skip past it for struct cached_dev's cache mode */
-const char * const bch_cache_modes[] = {
- "default",
- "writethrough",
- "writeback",
- "writearound",
- "none",
- NULL
-};
-
-/* Default is -1; we skip past it for stop_when_cache_set_failed */
-const char * const bch_stop_on_failure_modes[] = {
- "default",
- "auto",
- "always",
- NULL
-};
-
static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
@@ -654,6 +636,11 @@ static int ioctl_dev(struct block_device *b, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct bcache_device *d = b->bd_disk->private_data;
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+
+ if (dc->io_disable)
+ return -EIO;
+
return d->ioctl(d, mode, cmd, arg);
}
@@ -766,8 +753,7 @@ static void bcache_device_free(struct bcache_device *d)
put_disk(d->disk);
}
- if (d->bio_split)
- bioset_free(d->bio_split);
+ bioset_exit(&d->bio_split);
kvfree(d->full_dirty_stripes);
kvfree(d->stripe_sectors_dirty);
@@ -809,9 +795,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (idx < 0)
return idx;
- if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio),
- BIOSET_NEED_BVECS |
- BIOSET_NEED_RESCUER)) ||
+ if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
+ BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
!(d->disk = alloc_disk(BCACHE_MINORS))) {
ida_simple_remove(&bcache_device_idx, idx);
return -ENOMEM;
@@ -864,6 +849,44 @@ static void calc_cached_dev_sectors(struct cache_set *c)
c->cached_dev_sectors = sectors;
}
+#define BACKING_DEV_OFFLINE_TIMEOUT 5
+static int cached_dev_status_update(void *arg)
+{
+ struct cached_dev *dc = arg;
+ struct request_queue *q;
+
+ /*
+ * If this delayed worker is stopping outside, directly quit here.
+ * dc->io_disable might be set via sysfs interface, so check it
+ * here too.
+ */
+ while (!kthread_should_stop() && !dc->io_disable) {
+ q = bdev_get_queue(dc->bdev);
+ if (blk_queue_dying(q))
+ dc->offline_seconds++;
+ else
+ dc->offline_seconds = 0;
+
+ if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
+ pr_err("%s: device offline for %d seconds",
+ dc->backing_dev_name,
+ BACKING_DEV_OFFLINE_TIMEOUT);
+ pr_err("%s: disable I/O request due to backing "
+ "device offline", dc->disk.name);
+ dc->io_disable = true;
+ /* let others know earlier that io_disable is true */
+ smp_mb();
+ bcache_device_stop(&dc->disk);
+ break;
+ }
+ schedule_timeout_interruptible(HZ);
+ }
+
+ wait_for_kthread_stop();
+ return 0;
+}
+
+
void bch_cached_dev_run(struct cached_dev *dc)
{
struct bcache_device *d = &dc->disk;
@@ -906,6 +929,14 @@ void bch_cached_dev_run(struct cached_dev *dc)
if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
pr_debug("error creating sysfs link");
+
+ dc->status_update_thread = kthread_run(cached_dev_status_update,
+ dc, "bcache_status_update");
+ if (IS_ERR(dc->status_update_thread)) {
+ pr_warn("failed to create bcache_status_update kthread, "
+ "continue to run without monitoring backing "
+ "device status");
+ }
}
/*
@@ -1139,6 +1170,8 @@ static void cached_dev_free(struct closure *cl)
kthread_stop(dc->writeback_thread);
if (dc->writeback_write_wq)
destroy_workqueue(dc->writeback_write_wq);
+ if (!IS_ERR_OR_NULL(dc->status_update_thread))
+ kthread_stop(dc->status_update_thread);
if (atomic_read(&dc->running))
bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
@@ -1465,14 +1498,10 @@ static void cache_set_free(struct closure *cl)
if (c->moving_gc_wq)
destroy_workqueue(c->moving_gc_wq);
- if (c->bio_split)
- bioset_free(c->bio_split);
- if (c->fill_iter)
- mempool_destroy(c->fill_iter);
- if (c->bio_meta)
- mempool_destroy(c->bio_meta);
- if (c->search)
- mempool_destroy(c->search);
+ bioset_exit(&c->bio_split);
+ mempool_exit(&c->fill_iter);
+ mempool_exit(&c->bio_meta);
+ mempool_exit(&c->search);
kfree(c->devices);
mutex_lock(&bch_register_lock);
@@ -1683,21 +1712,17 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
INIT_LIST_HEAD(&c->btree_cache_freed);
INIT_LIST_HEAD(&c->data_buckets);
- c->search = mempool_create_slab_pool(32, bch_search_cache);
- if (!c->search)
- goto err;
-
iter_size = (sb->bucket_size / sb->block_size + 1) *
sizeof(struct btree_iter_set);
if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
- !(c->bio_meta = mempool_create_kmalloc_pool(2,
- sizeof(struct bbio) + sizeof(struct bio_vec) *
- bucket_pages(c))) ||
- !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
- !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio),
- BIOSET_NEED_BVECS |
- BIOSET_NEED_RESCUER)) ||
+ mempool_init_slab_pool(&c->search, 32, bch_search_cache) ||
+ mempool_init_kmalloc_pool(&c->bio_meta, 2,
+ sizeof(struct bbio) + sizeof(struct bio_vec) *
+ bucket_pages(c)) ||
+ mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
+ bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
+ BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
!(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
!(c->moving_gc_wq = alloc_workqueue("bcache_gc",
WQ_MEM_RECLAIM, 0)) ||
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index dfeef583ee50..8ccbc8f3b3af 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -16,6 +16,22 @@
#include <linux/sort.h>
#include <linux/sched/clock.h>
+/* Default is -1; we skip past it for struct cached_dev's cache mode */
+static const char * const bch_cache_modes[] = {
+ "writethrough",
+ "writeback",
+ "writearound",
+ "none",
+ NULL
+};
+
+/* Default is -1; we skip past it for stop_when_cache_set_failed */
+static const char * const bch_stop_on_failure_modes[] = {
+ "auto",
+ "always",
+ NULL
+};
+
static const char * const cache_replacement_policies[] = {
"lru",
"fifo",
@@ -114,6 +130,20 @@ rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);
+static ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
+ size_t selected)
+{
+ char *out = buf;
+ size_t i;
+
+ for (i = 0; list[i]; i++)
+ out += snprintf(out, buf + size - out,
+ i == selected ? "[%s] " : "%s ", list[i]);
+
+ out[-1] = '\n';
+ return out - buf;
+}
+
SHOW(__bch_cached_dev)
{
struct cached_dev *dc = container_of(kobj, struct cached_dev,
@@ -124,12 +154,12 @@ SHOW(__bch_cached_dev)
if (attr == &sysfs_cache_mode)
return bch_snprint_string_list(buf, PAGE_SIZE,
- bch_cache_modes + 1,
+ bch_cache_modes,
BDEV_CACHE_MODE(&dc->sb));
if (attr == &sysfs_stop_when_cache_set_failed)
return bch_snprint_string_list(buf, PAGE_SIZE,
- bch_stop_on_failure_modes + 1,
+ bch_stop_on_failure_modes,
dc->stop_when_cache_set_failed);
@@ -253,8 +283,7 @@ STORE(__cached_dev)
bch_cached_dev_run(dc);
if (attr == &sysfs_cache_mode) {
- v = bch_read_string_list(buf, bch_cache_modes + 1);
-
+ v = __sysfs_match_string(bch_cache_modes, -1, buf);
if (v < 0)
return v;
@@ -265,8 +294,7 @@ STORE(__cached_dev)
}
if (attr == &sysfs_stop_when_cache_set_failed) {
- v = bch_read_string_list(buf, bch_stop_on_failure_modes + 1);
-
+ v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
if (v < 0)
return v;
@@ -635,6 +663,7 @@ SHOW_LOCKED(bch_cache_set)
STORE(__bch_cache_set)
{
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+ ssize_t v;
if (attr == &sysfs_unregister)
bch_cache_set_unregister(c);
@@ -698,8 +727,7 @@ STORE(__bch_cache_set)
c->congested_write_threshold_us);
if (attr == &sysfs_errors) {
- ssize_t v = bch_read_string_list(buf, error_actions);
-
+ v = __sysfs_match_string(error_actions, -1, buf);
if (v < 0)
return v;
@@ -714,8 +742,7 @@ STORE(__bch_cache_set)
c->error_decay = strtoul_or_return(buf) / 88;
if (attr == &sysfs_io_disable) {
- int v = strtoul_or_return(buf);
-
+ v = strtoul_or_return(buf);
if (v) {
if (test_and_set_bit(CACHE_SET_IO_DISABLE,
&c->flags))
@@ -929,6 +956,7 @@ SHOW_LOCKED(bch_cache)
STORE(__bch_cache)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
+ ssize_t v;
if (attr == &sysfs_discard) {
bool v = strtoul_or_return(buf);
@@ -943,8 +971,7 @@ STORE(__bch_cache)
}
if (attr == &sysfs_cache_replacement_policy) {
- ssize_t v = bch_read_string_list(buf, cache_replacement_policies);
-
+ v = __sysfs_match_string(cache_replacement_policies, -1, buf);
if (v < 0)
return v;
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 74febd5230df..fc479b026d6d 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -120,41 +120,6 @@ ssize_t bch_hprint(char *buf, int64_t v)
return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
}
-ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
- size_t selected)
-{
- char *out = buf;
- size_t i;
-
- for (i = 0; list[i]; i++)
- out += snprintf(out, buf + size - out,
- i == selected ? "[%s] " : "%s ", list[i]);
-
- out[-1] = '\n';
- return out - buf;
-}
-
-ssize_t bch_read_string_list(const char *buf, const char * const list[])
-{
- size_t i;
- char *s, *d = kstrndup(buf, PAGE_SIZE - 1, GFP_KERNEL);
- if (!d)
- return -ENOMEM;
-
- s = strim(d);
-
- for (i = 0; list[i]; i++)
- if (!strcmp(list[i], s))
- break;
-
- kfree(d);
-
- if (!list[i])
- return -EINVAL;
-
- return i;
-}
-
bool bch_is_zero(const char *p, size_t n)
{
size_t i;
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 268024529edd..cced87f8eb27 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -365,11 +365,6 @@ ssize_t bch_hprint(char *buf, int64_t v);
bool bch_is_zero(const char *p, size_t n);
int bch_parse_uuid(const char *s, char *uuid);
-ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
- size_t selected);
-
-ssize_t bch_read_string_list(const char *buf, const char * const list[]);
-
struct time_stats {
spinlock_t lock;
/*
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index 874841f0fc83..8e33a3808368 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -19,7 +19,7 @@
struct dm_bio_prison {
spinlock_t lock;
- mempool_t *cell_pool;
+ mempool_t cell_pool;
struct rb_root cells;
};
@@ -34,14 +34,15 @@ static struct kmem_cache *_cell_cache;
struct dm_bio_prison *dm_bio_prison_create(void)
{
struct dm_bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
+ int ret;
if (!prison)
return NULL;
spin_lock_init(&prison->lock);
- prison->cell_pool = mempool_create_slab_pool(MIN_CELLS, _cell_cache);
- if (!prison->cell_pool) {
+ ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
+ if (ret) {
kfree(prison);
return NULL;
}
@@ -54,21 +55,21 @@ EXPORT_SYMBOL_GPL(dm_bio_prison_create);
void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
- mempool_destroy(prison->cell_pool);
+ mempool_exit(&prison->cell_pool);
kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
- return mempool_alloc(prison->cell_pool, gfp);
+ return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell)
{
- mempool_free(cell, prison->cell_pool);
+ mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index 8ce3a1a588cf..601b1569206a 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -21,7 +21,7 @@ struct dm_bio_prison_v2 {
struct workqueue_struct *wq;
spinlock_t lock;
- mempool_t *cell_pool;
+ mempool_t cell_pool;
struct rb_root cells;
};
@@ -36,6 +36,7 @@ static struct kmem_cache *_cell_cache;
struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
{
struct dm_bio_prison_v2 *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
+ int ret;
if (!prison)
return NULL;
@@ -43,8 +44,8 @@ struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
prison->wq = wq;
spin_lock_init(&prison->lock);
- prison->cell_pool = mempool_create_slab_pool(MIN_CELLS, _cell_cache);
- if (!prison->cell_pool) {
+ ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
+ if (ret) {
kfree(prison);
return NULL;
}
@@ -57,21 +58,21 @@ EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);
void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
{
- mempool_destroy(prison->cell_pool);
+ mempool_exit(&prison->cell_pool);
kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);
struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
{
- return mempool_alloc(prison->cell_pool, gfp);
+ return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);
void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell)
{
- mempool_free(cell, prison->cell_pool);
+ mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index da208638fba4..001c71248246 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -447,9 +447,9 @@ struct cache {
struct work_struct migration_worker;
struct delayed_work waker;
struct dm_bio_prison_v2 *prison;
- struct bio_set *bs;
+ struct bio_set bs;
- mempool_t *migration_pool;
+ mempool_t migration_pool;
struct dm_cache_policy *policy;
unsigned policy_nr_args;
@@ -550,7 +550,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
struct dm_cache_migration *mg;
- mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+ mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
if (!mg)
return NULL;
@@ -569,7 +569,7 @@ static void free_migration(struct dm_cache_migration *mg)
if (atomic_dec_and_test(&cache->nr_allocated_migrations))
wake_up(&cache->migration_wait);
- mempool_free(mg, cache->migration_pool);
+ mempool_free(mg, &cache->migration_pool);
}
/*----------------------------------------------------------------*/
@@ -924,7 +924,7 @@ static void issue_op(struct bio *bio, void *context)
static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
- struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, cache->bs);
+ struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);
BUG_ON(!origin_bio);
@@ -2011,7 +2011,7 @@ static void destroy(struct cache *cache)
{
unsigned i;
- mempool_destroy(cache->migration_pool);
+ mempool_exit(&cache->migration_pool);
if (cache->prison)
dm_bio_prison_destroy_v2(cache->prison);
@@ -2047,8 +2047,7 @@ static void destroy(struct cache *cache)
kfree(cache->ctr_args[i]);
kfree(cache->ctr_args);
- if (cache->bs)
- bioset_free(cache->bs);
+ bioset_exit(&cache->bs);
kfree(cache);
}
@@ -2498,8 +2497,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
cache->features = ca->features;
if (writethrough_mode(cache)) {
/* Create bioset for writethrough bios issued to origin */
- cache->bs = bioset_create(BIO_POOL_SIZE, 0, 0);
- if (!cache->bs)
+ r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0);
+ if (r)
goto bad;
}
@@ -2630,9 +2629,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
- cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
- migration_cache);
- if (!cache->migration_pool) {
+ r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE,
+ migration_cache);
+ if (r) {
*error = "Error creating cache's migration mempool";
goto bad;
}
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 3222e21cbbf8..f21c5d21bf1b 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -91,8 +91,8 @@ struct mapped_device {
/*
* io objects are allocated from here.
*/
- struct bio_set *io_bs;
- struct bio_set *bs;
+ struct bio_set io_bs;
+ struct bio_set bs;
/*
* freeze/thaw support require holding onto a super block
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 44ff473dab3e..da02f4d8e4b9 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -143,14 +143,14 @@ struct crypt_config {
* pool for per bio private data, crypto requests,
* encryption requeusts/buffer pages and integrity tags
*/
- mempool_t *req_pool;
- mempool_t *page_pool;
- mempool_t *tag_pool;
+ mempool_t req_pool;
+ mempool_t page_pool;
+ mempool_t tag_pool;
unsigned tag_pool_max_sectors;
struct percpu_counter n_allocated_pages;
- struct bio_set *bs;
+ struct bio_set bs;
struct mutex bio_alloc_lock;
struct workqueue_struct *io_queue;
@@ -1245,7 +1245,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
if (!ctx->r.req)
- ctx->r.req = mempool_alloc(cc->req_pool, GFP_NOIO);
+ ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
@@ -1262,7 +1262,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
struct convert_context *ctx)
{
if (!ctx->r.req_aead)
- ctx->r.req_aead = mempool_alloc(cc->req_pool, GFP_NOIO);
+ ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
@@ -1290,7 +1290,7 @@ static void crypt_free_req_skcipher(struct crypt_config *cc,
struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
if ((struct skcipher_request *)(io + 1) != req)
- mempool_free(req, cc->req_pool);
+ mempool_free(req, &cc->req_pool);
}
static void crypt_free_req_aead(struct crypt_config *cc,
@@ -1299,7 +1299,7 @@ static void crypt_free_req_aead(struct crypt_config *cc,
struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
if ((struct aead_request *)(io + 1) != req)
- mempool_free(req, cc->req_pool);
+ mempool_free(req, &cc->req_pool);
}
static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
@@ -1409,7 +1409,7 @@ retry:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_lock(&cc->bio_alloc_lock);
- clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
+ clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
if (!clone)
goto out;
@@ -1418,7 +1418,7 @@ retry:
remaining_size = size;
for (i = 0; i < nr_iovecs; i++) {
- page = mempool_alloc(cc->page_pool, gfp_mask);
+ page = mempool_alloc(&cc->page_pool, gfp_mask);
if (!page) {
crypt_free_buffer_pages(cc, clone);
bio_put(clone);
@@ -1453,7 +1453,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
bio_for_each_segment_all(bv, clone, i) {
BUG_ON(!bv->bv_page);
- mempool_free(bv->bv_page, cc->page_pool);
+ mempool_free(bv->bv_page, &cc->page_pool);
}
}
@@ -1492,7 +1492,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
crypt_free_req(cc, io->ctx.r.req, base_bio);
if (unlikely(io->integrity_metadata_from_pool))
- mempool_free(io->integrity_metadata, io->cc->tag_pool);
+ mempool_free(io->integrity_metadata, &io->cc->tag_pool);
else
kfree(io->integrity_metadata);
@@ -1565,7 +1565,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
* biovecs we don't need to worry about the block layer
* modifying the biovec array; so leverage bio_clone_fast().
*/
- clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
+ clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
if (!clone)
return 1;
@@ -2219,15 +2219,13 @@ static void crypt_dtr(struct dm_target *ti)
crypt_free_tfms(cc);
- if (cc->bs)
- bioset_free(cc->bs);
+ bioset_exit(&cc->bs);
- mempool_destroy(cc->page_pool);
- mempool_destroy(cc->req_pool);
- mempool_destroy(cc->tag_pool);
+ mempool_exit(&cc->page_pool);
+ mempool_exit(&cc->req_pool);
+ mempool_exit(&cc->tag_pool);
- if (cc->page_pool)
- WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
+ WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
percpu_counter_destroy(&cc->n_allocated_pages);
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
@@ -2743,8 +2741,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
iv_size_padding = align_mask;
}
- ret = -ENOMEM;
-
/* ...| IV + padding | original IV | original sec. number | bio tag offset | */
additional_req_size = sizeof(struct dm_crypt_request) +
iv_size_padding + cc->iv_size +
@@ -2752,8 +2748,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
sizeof(uint64_t) +
sizeof(unsigned int);
- cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + additional_req_size);
- if (!cc->req_pool) {
+ ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
+ if (ret) {
ti->error = "Cannot allocate crypt request mempool";
goto bad;
}
@@ -2762,14 +2758,14 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
ARCH_KMALLOC_MINALIGN);
- cc->page_pool = mempool_create(BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
- if (!cc->page_pool) {
+ ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
+ if (ret) {
ti->error = "Cannot allocate page mempool";
goto bad;
}
- cc->bs = bioset_create(MIN_IOS, 0, BIOSET_NEED_BVECS);
- if (!cc->bs) {
+ ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
+ if (ret) {
ti->error = "Cannot allocate crypt bioset";
goto bad;
}
@@ -2806,11 +2802,10 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (!cc->tag_pool_max_sectors)
cc->tag_pool_max_sectors = 1;
- cc->tag_pool = mempool_create_kmalloc_pool(MIN_IOS,
+ ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
cc->tag_pool_max_sectors * cc->on_disk_tag_size);
- if (!cc->tag_pool) {
+ if (ret) {
ti->error = "Cannot allocate integrity tags mempool";
- ret = -ENOMEM;
goto bad;
}
@@ -2903,7 +2898,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
if (bio_sectors(bio) > cc->tag_pool_max_sectors)
dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
- io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
+ io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
io->integrity_metadata_from_pool = true;
}
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 514fb4aec5d1..fc68c7aaef8e 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -142,7 +142,7 @@ struct dm_integrity_c {
unsigned tag_size;
__s8 log2_tag_size;
sector_t start;
- mempool_t *journal_io_mempool;
+ mempool_t journal_io_mempool;
struct dm_io_client *io;
struct dm_bufio_client *bufio;
struct workqueue_struct *metadata_wq;
@@ -1817,7 +1817,7 @@ static void complete_copy_from_journal(unsigned long error, void *context)
struct journal_completion *comp = io->comp;
struct dm_integrity_c *ic = comp->ic;
remove_range(ic, &io->range);
- mempool_free(io, ic->journal_io_mempool);
+ mempool_free(io, &ic->journal_io_mempool);
if (unlikely(error != 0))
dm_integrity_io_error(ic, "copying from journal", -EIO);
complete_journal_op(comp);
@@ -1886,7 +1886,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
}
next_loop = k - 1;
- io = mempool_alloc(ic->journal_io_mempool, GFP_NOIO);
+ io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
io->comp = &comp;
io->range.logical_sector = sec;
io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
@@ -1918,7 +1918,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
if (j == k) {
remove_range_unlocked(ic, &io->range);
spin_unlock_irq(&ic->endio_wait.lock);
- mempool_free(io, ic->journal_io_mempool);
+ mempool_free(io, &ic->journal_io_mempool);
goto skip_io;
}
for (l = j; l < k; l++) {
@@ -2980,9 +2980,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- ic->journal_io_mempool = mempool_create_slab_pool(JOURNAL_IO_MEMPOOL, journal_io_cache);
- if (!ic->journal_io_mempool) {
- r = -ENOMEM;
+ r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
+ if (r) {
ti->error = "Cannot allocate mempool";
goto bad;
}
@@ -3196,7 +3195,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
destroy_workqueue(ic->writer_wq);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
- mempool_destroy(ic->journal_io_mempool);
+ mempool_exit(&ic->journal_io_mempool);
if (ic->io)
dm_io_client_destroy(ic->io);
if (ic->dev)
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index a8d914d5abbe..53c6ed0eaa1f 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -22,8 +22,8 @@
#define DM_IO_MAX_REGIONS BITS_PER_LONG
struct dm_io_client {
- mempool_t *pool;
- struct bio_set *bios;
+ mempool_t pool;
+ struct bio_set bios;
};
/*
@@ -49,32 +49,33 @@ struct dm_io_client *dm_io_client_create(void)
{
struct dm_io_client *client;
unsigned min_ios = dm_get_reserved_bio_based_ios();
+ int ret;
client = kmalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
- client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
- if (!client->pool)
+ ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
+ if (ret)
goto bad;
- client->bios = bioset_create(min_ios, 0, BIOSET_NEED_BVECS);
- if (!client->bios)
+ ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
+ if (ret)
goto bad;
return client;
bad:
- mempool_destroy(client->pool);
+ mempool_exit(&client->pool);
kfree(client);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL(dm_io_client_create);
void dm_io_client_destroy(struct dm_io_client *client)
{
- mempool_destroy(client->pool);
- bioset_free(client->bios);
+ mempool_exit(&client->pool);
+ bioset_exit(&client->bios);
kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
@@ -120,7 +121,7 @@ static void complete_io(struct io *io)
invalidate_kernel_vmap_range(io->vma_invalidate_address,
io->vma_invalidate_size);
- mempool_free(io, io->client->pool);
+ mempool_free(io, &io->client->pool);
fn(error_bits, context);
}
@@ -344,7 +345,7 @@ static void do_region(int op, int op_flags, unsigned region,
dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
}
- bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
+ bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio_set_dev(bio, where->bdev);
bio->bi_end_io = endio;
@@ -442,7 +443,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
init_completion(&sio.wait);
- io = mempool_alloc(client->pool, GFP_NOIO);
+ io = mempool_alloc(&client->pool, GFP_NOIO);
io->error_bits = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
io->client = client;
@@ -474,7 +475,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
return -EIO;
}
- io = mempool_alloc(client->pool, GFP_NOIO);
+ io = mempool_alloc(&client->pool, GFP_NOIO);
io->error_bits = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
io->client = client;
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index e6e7c686646d..c89a675a2aac 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -47,7 +47,7 @@ struct dm_kcopyd_client {
wait_queue_head_t destroyq;
atomic_t nr_jobs;
- mempool_t *job_pool;
+ mempool_t job_pool;
struct workqueue_struct *kcopyd_wq;
struct work_struct kcopyd_work;
@@ -479,7 +479,7 @@ static int run_complete_job(struct kcopyd_job *job)
*/
if (job->master_job == job) {
mutex_destroy(&job->lock);
- mempool_free(job, kc->job_pool);
+ mempool_free(job, &kc->job_pool);
}
fn(read_err, write_err, context);
@@ -751,7 +751,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
* Allocate an array of jobs consisting of one master job
* followed by SPLIT_COUNT sub jobs.
*/
- job = mempool_alloc(kc->job_pool, GFP_NOIO);
+ job = mempool_alloc(&kc->job_pool, GFP_NOIO);
mutex_init(&job->lock);
/*
@@ -835,7 +835,7 @@ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
{
struct kcopyd_job *job;
- job = mempool_alloc(kc->job_pool, GFP_NOIO);
+ job = mempool_alloc(&kc->job_pool, GFP_NOIO);
memset(job, 0, sizeof(struct kcopyd_job));
job->kc = kc;
@@ -879,7 +879,7 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
*---------------------------------------------------------------*/
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
- int r = -ENOMEM;
+ int r;
struct dm_kcopyd_client *kc;
kc = kmalloc(sizeof(*kc), GFP_KERNEL);
@@ -892,14 +892,16 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
INIT_LIST_HEAD(&kc->pages_jobs);
kc->throttle = throttle;
- kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
- if (!kc->job_pool)
+ r = mempool_init_slab_pool(&kc->job_pool, MIN_JOBS, _job_cache);
+ if (r)
goto bad_slab;
INIT_WORK(&kc->kcopyd_work, do_work);
kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
- if (!kc->kcopyd_wq)
+ if (!kc->kcopyd_wq) {
+ r = -ENOMEM;
goto bad_workqueue;
+ }
kc->pages = NULL;
kc->nr_reserved_pages = kc->nr_free_pages = 0;
@@ -923,7 +925,7 @@ bad_io_client:
bad_client_pages:
destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
- mempool_destroy(kc->job_pool);
+ mempool_exit(&kc->job_pool);
bad_slab:
kfree(kc);
@@ -942,7 +944,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
destroy_workqueue(kc->kcopyd_wq);
dm_io_client_destroy(kc->io_client);
client_free_pages(kc);
- mempool_destroy(kc->job_pool);
+ mempool_exit(&kc->job_pool);
kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 53b7b06d0aa8..52090bee17c2 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -76,7 +76,7 @@ struct log_c {
*/
uint32_t integrated_flush;
- mempool_t *flush_entry_pool;
+ mempool_t flush_entry_pool;
};
static struct kmem_cache *_flush_entry_cache;
@@ -249,11 +249,10 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
goto out;
}
- lc->flush_entry_pool = mempool_create_slab_pool(FLUSH_ENTRY_POOL_SIZE,
- _flush_entry_cache);
- if (!lc->flush_entry_pool) {
+ r = mempool_init_slab_pool(&lc->flush_entry_pool, FLUSH_ENTRY_POOL_SIZE,
+ _flush_entry_cache);
+ if (r) {
DMERR("Failed to create flush_entry_pool");
- r = -ENOMEM;
goto out;
}
@@ -313,7 +312,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
out:
kfree(devices_rdata);
if (r) {
- mempool_destroy(lc->flush_entry_pool);
+ mempool_exit(&lc->flush_entry_pool);
kfree(lc);
kfree(ctr_str);
} else {
@@ -342,7 +341,7 @@ static void userspace_dtr(struct dm_dirty_log *log)
if (lc->log_dev)
dm_put_device(lc->ti, lc->log_dev);
- mempool_destroy(lc->flush_entry_pool);
+ mempool_exit(&lc->flush_entry_pool);
kfree(lc->usr_argv_str);
kfree(lc);
@@ -570,7 +569,7 @@ static int userspace_flush(struct dm_dirty_log *log)
int mark_list_is_empty;
int clear_list_is_empty;
struct dm_dirty_log_flush_entry *fe, *tmp_fe;
- mempool_t *flush_entry_pool = lc->flush_entry_pool;
+ mempool_t *flush_entry_pool = &lc->flush_entry_pool;
spin_lock_irqsave(&lc->flush_lock, flags);
list_splice_init(&lc->mark_list, &mark_list);
@@ -653,7 +652,7 @@ static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
struct dm_dirty_log_flush_entry *fe;
/* Wait for an allocation, but _never_ fail */
- fe = mempool_alloc(lc->flush_entry_pool, GFP_NOIO);
+ fe = mempool_alloc(&lc->flush_entry_pool, GFP_NOIO);
BUG_ON(!fe);
spin_lock_irqsave(&lc->flush_lock, flags);
@@ -687,7 +686,7 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
* to cause the region to be resync'ed when the
* device is activated next time.
*/
- fe = mempool_alloc(lc->flush_entry_pool, GFP_ATOMIC);
+ fe = mempool_alloc(&lc->flush_entry_pool, GFP_ATOMIC);
if (!fe) {
DMERR("Failed to allocate memory to clear region.");
return;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 203a0419d2b0..d94ba6f72ff5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -520,7 +520,8 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
bdev = pgpath->path.dev->bdev;
q = bdev_get_queue(bdev);
- clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
+ clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
+ BLK_MQ_REQ_NOWAIT);
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
if (blk_queue_dying(q)) {
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 85c32b22a420..43149eb49375 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -63,7 +63,7 @@ struct dm_region_hash {
/* hash table */
rwlock_t hash_lock;
- mempool_t *region_pool;
+ mempool_t region_pool;
unsigned mask;
unsigned nr_buckets;
unsigned prime;
@@ -169,6 +169,7 @@ struct dm_region_hash *dm_region_hash_create(
struct dm_region_hash *rh;
unsigned nr_buckets, max_buckets;
size_t i;
+ int ret;
/*
* Calculate a suitable number of buckets for our hash
@@ -220,9 +221,9 @@ struct dm_region_hash *dm_region_hash_create(
INIT_LIST_HEAD(&rh->failed_recovered_regions);
rh->flush_failure = 0;
- rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
- sizeof(struct dm_region));
- if (!rh->region_pool) {
+ ret = mempool_init_kmalloc_pool(&rh->region_pool, MIN_REGIONS,
+ sizeof(struct dm_region));
+ if (ret) {
vfree(rh->buckets);
kfree(rh);
rh = ERR_PTR(-ENOMEM);
@@ -242,14 +243,14 @@ void dm_region_hash_destroy(struct dm_region_hash *rh)
list_for_each_entry_safe(reg, nreg, rh->buckets + h,
hash_list) {
BUG_ON(atomic_read(&reg->pending));
- mempool_free(reg, rh->region_pool);
+ mempool_free(reg, &rh->region_pool);
}
}
if (rh->log)
dm_dirty_log_destroy(rh->log);
- mempool_destroy(rh->region_pool);
+ mempool_exit(&rh->region_pool);
vfree(rh->buckets);
kfree(rh);
}
@@ -287,7 +288,7 @@ static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
struct dm_region *reg, *nreg;
- nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
+ nreg = mempool_alloc(&rh->region_pool, GFP_ATOMIC);
if (unlikely(!nreg))
nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);
@@ -303,7 +304,7 @@ static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
reg = __rh_lookup(rh, region);
if (reg)
/* We lost the race. */
- mempool_free(nreg, rh->region_pool);
+ mempool_free(nreg, &rh->region_pool);
else {
__rh_insert(rh, nreg);
if (nreg->state == DM_RH_CLEAN) {
@@ -481,17 +482,17 @@ void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
list_for_each_entry_safe(reg, next, &recovered, list) {
rh->log->type->clear_region(rh->log, reg->key);
complete_resync_work(reg, 1);
- mempool_free(reg, rh->region_pool);
+ mempool_free(reg, &rh->region_pool);
}
list_for_each_entry_safe(reg, next, &failed_recovered, list) {
complete_resync_work(reg, errors_handled ? 0 : 1);
- mempool_free(reg, rh->region_pool);
+ mempool_free(reg, &rh->region_pool);
}
list_for_each_entry_safe(reg, next, &clean, list) {
rh->log->type->clear_region(rh->log, reg->key);
- mempool_free(reg, rh->region_pool);
+ mempool_free(reg, &rh->region_pool);
}
rh->log->type->flush(rh->log);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index bf0b840645cc..6e547b8dd298 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -406,7 +406,7 @@ static blk_status_t dm_dispatch_clone_request(struct request *clone, struct requ
if (blk_queue_io_stat(clone->q))
clone->rq_flags |= RQF_IO_STAT;
- clone->start_time = jiffies;
+ clone->start_time_ns = ktime_get_ns();
r = blk_insert_cloned_request(clone->q, clone);
if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
/* must complete clone in terms of original request */
@@ -433,7 +433,7 @@ static int setup_clone(struct request *clone, struct request *rq,
{
int r;
- r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
+ r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
dm_rq_bio_constructor, tio);
if (r)
return r;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 216035be5661..b11ddc55f297 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -87,7 +87,7 @@ struct dm_snapshot {
*/
struct list_head out_of_order_list;
- mempool_t *pending_pool;
+ mempool_t pending_pool;
struct dm_exception_table pending;
struct dm_exception_table complete;
@@ -682,7 +682,7 @@ static void free_completed_exception(struct dm_exception *e)
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
- struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
+ struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool,
GFP_NOIO);
atomic_inc(&s->pending_exceptions_count);
@@ -695,7 +695,7 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
struct dm_snapshot *s = pe->snap;
- mempool_free(pe, s->pending_pool);
+ mempool_free(pe, &s->pending_pool);
smp_mb__before_atomic();
atomic_dec(&s->pending_exceptions_count);
}
@@ -1196,10 +1196,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_kcopyd;
}
- s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
- if (!s->pending_pool) {
+ r = mempool_init_slab_pool(&s->pending_pool, MIN_IOS, pending_cache);
+ if (r) {
ti->error = "Could not allocate mempool for pending exceptions";
- r = -ENOMEM;
goto bad_pending_pool;
}
@@ -1259,7 +1258,7 @@ bad_read_metadata:
unregister_snapshot(s);
bad_load_and_register:
- mempool_destroy(s->pending_pool);
+ mempool_exit(&s->pending_pool);
bad_pending_pool:
dm_kcopyd_client_destroy(s->kcopyd_client);
@@ -1355,7 +1354,7 @@ static void snapshot_dtr(struct dm_target *ti)
while (atomic_read(&s->pending_exceptions_count))
msleep(1);
/*
- * Ensure instructions in mempool_destroy aren't reordered
+ * Ensure instructions in mempool_exit aren't reordered
* before atomic_read.
*/
smp_mb();
@@ -1367,7 +1366,7 @@ static void snapshot_dtr(struct dm_target *ti)
__free_exceptions(s);
- mempool_destroy(s->pending_pool);
+ mempool_exit(&s->pending_pool);
dm_exception_store_destroy(s->store);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index b11107497d2e..6c923824ec91 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -260,7 +260,7 @@ struct pool {
struct dm_deferred_set *all_io_ds;
struct dm_thin_new_mapping *next_mapping;
- mempool_t *mapping_pool;
+ mempool_t mapping_pool;
process_bio_fn process_bio;
process_bio_fn process_discard;
@@ -917,7 +917,7 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
cell_error(m->tc->pool, m->cell);
list_del(&m->list);
- mempool_free(m, m->tc->pool->mapping_pool);
+ mempool_free(m, &m->tc->pool->mapping_pool);
}
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
@@ -961,7 +961,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
out:
list_del(&m->list);
- mempool_free(m, pool->mapping_pool);
+ mempool_free(m, &pool->mapping_pool);
}
/*----------------------------------------------------------------*/
@@ -971,7 +971,7 @@ static void free_discard_mapping(struct dm_thin_new_mapping *m)
struct thin_c *tc = m->tc;
if (m->cell)
cell_defer_no_holder(tc, m->cell);
- mempool_free(m, tc->pool->mapping_pool);
+ mempool_free(m, &tc->pool->mapping_pool);
}
static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
@@ -999,7 +999,7 @@ static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
bio_endio(m->bio);
cell_defer_no_holder(tc, m->cell);
- mempool_free(m, tc->pool->mapping_pool);
+ mempool_free(m, &tc->pool->mapping_pool);
}
/*----------------------------------------------------------------*/
@@ -1092,7 +1092,7 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
metadata_operation_failed(pool, "dm_thin_remove_range", r);
bio_io_error(m->bio);
cell_defer_no_holder(tc, m->cell);
- mempool_free(m, pool->mapping_pool);
+ mempool_free(m, &pool->mapping_pool);
return;
}
@@ -1105,7 +1105,7 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
bio_io_error(m->bio);
cell_defer_no_holder(tc, m->cell);
- mempool_free(m, pool->mapping_pool);
+ mempool_free(m, &pool->mapping_pool);
return;
}
@@ -1150,7 +1150,7 @@ static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
bio_endio(m->bio);
cell_defer_no_holder(tc, m->cell);
- mempool_free(m, pool->mapping_pool);
+ mempool_free(m, &pool->mapping_pool);
}
static void process_prepared(struct pool *pool, struct list_head *head,
@@ -1196,7 +1196,7 @@ static int ensure_next_mapping(struct pool *pool)
if (pool->next_mapping)
return 0;
- pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
+ pool->next_mapping = mempool_alloc(&pool->mapping_pool, GFP_ATOMIC);
return pool->next_mapping ? 0 : -ENOMEM;
}
@@ -2835,8 +2835,8 @@ static void __pool_destroy(struct pool *pool)
destroy_workqueue(pool->wq);
if (pool->next_mapping)
- mempool_free(pool->next_mapping, pool->mapping_pool);
- mempool_destroy(pool->mapping_pool);
+ mempool_free(pool->next_mapping, &pool->mapping_pool);
+ mempool_exit(&pool->mapping_pool);
dm_deferred_set_destroy(pool->shared_read_ds);
dm_deferred_set_destroy(pool->all_io_ds);
kfree(pool);
@@ -2931,11 +2931,11 @@ static struct pool *pool_create(struct mapped_device *pool_md,
}
pool->next_mapping = NULL;
- pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
- _new_mapping_cache);
- if (!pool->mapping_pool) {
+ r = mempool_init_slab_pool(&pool->mapping_pool, MAPPING_POOL_SIZE,
+ _new_mapping_cache);
+ if (r) {
*error = "Error creating pool's mapping mempool";
- err_p = ERR_PTR(-ENOMEM);
+ err_p = ERR_PTR(r);
goto bad_mapping_pool;
}
@@ -2955,7 +2955,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
return pool;
bad_sort_array:
- mempool_destroy(pool->mapping_pool);
+ mempool_exit(&pool->mapping_pool);
bad_mapping_pool:
dm_deferred_set_destroy(pool->all_io_ds);
bad_all_io_ds:
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index e13f90832b6b..86405869f1af 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -309,13 +309,13 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
unsigned n;
if (!fio->rs)
- fio->rs = mempool_alloc(v->fec->rs_pool, GFP_NOIO);
+ fio->rs = mempool_alloc(&v->fec->rs_pool, GFP_NOIO);
fec_for_each_prealloc_buffer(n) {
if (fio->bufs[n])
continue;
- fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOWAIT);
+ fio->bufs[n] = mempool_alloc(&v->fec->prealloc_pool, GFP_NOWAIT);
if (unlikely(!fio->bufs[n])) {
DMERR("failed to allocate FEC buffer");
return -ENOMEM;
@@ -327,7 +327,7 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
if (fio->bufs[n])
continue;
- fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOWAIT);
+ fio->bufs[n] = mempool_alloc(&v->fec->extra_pool, GFP_NOWAIT);
/* we can manage with even one buffer if necessary */
if (unlikely(!fio->bufs[n]))
break;
@@ -335,7 +335,7 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
fio->nbufs = n;
if (!fio->output)
- fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO);
+ fio->output = mempool_alloc(&v->fec->output_pool, GFP_NOIO);
return 0;
}
@@ -493,15 +493,15 @@ void verity_fec_finish_io(struct dm_verity_io *io)
if (!verity_fec_is_enabled(io->v))
return;
- mempool_free(fio->rs, f->rs_pool);
+ mempool_free(fio->rs, &f->rs_pool);
fec_for_each_prealloc_buffer(n)
- mempool_free(fio->bufs[n], f->prealloc_pool);
+ mempool_free(fio->bufs[n], &f->prealloc_pool);
fec_for_each_extra_buffer(fio, n)
- mempool_free(fio->bufs[n], f->extra_pool);
+ mempool_free(fio->bufs[n], &f->extra_pool);
- mempool_free(fio->output, f->output_pool);
+ mempool_free(fio->output, &f->output_pool);
}
/*
@@ -549,9 +549,9 @@ void verity_fec_dtr(struct dm_verity *v)
if (!verity_fec_is_enabled(v))
goto out;
- mempool_destroy(f->rs_pool);
- mempool_destroy(f->prealloc_pool);
- mempool_destroy(f->extra_pool);
+ mempool_exit(&f->rs_pool);
+ mempool_exit(&f->prealloc_pool);
+ mempool_exit(&f->extra_pool);
kmem_cache_destroy(f->cache);
if (f->data_bufio)
@@ -675,6 +675,7 @@ int verity_fec_ctr(struct dm_verity *v)
struct dm_verity_fec *f = v->fec;
struct dm_target *ti = v->ti;
u64 hash_blocks;
+ int ret;
if (!verity_fec_is_enabled(v)) {
verity_fec_dtr(v);
@@ -770,11 +771,11 @@ int verity_fec_ctr(struct dm_verity *v)
}
/* Preallocate an rs_control structure for each worker thread */
- f->rs_pool = mempool_create(num_online_cpus(), fec_rs_alloc,
- fec_rs_free, (void *) v);
- if (!f->rs_pool) {
+ ret = mempool_init(&f->rs_pool, num_online_cpus(), fec_rs_alloc,
+ fec_rs_free, (void *) v);
+ if (ret) {
ti->error = "Cannot allocate RS pool";
- return -ENOMEM;
+ return ret;
}
f->cache = kmem_cache_create("dm_verity_fec_buffers",
@@ -786,26 +787,26 @@ int verity_fec_ctr(struct dm_verity *v)
}
/* Preallocate DM_VERITY_FEC_BUF_PREALLOC buffers for each thread */
- f->prealloc_pool = mempool_create_slab_pool(num_online_cpus() *
- DM_VERITY_FEC_BUF_PREALLOC,
- f->cache);
- if (!f->prealloc_pool) {
+ ret = mempool_init_slab_pool(&f->prealloc_pool, num_online_cpus() *
+ DM_VERITY_FEC_BUF_PREALLOC,
+ f->cache);
+ if (ret) {
ti->error = "Cannot allocate FEC buffer prealloc pool";
- return -ENOMEM;
+ return ret;
}
- f->extra_pool = mempool_create_slab_pool(0, f->cache);
- if (!f->extra_pool) {
+ ret = mempool_init_slab_pool(&f->extra_pool, 0, f->cache);
+ if (ret) {
ti->error = "Cannot allocate FEC buffer extra pool";
- return -ENOMEM;
+ return ret;
}
/* Preallocate an output buffer for each thread */
- f->output_pool = mempool_create_kmalloc_pool(num_online_cpus(),
- 1 << v->data_dev_block_bits);
- if (!f->output_pool) {
+ ret = mempool_init_kmalloc_pool(&f->output_pool, num_online_cpus(),
+ 1 << v->data_dev_block_bits);
+ if (ret) {
ti->error = "Cannot allocate FEC output pool";
- return -ENOMEM;
+ return ret;
}
/* Reserve space for our per-bio data */
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index bb31ce87a933..6ad803b2b36c 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -46,10 +46,10 @@ struct dm_verity_fec {
sector_t hash_blocks; /* blocks covered after v->hash_start */
unsigned char roots; /* number of parity bytes, M-N of RS(M, N) */
unsigned char rsn; /* N of RS(M, N) */
- mempool_t *rs_pool; /* mempool for fio->rs */
- mempool_t *prealloc_pool; /* mempool for preallocated buffers */
- mempool_t *extra_pool; /* mempool for extra buffers */
- mempool_t *output_pool; /* mempool for output */
+ mempool_t rs_pool; /* mempool for fio->rs */
+ mempool_t prealloc_pool; /* mempool for preallocated buffers */
+ mempool_t extra_pool; /* mempool for extra buffers */
+ mempool_t output_pool; /* mempool for output */
struct kmem_cache *cache; /* cache for buffers */
};
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index e73b0776683c..30602d15ad9a 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -57,7 +57,7 @@ struct dmz_target {
struct workqueue_struct *chunk_wq;
/* For cloned BIOs to zones */
- struct bio_set *bio_set;
+ struct bio_set bio_set;
/* For flush */
spinlock_t flush_lock;
@@ -121,7 +121,7 @@ static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
}
/* Partial BIO: we need to clone the BIO */
- clone = bio_clone_fast(bio, GFP_NOIO, dmz->bio_set);
+ clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
if (!clone)
return -ENOMEM;
@@ -779,10 +779,9 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;
/* Zone BIO */
- dmz->bio_set = bioset_create(DMZ_MIN_BIOS, 0, 0);
- if (!dmz->bio_set) {
+ ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
+ if (ret) {
ti->error = "Create BIO set failed";
- ret = -ENOMEM;
goto err_meta;
}
@@ -828,7 +827,7 @@ err_cwq:
destroy_workqueue(dmz->chunk_wq);
err_bio:
mutex_destroy(&dmz->chunk_lock);
- bioset_free(dmz->bio_set);
+ bioset_exit(&dmz->bio_set);
err_meta:
dmz_dtr_metadata(dmz->metadata);
err_dev:
@@ -858,7 +857,7 @@ static void dmz_dtr(struct dm_target *ti)
dmz_dtr_metadata(dmz->metadata);
- bioset_free(dmz->bio_set);
+ bioset_exit(&dmz->bio_set);
dmz_put_zoned_device(ti);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0a7b0107ca78..98dff36b89a3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -148,8 +148,8 @@ static int dm_numa_node = DM_NUMA_NODE;
* For mempools pre-allocation at the table loading time.
*/
struct dm_md_mempools {
- struct bio_set *bs;
- struct bio_set *io_bs;
+ struct bio_set bs;
+ struct bio_set io_bs;
};
struct table_device {
@@ -537,7 +537,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
struct dm_target_io *tio;
struct bio *clone;
- clone = bio_alloc_bioset(GFP_NOIO, 0, md->io_bs);
+ clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
if (!clone)
return NULL;
@@ -572,7 +572,7 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *t
/* the dm_target_io embedded in ci->io is available */
tio = &ci->io->tio;
} else {
- struct bio *clone = bio_alloc_bioset(gfp_mask, 0, ci->io->md->bs);
+ struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
if (!clone)
return NULL;
@@ -1583,7 +1583,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
* won't be affected by this reassignment.
*/
struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
- md->queue->bio_split);
+ &md->queue->bio_split);
ci.io->orig_bio = b;
bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
bio_chain(b, bio);
@@ -1785,10 +1785,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
destroy_workqueue(md->wq);
if (md->kworker_task)
kthread_stop(md->kworker_task);
- if (md->bs)
- bioset_free(md->bs);
- if (md->io_bs)
- bioset_free(md->io_bs);
+ bioset_exit(&md->bs);
+ bioset_exit(&md->io_bs);
if (md->dax_dev) {
kill_dax(md->dax_dev);
@@ -1965,16 +1963,10 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
* If so, reload bioset because front_pad may have changed
* because a different table was loaded.
*/
- if (md->bs) {
- bioset_free(md->bs);
- md->bs = NULL;
- }
- if (md->io_bs) {
- bioset_free(md->io_bs);
- md->io_bs = NULL;
- }
+ bioset_exit(&md->bs);
+ bioset_exit(&md->io_bs);
- } else if (md->bs) {
+ } else if (bioset_initialized(&md->bs)) {
/*
* There's no need to reload with request-based dm
* because the size of front_pad doesn't change.
@@ -1986,12 +1978,14 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
goto out;
}
- BUG_ON(!p || md->bs || md->io_bs);
+ BUG_ON(!p ||
+ bioset_initialized(&md->bs) ||
+ bioset_initialized(&md->io_bs));
md->bs = p->bs;
- p->bs = NULL;
+ memset(&p->bs, 0, sizeof(p->bs));
md->io_bs = p->io_bs;
- p->io_bs = NULL;
+ memset(&p->io_bs, 0, sizeof(p->io_bs));
out:
/* mempool bind completed, no longer need any mempools in the table */
dm_table_free_md_mempools(t);
@@ -2905,6 +2899,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
unsigned int pool_size = 0;
unsigned int front_pad, io_front_pad;
+ int ret;
if (!pools)
return NULL;
@@ -2916,10 +2911,10 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
- pools->io_bs = bioset_create(pool_size, io_front_pad, 0);
- if (!pools->io_bs)
+ ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
+ if (ret)
goto out;
- if (integrity && bioset_integrity_create(pools->io_bs, pool_size))
+ if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
goto out;
break;
case DM_TYPE_REQUEST_BASED:
@@ -2932,11 +2927,11 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
BUG();
}
- pools->bs = bioset_create(pool_size, front_pad, 0);
- if (!pools->bs)
+ ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
+ if (ret)
goto out;
- if (integrity && bioset_integrity_create(pools->bs, pool_size))
+ if (integrity && bioset_integrity_create(&pools->bs, pool_size))
goto out;
return pools;
@@ -2952,10 +2947,8 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
if (!pools)
return;
- if (pools->bs)
- bioset_free(pools->bs);
- if (pools->io_bs)
- bioset_free(pools->io_bs);
+ bioset_exit(&pools->bs);
+ bioset_exit(&pools->io_bs);
kfree(pools);
}
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index 38264b38420f..c2fdf899de14 100644
--- a/drivers/md/md-faulty.c
+++ b/drivers/md/md-faulty.c
@@ -214,7 +214,7 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
}
}
if (failit) {
- struct bio *b = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
+ struct bio *b = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
bio_set_dev(b, conf->rdev->bdev);
b->bi_private = bio;
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 4964323d936b..d45c697c0ebe 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -269,7 +269,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
if (unlikely(bio_end_sector(bio) > end_sector)) {
/* This bio crosses a device boundary, so we have to split it */
struct bio *split = bio_split(bio, end_sector - bio_sector,
- GFP_NOIO, mddev->bio_set);
+ GFP_NOIO, &mddev->bio_set);
bio_chain(split, bio);
generic_make_request(bio);
bio = split;
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 0a7e99d62c69..f71fcdb9b39c 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -80,7 +80,7 @@ static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status)
bio->bi_status = status;
bio_endio(bio);
- mempool_free(mp_bh, conf->pool);
+ mempool_free(mp_bh, &conf->pool);
}
static void multipath_end_request(struct bio *bio)
@@ -117,7 +117,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
return true;
}
- mp_bh = mempool_alloc(conf->pool, GFP_NOIO);
+ mp_bh = mempool_alloc(&conf->pool, GFP_NOIO);
mp_bh->master_bio = bio;
mp_bh->mddev = mddev;
@@ -125,7 +125,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
bio_io_error(bio);
- mempool_free(mp_bh, conf->pool);
+ mempool_free(mp_bh, &conf->pool);
return true;
}
multipath = conf->multipaths + mp_bh->path;
@@ -378,6 +378,7 @@ static int multipath_run (struct mddev *mddev)
struct multipath_info *disk;
struct md_rdev *rdev;
int working_disks;
+ int ret;
if (md_check_no_bitmap(mddev))
return -EINVAL;
@@ -431,9 +432,9 @@ static int multipath_run (struct mddev *mddev)
}
mddev->degraded = conf->raid_disks - working_disks;
- conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
- sizeof(struct multipath_bh));
- if (conf->pool == NULL)
+ ret = mempool_init_kmalloc_pool(&conf->pool, NR_RESERVED_BUFS,
+ sizeof(struct multipath_bh));
+ if (ret)
goto out_free_conf;
mddev->thread = md_register_thread(multipathd, mddev,
@@ -455,7 +456,7 @@ static int multipath_run (struct mddev *mddev)
return 0;
out_free_conf:
- mempool_destroy(conf->pool);
+ mempool_exit(&conf->pool);
kfree(conf->multipaths);
kfree(conf);
mddev->private = NULL;
@@ -467,7 +468,7 @@ static void multipath_free(struct mddev *mddev, void *priv)
{
struct mpconf *conf = priv;
- mempool_destroy(conf->pool);
+ mempool_exit(&conf->pool);
kfree(conf->multipaths);
kfree(conf);
}
diff --git a/drivers/md/md-multipath.h b/drivers/md/md-multipath.h
index 0adb941f485a..b3099e5fc4d7 100644
--- a/drivers/md/md-multipath.h
+++ b/drivers/md/md-multipath.h
@@ -13,7 +13,7 @@ struct mpconf {
spinlock_t device_lock;
struct list_head retry_list;
- mempool_t *pool;
+ mempool_t pool;
};
/*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c208c01f63a5..fc692b7128bb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -193,10 +193,10 @@ struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
{
struct bio *b;
- if (!mddev || !mddev->bio_set)
+ if (!mddev || !bioset_initialized(&mddev->bio_set))
return bio_alloc(gfp_mask, nr_iovecs);
- b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
+ b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
if (!b)
return NULL;
return b;
@@ -205,10 +205,10 @@ EXPORT_SYMBOL_GPL(bio_alloc_mddev);
static struct bio *md_bio_alloc_sync(struct mddev *mddev)
{
- if (!mddev || !mddev->sync_set)
+ if (!mddev || !bioset_initialized(&mddev->sync_set))
return bio_alloc(GFP_NOIO, 1);
- return bio_alloc_bioset(GFP_NOIO, 1, mddev->sync_set);
+ return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
}
/*
@@ -510,7 +510,10 @@ static void mddev_delayed_delete(struct work_struct *ws);
static void mddev_put(struct mddev *mddev)
{
- struct bio_set *bs = NULL, *sync_bs = NULL;
+ struct bio_set bs, sync_bs;
+
+ memset(&bs, 0, sizeof(bs));
+ memset(&sync_bs, 0, sizeof(sync_bs));
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
@@ -521,8 +524,8 @@ static void mddev_put(struct mddev *mddev)
list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
sync_bs = mddev->sync_set;
- mddev->bio_set = NULL;
- mddev->sync_set = NULL;
+ memset(&mddev->bio_set, 0, sizeof(mddev->bio_set));
+ memset(&mddev->sync_set, 0, sizeof(mddev->sync_set));
if (mddev->gendisk) {
/* We did a probe so need to clean up. Call
* queue_work inside the spinlock so that
@@ -535,10 +538,8 @@ static void mddev_put(struct mddev *mddev)
kfree(mddev);
}
spin_unlock(&all_mddevs_lock);
- if (bs)
- bioset_free(bs);
- if (sync_bs)
- bioset_free(sync_bs);
+ bioset_exit(&bs);
+ bioset_exit(&sync_bs);
}
static void md_safemode_timeout(struct timer_list *t);
@@ -2123,7 +2124,7 @@ int md_integrity_register(struct mddev *mddev)
bdev_get_integrity(reference->bdev));
pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
- if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
+ if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) {
pr_err("md: failed to create integrity pool for %s\n",
mdname(mddev));
return -EINVAL;
@@ -5497,17 +5498,15 @@ int md_run(struct mddev *mddev)
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
- if (mddev->bio_set == NULL) {
- mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
- if (!mddev->bio_set)
- return -ENOMEM;
+ if (!bioset_initialized(&mddev->bio_set)) {
+ err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ if (err)
+ return err;
}
- if (mddev->sync_set == NULL) {
- mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
- if (!mddev->sync_set) {
- err = -ENOMEM;
+ if (!bioset_initialized(&mddev->sync_set)) {
+ err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ if (err)
goto abort;
- }
}
spin_lock(&pers_lock);
@@ -5668,14 +5667,8 @@ int md_run(struct mddev *mddev)
return 0;
abort:
- if (mddev->bio_set) {
- bioset_free(mddev->bio_set);
- mddev->bio_set = NULL;
- }
- if (mddev->sync_set) {
- bioset_free(mddev->sync_set);
- mddev->sync_set = NULL;
- }
+ bioset_exit(&mddev->bio_set);
+ bioset_exit(&mddev->sync_set);
return err;
}
@@ -5888,14 +5881,8 @@ void md_stop(struct mddev *mddev)
* This is called from dm-raid
*/
__md_stop(mddev);
- if (mddev->bio_set) {
- bioset_free(mddev->bio_set);
- mddev->bio_set = NULL;
- }
- if (mddev->sync_set) {
- bioset_free(mddev->sync_set);
- mddev->sync_set = NULL;
- }
+ bioset_exit(&mddev->bio_set);
+ bioset_exit(&mddev->sync_set);
}
EXPORT_SYMBOL_GPL(md_stop);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index fbc925cce810..3507cab22cb6 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -452,8 +452,8 @@ struct mddev {
struct attribute_group *to_remove;
- struct bio_set *bio_set;
- struct bio_set *sync_set; /* for sync operations like
+ struct bio_set bio_set;
+ struct bio_set sync_set; /* for sync operations like
* metadata and bitmap writes
*/
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 584c10347267..65ae47a02218 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -479,7 +479,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
if (bio_end_sector(bio) > zone->zone_end) {
struct bio *split = bio_split(bio,
zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
- mddev->bio_set);
+ &mddev->bio_set);
bio_chain(split, bio);
generic_make_request(bio);
bio = split;
@@ -582,7 +582,8 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
sector = bio_sector;
if (sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, sectors, GFP_NOIO, mddev->bio_set);
+ struct bio *split = bio_split(bio, sectors, GFP_NOIO,
+ &mddev->bio_set);
bio_chain(split, bio);
generic_make_request(bio);
bio = split;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e9e3308cb0a7..bad28520719b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -221,7 +221,7 @@ static void free_r1bio(struct r1bio *r1_bio)
struct r1conf *conf = r1_bio->mddev->private;
put_all_bios(conf, r1_bio);
- mempool_free(r1_bio, conf->r1bio_pool);
+ mempool_free(r1_bio, &conf->r1bio_pool);
}
static void put_buf(struct r1bio *r1_bio)
@@ -236,7 +236,7 @@ static void put_buf(struct r1bio *r1_bio)
rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
}
- mempool_free(r1_bio, conf->r1buf_pool);
+ mempool_free(r1_bio, &conf->r1buf_pool);
lower_barrier(conf, sect);
}
@@ -1178,7 +1178,7 @@ alloc_r1bio(struct mddev *mddev, struct bio *bio)
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
- r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+ r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
/* Ensure no bio records IO_BLOCKED */
memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
init_r1bio(r1_bio, mddev, bio);
@@ -1268,7 +1268,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
- gfp, conf->bio_split);
+ gfp, &conf->bio_split);
bio_chain(split, bio);
generic_make_request(bio);
bio = split;
@@ -1278,7 +1278,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
r1_bio->read_disk = rdisk;
- read_bio = bio_clone_fast(bio, gfp, mddev->bio_set);
+ read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
r1_bio->bios[rdisk] = read_bio;
@@ -1439,7 +1439,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
- GFP_NOIO, conf->bio_split);
+ GFP_NOIO, &conf->bio_split);
bio_chain(split, bio);
generic_make_request(bio);
bio = split;
@@ -1479,9 +1479,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
if (r1_bio->behind_master_bio)
mbio = bio_clone_fast(r1_bio->behind_master_bio,
- GFP_NOIO, mddev->bio_set);
+ GFP_NOIO, &mddev->bio_set);
else
- mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
+ mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
if (r1_bio->behind_master_bio) {
if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
@@ -1657,8 +1657,7 @@ static void close_sync(struct r1conf *conf)
_allow_barrier(conf, idx);
}
- mempool_destroy(conf->r1buf_pool);
- conf->r1buf_pool = NULL;
+ mempool_exit(&conf->r1buf_pool);
}
static int raid1_spare_active(struct mddev *mddev)
@@ -2348,10 +2347,10 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
wbio = bio_clone_fast(r1_bio->behind_master_bio,
GFP_NOIO,
- mddev->bio_set);
+ &mddev->bio_set);
} else {
wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
- mddev->bio_set);
+ &mddev->bio_set);
}
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
@@ -2564,17 +2563,15 @@ static int init_resync(struct r1conf *conf)
int buffs;
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
- BUG_ON(conf->r1buf_pool);
- conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
- conf->poolinfo);
- if (!conf->r1buf_pool)
- return -ENOMEM;
- return 0;
+ BUG_ON(mempool_initialized(&conf->r1buf_pool));
+
+ return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
+ r1buf_pool_free, conf->poolinfo);
}
static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
{
- struct r1bio *r1bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+ struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
struct resync_pages *rps;
struct bio *bio;
int i;
@@ -2617,7 +2614,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
int idx = sector_to_idx(sector_nr);
int page_idx = 0;
- if (!conf->r1buf_pool)
+ if (!mempool_initialized(&conf->r1buf_pool))
if (init_resync(conf))
return 0;
@@ -2953,14 +2950,13 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (!conf->poolinfo)
goto abort;
conf->poolinfo->raid_disks = mddev->raid_disks * 2;
- conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
- r1bio_pool_free,
- conf->poolinfo);
- if (!conf->r1bio_pool)
+ err = mempool_init(&conf->r1bio_pool, NR_RAID1_BIOS, r1bio_pool_alloc,
+ r1bio_pool_free, conf->poolinfo);
+ if (err)
goto abort;
- conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
- if (!conf->bio_split)
+ err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
+ if (err)
goto abort;
conf->poolinfo->mddev = mddev;
@@ -3033,7 +3029,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
abort:
if (conf) {
- mempool_destroy(conf->r1bio_pool);
+ mempool_exit(&conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
kfree(conf->poolinfo);
@@ -3041,8 +3037,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
kfree(conf->nr_waiting);
kfree(conf->nr_queued);
kfree(conf->barrier);
- if (conf->bio_split)
- bioset_free(conf->bio_split);
+ bioset_exit(&conf->bio_split);
kfree(conf);
}
return ERR_PTR(err);
@@ -3144,7 +3139,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
{
struct r1conf *conf = priv;
- mempool_destroy(conf->r1bio_pool);
+ mempool_exit(&conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
kfree(conf->poolinfo);
@@ -3152,8 +3147,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
kfree(conf->nr_waiting);
kfree(conf->nr_queued);
kfree(conf->barrier);
- if (conf->bio_split)
- bioset_free(conf->bio_split);
+ bioset_exit(&conf->bio_split);
kfree(conf);
}
@@ -3199,13 +3193,17 @@ static int raid1_reshape(struct mddev *mddev)
* At the same time, we "pack" the devices so that all the missing
* devices have the higher raid_disk numbers.
*/
- mempool_t *newpool, *oldpool;
+ mempool_t newpool, oldpool;
struct pool_info *newpoolinfo;
struct raid1_info *newmirrors;
struct r1conf *conf = mddev->private;
int cnt, raid_disks;
unsigned long flags;
int d, d2;
+ int ret;
+
+ memset(&newpool, 0, sizeof(newpool));
+ memset(&oldpool, 0, sizeof(oldpool));
/* Cannot change chunk_size, layout, or level */
if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
@@ -3237,17 +3235,17 @@ static int raid1_reshape(struct mddev *mddev)
newpoolinfo->mddev = mddev;
newpoolinfo->raid_disks = raid_disks * 2;
- newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
- r1bio_pool_free, newpoolinfo);
- if (!newpool) {
+ ret = mempool_init(&newpool, NR_RAID1_BIOS, r1bio_pool_alloc,
+ r1bio_pool_free, newpoolinfo);
+ if (ret) {
kfree(newpoolinfo);
- return -ENOMEM;
+ return ret;
}
newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
GFP_KERNEL);
if (!newmirrors) {
kfree(newpoolinfo);
- mempool_destroy(newpool);
+ mempool_exit(&newpool);
return -ENOMEM;
}
@@ -3287,7 +3285,7 @@ static int raid1_reshape(struct mddev *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
- mempool_destroy(oldpool);
+ mempool_exit(&oldpool);
return 0;
}
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index eb84bc68e2fd..e7ccad898736 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -118,10 +118,10 @@ struct r1conf {
* mempools - it changes when the array grows or shrinks
*/
struct pool_info *poolinfo;
- mempool_t *r1bio_pool;
- mempool_t *r1buf_pool;
+ mempool_t r1bio_pool;
+ mempool_t r1buf_pool;
- struct bio_set *bio_split;
+ struct bio_set bio_split;
/* temporary buffer to synchronous IO when attempting to repair
* a read error.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3c60774c8430..37d4b236b81b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -291,14 +291,14 @@ static void free_r10bio(struct r10bio *r10_bio)
struct r10conf *conf = r10_bio->mddev->private;
put_all_bios(conf, r10_bio);
- mempool_free(r10_bio, conf->r10bio_pool);
+ mempool_free(r10_bio, &conf->r10bio_pool);
}
static void put_buf(struct r10bio *r10_bio)
{
struct r10conf *conf = r10_bio->mddev->private;
- mempool_free(r10_bio, conf->r10buf_pool);
+ mempool_free(r10_bio, &conf->r10buf_pool);
lower_barrier(conf);
}
@@ -1204,7 +1204,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
(unsigned long long)r10_bio->sector);
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
- gfp, conf->bio_split);
+ gfp, &conf->bio_split);
bio_chain(split, bio);
generic_make_request(bio);
bio = split;
@@ -1213,7 +1213,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
}
slot = r10_bio->read_slot;
- read_bio = bio_clone_fast(bio, gfp, mddev->bio_set);
+ read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
r10_bio->devs[slot].bio = read_bio;
r10_bio->devs[slot].rdev = rdev;
@@ -1261,7 +1261,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
} else
rdev = conf->mirrors[devnum].rdev;
- mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
+ mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
if (replacement)
r10_bio->devs[n_copy].repl_bio = mbio;
else
@@ -1509,7 +1509,7 @@ retry_write:
if (r10_bio->sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, r10_bio->sectors,
- GFP_NOIO, conf->bio_split);
+ GFP_NOIO, &conf->bio_split);
bio_chain(split, bio);
generic_make_request(bio);
bio = split;
@@ -1533,7 +1533,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
struct r10conf *conf = mddev->private;
struct r10bio *r10_bio;
- r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+ r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
r10_bio->master_bio = bio;
r10_bio->sectors = sectors;
@@ -1732,8 +1732,7 @@ static void close_sync(struct r10conf *conf)
wait_barrier(conf);
allow_barrier(conf);
- mempool_destroy(conf->r10buf_pool);
- conf->r10buf_pool = NULL;
+ mempool_exit(&conf->r10buf_pool);
}
static int raid10_spare_active(struct mddev *mddev)
@@ -2583,7 +2582,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
if (sectors > sect_to_write)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors' */
- wbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
+ wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
wbio->bi_iter.bi_sector = wsector +
@@ -2816,25 +2815,25 @@ static void raid10d(struct md_thread *thread)
static int init_resync(struct r10conf *conf)
{
- int buffs;
- int i;
+ int ret, buffs, i;
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
- BUG_ON(conf->r10buf_pool);
+ BUG_ON(mempool_initialized(&conf->r10buf_pool));
conf->have_replacement = 0;
for (i = 0; i < conf->geo.raid_disks; i++)
if (conf->mirrors[i].replacement)
conf->have_replacement = 1;
- conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
- if (!conf->r10buf_pool)
- return -ENOMEM;
+ ret = mempool_init(&conf->r10buf_pool, buffs,
+ r10buf_pool_alloc, r10buf_pool_free, conf);
+ if (ret)
+ return ret;
conf->next_resync = 0;
return 0;
}
static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
{
- struct r10bio *r10bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+ struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
struct rsync_pages *rp;
struct bio *bio;
int nalloc;
@@ -2945,7 +2944,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t chunk_mask = conf->geo.chunk_mask;
int page_idx = 0;
- if (!conf->r10buf_pool)
+ if (!mempool_initialized(&conf->r10buf_pool))
if (init_resync(conf))
return 0;
@@ -3699,13 +3698,13 @@ static struct r10conf *setup_conf(struct mddev *mddev)
conf->geo = geo;
conf->copies = copies;
- conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
- r10bio_pool_free, conf);
- if (!conf->r10bio_pool)
+ err = mempool_init(&conf->r10bio_pool, NR_RAID10_BIOS, r10bio_pool_alloc,
+ r10bio_pool_free, conf);
+ if (err)
goto out;
- conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
- if (!conf->bio_split)
+ err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
+ if (err)
goto out;
calc_sectors(conf, mddev->dev_sectors);
@@ -3733,6 +3732,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
init_waitqueue_head(&conf->wait_barrier);
atomic_set(&conf->nr_pending, 0);
+ err = -ENOMEM;
conf->thread = md_register_thread(raid10d, mddev, "raid10");
if (!conf->thread)
goto out;
@@ -3742,11 +3742,10 @@ static struct r10conf *setup_conf(struct mddev *mddev)
out:
if (conf) {
- mempool_destroy(conf->r10bio_pool);
+ mempool_exit(&conf->r10bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
- if (conf->bio_split)
- bioset_free(conf->bio_split);
+ bioset_exit(&conf->bio_split);
kfree(conf);
}
return ERR_PTR(err);
@@ -3953,7 +3952,7 @@ static int raid10_run(struct mddev *mddev)
out_free_conf:
md_unregister_thread(&mddev->thread);
- mempool_destroy(conf->r10bio_pool);
+ mempool_exit(&conf->r10bio_pool);
safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf);
@@ -3966,13 +3965,12 @@ static void raid10_free(struct mddev *mddev, void *priv)
{
struct r10conf *conf = priv;
- mempool_destroy(conf->r10bio_pool);
+ mempool_exit(&conf->r10bio_pool);
safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf->mirrors_old);
kfree(conf->mirrors_new);
- if (conf->bio_split)
- bioset_free(conf->bio_split);
+ bioset_exit(&conf->bio_split);
kfree(conf);
}
@@ -4543,7 +4541,7 @@ read_more:
* on all the target devices.
*/
// FIXME
- mempool_free(r10_bio, conf->r10buf_pool);
+ mempool_free(r10_bio, &conf->r10buf_pool);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
return sectors_done;
}
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index e2e8840de9bf..d3eaaf3eb1bc 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -93,10 +93,10 @@ struct r10conf {
*/
wait_queue_head_t wait_barrier;
- mempool_t *r10bio_pool;
- mempool_t *r10buf_pool;
+ mempool_t r10bio_pool;
+ mempool_t r10buf_pool;
struct page *tmppage;
- struct bio_set *bio_split;
+ struct bio_set bio_split;
/* When taking over an array from a different personality, we store
* the new thread here until we fully activate the array.
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 3c65f52b68f5..2b775abf377b 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -125,9 +125,9 @@ struct r5l_log {
struct list_head no_mem_stripes; /* pending stripes, -ENOMEM */
struct kmem_cache *io_kc;
- mempool_t *io_pool;
- struct bio_set *bs;
- mempool_t *meta_pool;
+ mempool_t io_pool;
+ struct bio_set bs;
+ mempool_t meta_pool;
struct md_thread *reclaim_thread;
unsigned long reclaim_target; /* number of space that need to be
@@ -579,7 +579,7 @@ static void r5l_log_endio(struct bio *bio)
md_error(log->rdev->mddev, log->rdev);
bio_put(bio);
- mempool_free(io->meta_page, log->meta_pool);
+ mempool_free(io->meta_page, &log->meta_pool);
spin_lock_irqsave(&log->io_list_lock, flags);
__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
@@ -748,7 +748,7 @@ static void r5l_submit_current_io(struct r5l_log *log)
static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
- struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);
+ struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, &log->bs);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio_set_dev(bio, log->rdev->bdev);
@@ -780,7 +780,7 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
struct r5l_io_unit *io;
struct r5l_meta_block *block;
- io = mempool_alloc(log->io_pool, GFP_ATOMIC);
+ io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
if (!io)
return NULL;
memset(io, 0, sizeof(*io));
@@ -791,7 +791,7 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
bio_list_init(&io->flush_barriers);
io->state = IO_UNIT_RUNNING;
- io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
+ io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
block = page_address(io->meta_page);
clear_page(block);
block->magic = cpu_to_le32(R5LOG_MAGIC);
@@ -1223,7 +1223,7 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
log->next_checkpoint = io->log_start;
list_del(&io->log_sibling);
- mempool_free(io, log->io_pool);
+ mempool_free(io, &log->io_pool);
r5l_run_no_mem_stripe(log);
found = true;
@@ -1647,7 +1647,7 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
{
struct page *page;
- ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, log->bs);
+ ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, &log->bs);
if (!ctx->ra_bio)
return -ENOMEM;
@@ -3066,6 +3066,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
struct request_queue *q = bdev_get_queue(rdev->bdev);
struct r5l_log *log;
char b[BDEVNAME_SIZE];
+ int ret;
pr_debug("md/raid:%s: using device %s as journal\n",
mdname(conf->mddev), bdevname(rdev->bdev, b));
@@ -3111,16 +3112,16 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
if (!log->io_kc)
goto io_kc;
- log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc);
- if (!log->io_pool)
+ ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
+ if (ret)
goto io_pool;
- log->bs = bioset_create(R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
- if (!log->bs)
+ ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ if (ret)
goto io_bs;
- log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
- if (!log->meta_pool)
+ ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
+ if (ret)
goto out_mempool;
spin_lock_init(&log->tree_lock);
@@ -3155,11 +3156,11 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
rcu_assign_pointer(conf->log, NULL);
md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
- mempool_destroy(log->meta_pool);
+ mempool_exit(&log->meta_pool);
out_mempool:
- bioset_free(log->bs);
+ bioset_exit(&log->bs);
io_bs:
- mempool_destroy(log->io_pool);
+ mempool_exit(&log->io_pool);
io_pool:
kmem_cache_destroy(log->io_kc);
io_kc:
@@ -3178,9 +3179,9 @@ void r5l_exit_log(struct r5conf *conf)
wake_up(&conf->mddev->sb_wait);
flush_work(&log->disable_writeback_work);
md_unregister_thread(&log->reclaim_thread);
- mempool_destroy(log->meta_pool);
- bioset_free(log->bs);
- mempool_destroy(log->io_pool);
+ mempool_exit(&log->meta_pool);
+ bioset_exit(&log->bs);
+ mempool_exit(&log->io_pool);
kmem_cache_destroy(log->io_kc);
kfree(log);
}
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 42890a08375b..3a7c36326589 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -105,9 +105,9 @@ struct ppl_conf {
atomic64_t seq; /* current log write sequence number */
struct kmem_cache *io_kc;
- mempool_t *io_pool;
- struct bio_set *bs;
- struct bio_set *flush_bs;
+ mempool_t io_pool;
+ struct bio_set bs;
+ struct bio_set flush_bs;
/* used only for recovery */
int recovered_entries;
@@ -244,7 +244,7 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
struct ppl_header *pplhdr;
struct page *header_page;
- io = mempool_alloc(ppl_conf->io_pool, GFP_NOWAIT);
+ io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
if (!io)
return NULL;
@@ -503,7 +503,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
struct bio *prev = bio;
bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
- ppl_conf->bs);
+ &ppl_conf->bs);
bio->bi_opf = prev->bi_opf;
bio_copy_dev(bio, prev);
bio->bi_iter.bi_sector = bio_end_sector(prev);
@@ -570,7 +570,7 @@ static void ppl_io_unit_finished(struct ppl_io_unit *io)
list_del(&io->log_sibling);
spin_unlock(&log->io_list_lock);
- mempool_free(io, ppl_conf->io_pool);
+ mempool_free(io, &ppl_conf->io_pool);
spin_lock(&ppl_conf->no_mem_stripes_lock);
if (!list_empty(&ppl_conf->no_mem_stripes)) {
@@ -642,7 +642,7 @@ static void ppl_do_flush(struct ppl_io_unit *io)
struct bio *bio;
char b[BDEVNAME_SIZE];
- bio = bio_alloc_bioset(GFP_NOIO, 0, ppl_conf->flush_bs);
+ bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
bio_set_dev(bio, bdev);
bio->bi_private = io;
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
@@ -1246,11 +1246,9 @@ static void __ppl_exit_log(struct ppl_conf *ppl_conf)
kfree(ppl_conf->child_logs);
- if (ppl_conf->bs)
- bioset_free(ppl_conf->bs);
- if (ppl_conf->flush_bs)
- bioset_free(ppl_conf->flush_bs);
- mempool_destroy(ppl_conf->io_pool);
+ bioset_exit(&ppl_conf->bs);
+ bioset_exit(&ppl_conf->flush_bs);
+ mempool_exit(&ppl_conf->io_pool);
kmem_cache_destroy(ppl_conf->io_kc);
kfree(ppl_conf);
@@ -1387,24 +1385,18 @@ int ppl_init_log(struct r5conf *conf)
goto err;
}
- ppl_conf->io_pool = mempool_create(conf->raid_disks, ppl_io_pool_alloc,
- ppl_io_pool_free, ppl_conf->io_kc);
- if (!ppl_conf->io_pool) {
- ret = -ENOMEM;
+ ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
+ ppl_io_pool_free, ppl_conf->io_kc);
+ if (ret)
goto err;
- }
- ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS);
- if (!ppl_conf->bs) {
- ret = -ENOMEM;
+ ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
+ if (ret)
goto err;
- }
- ppl_conf->flush_bs = bioset_create(conf->raid_disks, 0, 0);
- if (!ppl_conf->flush_bs) {
- ret = -ENOMEM;
+ ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
+ if (ret)
goto err;
- }
ppl_conf->count = conf->raid_disks;
ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index be117d0a65a8..a2e64989b01f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5192,7 +5192,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
/*
* use bio_clone_fast to make a copy of the bio
*/
- align_bi = bio_clone_fast(raid_bio, GFP_NOIO, mddev->bio_set);
+ align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
if (!align_bi)
return 0;
/*
@@ -5277,7 +5277,7 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
if (sectors < bio_sectors(raid_bio)) {
struct r5conf *conf = mddev->private;
- split = bio_split(raid_bio, sectors, GFP_NOIO, conf->bio_split);
+ split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
bio_chain(split, raid_bio);
generic_make_request(raid_bio);
raid_bio = split;
@@ -6773,8 +6773,7 @@ static void free_conf(struct r5conf *conf)
if (conf->disks[i].extra_page)
put_page(conf->disks[i].extra_page);
kfree(conf->disks);
- if (conf->bio_split)
- bioset_free(conf->bio_split);
+ bioset_exit(&conf->bio_split);
kfree(conf->stripe_hashtbl);
kfree(conf->pending_data);
kfree(conf);
@@ -6853,6 +6852,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
int i;
int group_cnt, worker_cnt_per_group;
struct r5worker_group *new_group;
+ int ret;
if (mddev->new_level != 5
&& mddev->new_level != 4
@@ -6950,8 +6950,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
goto abort;
}
- conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
- if (!conf->bio_split)
+ ret = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
+ if (ret)
goto abort;
conf->mddev = mddev;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 3f8da26032ac..72e75ba6abf0 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -669,7 +669,7 @@ struct r5conf {
int pool_size; /* number of disks in stripeheads in pool */
spinlock_t device_lock;
struct disk_info *disks;
- struct bio_set *bio_split;
+ struct bio_set bio_split;
/* When taking over an array from a different personality, we store
* the new thread here until we fully activate the array.
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 57b13dfbd21e..a15181fa45f7 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2094,14 +2094,9 @@ static const struct block_device_operations msb_bdops = {
static int msb_init_disk(struct memstick_dev *card)
{
struct msb_data *msb = memstick_get_drvdata(card);
- struct memstick_host *host = card->host;
int rc;
- u64 limit = BLK_BOUNCE_HIGH;
unsigned long capacity;
- if (host->dev.dma_mask && *(host->dev.dma_mask))
- limit = *(host->dev.dma_mask);
-
mutex_lock(&msb_disk_lock);
msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
mutex_unlock(&msb_disk_lock);
@@ -2123,7 +2118,6 @@ static int msb_init_disk(struct memstick_dev *card)
msb->queue->queuedata = card;
- blk_queue_bounce_limit(msb->queue, limit);
blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
blk_queue_max_segment_size(msb->queue,
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 8897962781bb..5ee932631fae 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1170,17 +1170,12 @@ static int mspro_block_init_card(struct memstick_dev *card)
static int mspro_block_init_disk(struct memstick_dev *card)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
- struct memstick_host *host = card->host;
struct mspro_devinfo *dev_info = NULL;
struct mspro_sys_info *sys_info = NULL;
struct mspro_sys_attr *s_attr = NULL;
int rc, disk_id;
- u64 limit = BLK_BOUNCE_HIGH;
unsigned long capacity;
- if (host->dev.dma_mask && *(host->dev.dma_mask))
- limit = *(host->dev.dma_mask);
-
for (rc = 0; msb->attr_group.attrs[rc]; ++rc) {
s_attr = mspro_from_sysfs_attr(msb->attr_group.attrs[rc]);
@@ -1219,7 +1214,6 @@ static int mspro_block_init_disk(struct memstick_dev *card)
msb->queue->queuedata = card;
- blk_queue_bounce_limit(msb->queue, limit);
blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
blk_queue_max_segment_size(msb->queue,
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 86503f60468f..19a5aa70ecda 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1929,7 +1929,7 @@ static enum blk_eh_timer_return mptsas_eh_timed_out(struct scsi_cmnd *sc)
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
VirtDevice *vdevice;
- enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
+ enum blk_eh_timer_return rc = BLK_EH_DONE;
hd = shost_priv(sc->device->host);
if (hd == NULL) {
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 1b52b8557034..2060d1483043 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -419,10 +419,25 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
/* Verify that EC can process command */
for (i = 0; i < len; i++) {
rx_byte = rx_buf[i];
+ /*
+ * Seeing the PAST_END, RX_BAD_DATA, or NOT_READY
+ * markers are all signs that the EC didn't fully
+ * receive our command. e.g., if the EC is flashing
+ * itself, it can't respond to any commands and instead
+ * clocks out EC_SPI_PAST_END from its SPI hardware
+ * buffer. Similar occurrences can happen if the AP is
+ * too slow to clock out data after asserting CS -- the
+ * EC will abort and fill its buffer with
+ * EC_SPI_RX_BAD_DATA.
+ *
+ * In all cases, these errors should be safe to retry.
+ * Report -EAGAIN and let the caller decide what to do
+ * about that.
+ */
if (rx_byte == EC_SPI_PAST_END ||
rx_byte == EC_SPI_RX_BAD_DATA ||
rx_byte == EC_SPI_NOT_READY) {
- ret = -EREMOTEIO;
+ ret = -EAGAIN;
break;
}
}
@@ -431,7 +446,7 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
if (!ret)
ret = cros_ec_spi_receive_packet(ec_dev,
ec_msg->insize + sizeof(*response));
- else
+ else if (ret != -EAGAIN)
dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
final_ret = terminate_request(ec_dev);
@@ -537,10 +552,11 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
/* Verify that EC can process command */
for (i = 0; i < len; i++) {
rx_byte = rx_buf[i];
+ /* See comments in cros_ec_pkt_xfer_spi() */
if (rx_byte == EC_SPI_PAST_END ||
rx_byte == EC_SPI_RX_BAD_DATA ||
rx_byte == EC_SPI_NOT_READY) {
- ret = -EREMOTEIO;
+ ret = -EAGAIN;
break;
}
}
@@ -549,7 +565,7 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
if (!ret)
ret = cros_ec_spi_receive_response(ec_dev,
ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
- else
+ else if (ret != -EAGAIN)
dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
final_ret = terminate_request(ec_dev);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index a4c9c8297a6d..918d4fb742d1 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -717,6 +717,7 @@ struct cxl {
bool perst_select_user;
bool perst_same_image;
bool psl_timebase_synced;
+ bool tunneled_ops_supported;
/*
* number of contexts mapped on to this card. Possible values are:
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 83f1d08058fc..4d6736f9d463 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1742,6 +1742,15 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
/* Required for devices using CAPP DMA mode, harmless for others */
pci_set_master(dev);
+ adapter->tunneled_ops_supported = false;
+
+ if (cxl_is_power9()) {
+ if (pnv_pci_set_tunnel_bar(dev, 0x00020000E0000000ull, 1))
+ dev_info(&dev->dev, "Tunneled operations unsupported\n");
+ else
+ adapter->tunneled_ops_supported = true;
+ }
+
if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
goto err;
@@ -1768,6 +1777,9 @@ static void cxl_deconfigure_adapter(struct cxl *adapter)
{
struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
+ if (cxl_is_power9())
+ pnv_pci_set_tunnel_bar(pdev, 0x00020000E0000000ull, 0);
+
cxl_native_release_psl_err_irq(adapter);
cxl_unmap_adapter_regs(adapter);
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 95285b7f636f..4b5a4c5d3c01 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -78,6 +78,15 @@ static ssize_t psl_timebase_synced_show(struct device *device,
return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
}
+static ssize_t tunneled_ops_supported_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl *adapter = to_cxl_adapter(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported);
+}
+
static ssize_t reset_adapter_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -183,6 +192,7 @@ static struct device_attribute adapter_attrs[] = {
__ATTR_RO(base_image),
__ATTR_RO(image_loaded),
__ATTR_RO(psl_timebase_synced),
+ __ATTR_RO(tunneled_ops_supported),
__ATTR_RW(load_image_on_perst),
__ATTR_RW(perst_reloads_same_image),
__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 0c125f207aea..33053b0d1fdf 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -518,7 +518,7 @@ static int at24_get_pdata(struct device *dev, struct at24_platform_data *pdata)
if (of_node && of_match_device(at24_of_match, dev))
cdata = of_device_get_match_data(dev);
else if (id)
- cdata = (void *)&id->driver_data;
+ cdata = (void *)id->driver_data;
else
cdata = acpi_device_get_match_data(dev);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 9e923cd1d80e..d89e17829527 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -244,7 +244,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
mq = &md->queue;
/* Dispatch locking to the block layer */
- req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
+ req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req)) {
count = PTR_ERR(req);
goto out_put;
@@ -650,8 +650,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
*/
mq = &md->queue;
req = blk_get_request(mq->queue,
- idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
- __GFP_RECLAIM);
+ idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto cmd_done;
@@ -721,8 +720,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
*/
mq = &md->queue;
req = blk_get_request(mq->queue,
- idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
- __GFP_RECLAIM);
+ idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto cmd_err;
@@ -2485,7 +2483,7 @@ static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
break;
}
- return 0;
+ return ret;
}
#ifdef CONFIG_COMPAT
@@ -2750,7 +2748,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
int ret;
/* Ask the block layer about the card status */
- req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
@@ -2786,7 +2784,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
return -ENOMEM;
/* Ask the block layer for the EXT CSD */
- req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out_free;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 56e9a803db21..648eb6743ed5 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -111,8 +111,9 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
__mmc_cqe_recovery_notifier(mq);
return BLK_EH_RESET_TIMER;
}
- /* No timeout */
- return BLK_EH_HANDLED;
+ /* No timeout (XXX: huh? comment doesn't make much sense) */
+ blk_mq_complete_request(req);
+ return BLK_EH_DONE;
default:
/* Timeout is handled by mmc core */
return BLK_EH_RESET_TIMER;
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 0ef741bc515d..d0e83db42ae5 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -33,6 +33,8 @@ struct sdhci_iproc_host {
const struct sdhci_iproc_data *data;
u32 shadow_cmd;
u32 shadow_blk;
+ bool is_cmd_shadowed;
+ bool is_blk_shadowed;
};
#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
@@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
{
- u32 val = sdhci_iproc_readl(host, (reg & ~3));
- u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
+ u32 val;
+ u16 word;
+
+ if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
+ /* Get the saved transfer mode */
+ val = iproc_host->shadow_cmd;
+ } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
+ iproc_host->is_blk_shadowed) {
+ /* Get the saved block info */
+ val = iproc_host->shadow_blk;
+ } else {
+ val = sdhci_iproc_readl(host, (reg & ~3));
+ }
+ word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
return word;
}
@@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
if (reg == SDHCI_COMMAND) {
/* Write the block now as we are issuing a command */
- if (iproc_host->shadow_blk != 0) {
+ if (iproc_host->is_blk_shadowed) {
sdhci_iproc_writel(host, iproc_host->shadow_blk,
SDHCI_BLOCK_SIZE);
- iproc_host->shadow_blk = 0;
+ iproc_host->is_blk_shadowed = false;
}
oldval = iproc_host->shadow_cmd;
- } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
+ iproc_host->is_cmd_shadowed = false;
+ } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
+ iproc_host->is_blk_shadowed) {
/* Block size and count are stored in shadow reg */
oldval = iproc_host->shadow_blk;
} else {
@@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
if (reg == SDHCI_TRANSFER_MODE) {
/* Save the transfer mode until the command is issued */
iproc_host->shadow_cmd = newval;
+ iproc_host->is_cmd_shadowed = true;
} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
/* Save the block info until the command is issued */
iproc_host->shadow_blk = newval;
+ iproc_host->is_blk_shadowed = true;
} else {
/* Command or other regular 32-bit write */
sdhci_iproc_writel(host, newval, reg & ~3);
@@ -166,7 +186,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {
static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
- .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
.ops = &sdhci_iproc_32only_ops,
};
@@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = {
.caps1 = SDHCI_DRIVER_TYPE_C |
SDHCI_DRIVER_TYPE_D |
SDHCI_SUPPORT_DDR50,
- .mmc_caps = MMC_CAP_1_8V_DDR,
};
static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 16ae4ae8e8f9..29c0bfd74e8a 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -82,7 +82,6 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
block = blk_rq_pos(req) << 9 >> tr->blkshift;
nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
- buf = bio_data(req->bio);
if (req_op(req) == REQ_OP_FLUSH) {
if (tr->flush(dev))
@@ -100,9 +99,14 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
return BLK_STS_IOERR;
return BLK_STS_OK;
case REQ_OP_READ:
- for (; nsect > 0; nsect--, block++, buf += tr->blksize)
- if (tr->readsect(dev, block, buf))
+ buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
+ for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
+ if (tr->readsect(dev, block, buf)) {
+ kunmap(bio_page(req->bio));
return BLK_STS_IOERR;
+ }
+ }
+ kunmap(bio_page(req->bio));
rq_flush_dcache_pages(req);
return BLK_STS_OK;
case REQ_OP_WRITE:
@@ -110,9 +114,14 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
return BLK_STS_IOERR;
rq_flush_dcache_pages(req);
- for (; nsect > 0; nsect--, block++, buf += tr->blksize)
- if (tr->writesect(dev, block, buf))
+ buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
+ for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
+ if (tr->writesect(dev, block, buf)) {
+ kunmap(bio_page(req->bio));
return BLK_STS_IOERR;
+ }
+ }
+ kunmap(bio_page(req->bio));
return BLK_STS_OK;
default:
return BLK_STS_IOERR;
@@ -418,7 +427,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
new->rq->queuedata = new;
blk_queue_logical_block_size(new->rq, tr->blksize);
- blk_queue_bounce_limit(new->rq, BLK_BOUNCE_HIGH);
blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index db5ec4e8bde9..ebb1d141b900 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1194,11 +1194,13 @@ static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
NDCB0_CMD2(NAND_CMD_READSTART);
/*
- * Trigger the naked read operation only on the last chunk.
- * Otherwise, use monolithic read.
+ * Trigger the monolithic read on the first chunk, then naked read on
+ * intermediate chunks and finally a last naked read on the last chunk.
*/
- if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
+ if (chunk == 0)
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+ else if (chunk < lt->nchunks - 1)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
else
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 78616787f2a3..3da5fca77cbd 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1712,6 +1712,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_FE,
},
{
+ .chip_id = BCM5389_DEVICE_ID,
+ .dev_name = "BCM5389",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
.chip_id = BCM5395_DEVICE_ID,
.dev_name = "BCM5395",
.vlans = 4096,
@@ -2034,6 +2046,7 @@ int b53_switch_detect(struct b53_device *dev)
else
dev->chip_id = BCM5365_DEVICE_ID;
break;
+ case BCM5389_DEVICE_ID:
case BCM5395_DEVICE_ID:
case BCM5397_DEVICE_ID:
case BCM5398_DEVICE_ID:
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index fa7556f5d4fb..a533a90e3904 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -285,6 +285,7 @@ static const struct b53_io_ops b53_mdio_ops = {
#define B53_BRCM_OUI_1 0x0143bc00
#define B53_BRCM_OUI_2 0x03625c00
#define B53_BRCM_OUI_3 0x00406000
+#define B53_BRCM_OUI_4 0x01410c00
static int b53_mdio_probe(struct mdio_device *mdiodev)
{
@@ -311,7 +312,8 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
*/
if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
(phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
- (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) {
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_3 &&
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_4) {
dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
return -ENODEV;
}
@@ -360,6 +362,7 @@ static const struct of_device_id b53_of_match[] = {
{ .compatible = "brcm,bcm53125" },
{ .compatible = "brcm,bcm53128" },
{ .compatible = "brcm,bcm5365" },
+ { .compatible = "brcm,bcm5389" },
{ .compatible = "brcm,bcm5395" },
{ .compatible = "brcm,bcm5397" },
{ .compatible = "brcm,bcm5398" },
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 1187ebd79287..3b57f47d0e79 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -48,6 +48,7 @@ struct b53_io_ops {
enum {
BCM5325_DEVICE_ID = 0x25,
BCM5365_DEVICE_ID = 0x65,
+ BCM5389_DEVICE_ID = 0x89,
BCM5395_DEVICE_ID = 0x95,
BCM5397_DEVICE_ID = 0x97,
BCM5398_DEVICE_ID = 0x98,
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 23b45da784cb..b89acaee12d4 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -354,10 +354,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
/* Locate the first rule available */
if (fs->location == RX_CLS_LOC_ANY)
rule_index = find_first_zero_bit(priv->cfp.used,
- bcm_sf2_cfp_rule_size(priv));
+ priv->num_cfp_rules);
else
rule_index = fs->location;
+ if (rule_index > bcm_sf2_cfp_rule_size(priv))
+ return -ENOSPC;
+
layout = &udf_tcpip4_layout;
/* We only use one UDF slice for now */
slice_num = bcm_sf2_get_slice_number(layout, 0);
@@ -562,19 +565,21 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
* first half because the HW search is by incrementing addresses.
*/
if (fs->location == RX_CLS_LOC_ANY)
- rule_index[0] = find_first_zero_bit(priv->cfp.used,
- bcm_sf2_cfp_rule_size(priv));
+ rule_index[1] = find_first_zero_bit(priv->cfp.used,
+ priv->num_cfp_rules);
else
- rule_index[0] = fs->location;
+ rule_index[1] = fs->location;
+ if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
+ return -ENOSPC;
/* Flag it as used (cleared on error path) such that we can immediately
* obtain a second one to chain from.
*/
- set_bit(rule_index[0], priv->cfp.used);
+ set_bit(rule_index[1], priv->cfp.used);
- rule_index[1] = find_first_zero_bit(priv->cfp.used,
- bcm_sf2_cfp_rule_size(priv));
- if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) {
+ rule_index[0] = find_first_zero_bit(priv->cfp.used,
+ priv->num_cfp_rules);
+ if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
ret = -ENOSPC;
goto out_err;
}
@@ -712,14 +717,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
/* Flag the second half rule as being used now, return it as the
* location, and flag it as unique while dumping rules
*/
- set_bit(rule_index[1], priv->cfp.used);
+ set_bit(rule_index[0], priv->cfp.used);
set_bit(rule_index[1], priv->cfp.unique);
fs->location = rule_index[1];
return ret;
out_err:
- clear_bit(rule_index[0], priv->cfp.used);
+ clear_bit(rule_index[1], priv->cfp.used);
return ret;
}
@@ -785,10 +790,6 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
int ret;
u32 reg;
- /* Refuse deletion of unused rules, and the default reserved rule */
- if (!test_bit(loc, priv->cfp.used) || loc == 0)
- return -EINVAL;
-
/* Indicate which rule we want to read */
bcm_sf2_cfp_rule_addr_set(priv, loc);
@@ -826,6 +827,13 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
u32 next_loc = 0;
int ret;
+ /* Refuse deleting unused rules, and those that are not unique since
+ * that could leave IPv6 rules with one of the chained rule in the
+ * table.
+ */
+ if (!test_bit(loc, priv->cfp.unique) || loc == 0)
+ return -EINVAL;
+
ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 36c8950dbd2d..176861bd2252 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
vp->mii.reg_num_mask = 0x1f;
/* Makes sure rings are at least 16 byte aligned. */
- vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
- &vp->rx_ring_dma);
+ &vp->rx_ring_dma, GFP_KERNEL);
retval = -ENOMEM;
if (!vp->rx_ring)
goto free_device;
@@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
return 0;
free_ring:
- pci_free_consistent(pdev,
- sizeof(struct boom_rx_desc) * RX_RING_SIZE
- + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
- vp->rx_ring,
- vp->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring, vp->rx_ring_dma);
free_device:
free_netdev(dev);
pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
@@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev)
break; /* Bad news! */
skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
- dma = pci_map_single(VORTEX_PCI(vp), skb->data,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+ dma = dma_map_single(vp->gendev, skb->data,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ if (dma_mapping_error(vp->gendev, dma))
break;
vp->rx_ring[i].addr = cpu_to_le32(dma);
}
@@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (vp->bus_master) {
/* Set the bus-master controller to transfer the packet. */
int len = (skb->len + 3) & ~3;
- vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
- PCI_DMA_TODEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+ vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
if (!skb_shinfo(skb)->nr_frags) {
- dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+ dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(vp->gendev, dma_addr))
goto out_dma_err;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
int i;
- dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+ dma_addr = dma_map_single(vp->gendev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(vp->gendev, dma_addr))
goto out_dma_err;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+ dma_addr = skb_frag_dma_map(vp->gendev, frag,
0,
frag->size,
DMA_TO_DEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+ if (dma_mapping_error(vp->gendev, dma_addr)) {
for(i = i-1; i >= 0; i--)
- dma_unmap_page(&VORTEX_PCI(vp)->dev,
+ dma_unmap_page(vp->gendev,
le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
DMA_TO_DEVICE);
- pci_unmap_single(VORTEX_PCI(vp),
+ dma_unmap_single(vp->gendev,
le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
le32_to_cpu(vp->tx_ring[entry].frag[0].length),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
goto out_dma_err;
}
@@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
#else
- dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+ dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(vp->gendev, dma_addr))
goto out_dma_err;
vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
@@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
out:
return NETDEV_TX_OK;
out_dma_err:
- dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+ dev_err(vp->gendev, "Error mapping dma buffer\n");
goto out;
}
@@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id)
if (status & DMADone) {
if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
- pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+ dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
pkts_compl++;
bytes_compl += vp->tx_skb->len;
dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
@@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id)
struct sk_buff *skb = vp->tx_skbuff[entry];
#if DO_ZEROCOPY
int i;
- pci_unmap_single(VORTEX_PCI(vp),
+ dma_unmap_single(vp->gendev,
le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
- pci_unmap_page(VORTEX_PCI(vp),
+ dma_unmap_page(vp->gendev,
le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
#else
- pci_unmap_single(VORTEX_PCI(vp),
- le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(vp->gendev,
+ le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
#endif
pkts_compl++;
bytes_compl += skb->len;
@@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev)
/* 'skb_put()' points to the start of sk_buff data area. */
if (vp->bus_master &&
! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
- dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
+ pkt_len, DMA_FROM_DEVICE);
iowrite32(dma, ioaddr + Wn7_MasterAddr);
iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
iowrite16(StartDMAUp, ioaddr + EL3_CMD);
while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
;
- pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
} else {
ioread32_rep(ioaddr + RX_FIFO,
skb_put(skb, pkt_len),
@@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev)
if (pkt_len < rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
/* 'skb_put()' points to the start of sk_buff data area. */
skb_put_data(skb, vp->rx_skbuff[entry]->data,
pkt_len);
- pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
vp->rx_copy++;
} else {
/* Pre-allocate the replacement skb. If it or its
@@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev)
dev->stats.rx_dropped++;
goto clear_complete;
}
- newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+ newdma = dma_map_single(vp->gendev, newskb->data,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ if (dma_mapping_error(vp->gendev, newdma)) {
dev->stats.rx_dropped++;
consume_skb(newskb);
goto clear_complete;
@@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev)
vp->rx_skbuff[entry] = newskb;
vp->rx_ring[entry].addr = cpu_to_le32(newdma);
skb_put(skb, pkt_len);
- pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
vp->rx_nocopy++;
}
skb->protocol = eth_type_trans(skb, dev);
@@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev)
if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
for (i = 0; i < RX_RING_SIZE; i++)
if (vp->rx_skbuff[i]) {
- pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb(vp->rx_skbuff[i]);
vp->rx_skbuff[i] = NULL;
}
@@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev)
int k;
for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
- pci_unmap_single(VORTEX_PCI(vp),
+ dma_unmap_single(vp->gendev,
le32_to_cpu(vp->tx_ring[i].frag[k].addr),
le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
#else
- pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
#endif
dev_kfree_skb(skb);
vp->tx_skbuff[i] = NULL;
@@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, vp->ioaddr);
- pci_free_consistent(pdev,
- sizeof(struct boom_rx_desc) * RX_RING_SIZE
- + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
- vp->rx_ring,
- vp->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring, vp->rx_ring_dma);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index ac99d089ac72..1c97e39b478e 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -164,7 +164,9 @@ bad_clone_list[] __initdata = {
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
-#if defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */
+#if defined(CONFIG_MACH_TX49XX)
+# define DCR_VAL 0x48 /* 8-bit mode */
+#elif defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */
# define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49)
#else
# define DCR_VAL 0x49
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index a561705f232c..be198cc0b10c 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1552,22 +1552,26 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!ioaddr) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("card has no PCI IO resources, aborting\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto err_disable_dev;
}
err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
if (err) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("architecture does not support 32bit PCI busmaster DMA\n");
- return err;
+ goto err_disable_dev;
}
if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("io address range already allocated\n");
- return -EBUSY;
+ err = -EBUSY;
+ goto err_disable_dev;
}
err = pcnet32_probe1(ioaddr, 1, pdev);
+
+err_disable_dev:
if (err < 0)
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index b57acb8dc35b..dc25066c59a1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -419,15 +419,15 @@ static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
- {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
- {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
- {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
- {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
- {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
- {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
- {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
- {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
- {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
+ {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
+ {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
+ {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
+ {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
+ {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
+ {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
+ {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
+ {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
+ {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
};
static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
@@ -444,16 +444,6 @@ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
- {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
- {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
- {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
- {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
- {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
- {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
- {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
- {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
- {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
- {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
};
static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index db92f1858060..b76447baccaf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -836,7 +836,7 @@ bool is_filter_exact_match(struct adapter *adap,
{
struct tp_params *tp = &adap->params.tp;
u64 hash_filter_mask = tp->hash_filter_mask;
- u32 mask;
+ u64 ntuple_mask = 0;
if (!is_hashfilter(adap))
return false;
@@ -865,73 +865,45 @@ bool is_filter_exact_match(struct adapter *adap,
if (!fs->val.fport || fs->mask.fport != 0xffff)
return false;
- if (tp->fcoe_shift >= 0) {
- mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W;
- if (mask && !fs->mask.fcoe)
- return false;
- }
+ /* calculate tuple mask and compare with mask configured in hw */
+ if (tp->fcoe_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
- if (tp->port_shift >= 0) {
- mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W;
- if (mask && !fs->mask.iport)
- return false;
- }
+ if (tp->port_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
if (tp->vnic_shift >= 0) {
- mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W;
-
- if ((adap->params.tp.ingress_config & VNIC_F)) {
- if (mask && !fs->mask.pfvf_vld)
- return false;
- } else {
- if (mask && !fs->mask.ovlan_vld)
- return false;
- }
+ if ((adap->params.tp.ingress_config & VNIC_F))
+ ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
+ else
+ ntuple_mask |= (u64)fs->mask.ovlan_vld <<
+ tp->vnic_shift;
}
- if (tp->vlan_shift >= 0) {
- mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W;
- if (mask && !fs->mask.ivlan)
- return false;
- }
+ if (tp->vlan_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
- if (tp->tos_shift >= 0) {
- mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W;
- if (mask && !fs->mask.tos)
- return false;
- }
+ if (tp->tos_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
- if (tp->protocol_shift >= 0) {
- mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W;
- if (mask && !fs->mask.proto)
- return false;
- }
+ if (tp->protocol_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
- if (tp->ethertype_shift >= 0) {
- mask = (hash_filter_mask >> tp->ethertype_shift) &
- FT_ETHERTYPE_W;
- if (mask && !fs->mask.ethtype)
- return false;
- }
+ if (tp->ethertype_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
- if (tp->macmatch_shift >= 0) {
- mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W;
- if (mask && !fs->mask.macidx)
- return false;
- }
+ if (tp->macmatch_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
+
+ if (tp->matchtype_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
+
+ if (tp->frag_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
+
+ if (ntuple_mask != hash_filter_mask)
+ return false;
- if (tp->matchtype_shift >= 0) {
- mask = (hash_filter_mask >> tp->matchtype_shift) &
- FT_MPSHITTYPE_W;
- if (mask && !fs->mask.matchtype)
- return false;
- }
- if (tp->frag_shift >= 0) {
- mask = (hash_filter_mask >> tp->frag_shift) &
- FT_FRAGMENTATION_W;
- if (mask && !fs->mask.frag)
- return false;
- }
return true;
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 81684acf52af..8a8b12b720ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2747,11 +2747,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
/* Query PCI controller on system for DMA addressing
- * limitation for the device. Try 64-bit first, and
+ * limitation for the device. Try 47-bit first, and
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
@@ -2765,10 +2765,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_release_regions;
}
} else {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
if (err) {
dev_err(dev, "Unable to obtain %u-bit DMA "
- "for consistent allocations, aborting\n", 64);
+ "for consistent allocations, aborting\n", 47);
goto err_out_release_regions;
}
using_dac = 1;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c697e79e491e..8f755009ff38 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3309,7 +3309,9 @@ void be_detect_error(struct be_adapter *adapter)
if ((val & POST_STAGE_FAT_LOG_START)
!= POST_STAGE_FAT_LOG_START &&
(val & POST_STAGE_ARMFW_UE)
- != POST_STAGE_ARMFW_UE)
+ != POST_STAGE_ARMFW_UE &&
+ (val & POST_STAGE_RECOVERABLE_ERR)
+ != POST_STAGE_RECOVERABLE_ERR)
return;
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4604bc8eb5b..9d3eed46830d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index f81439796ac7..43d973215040 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Fast Ethernet Controller (ENET) PTP driver for MX6x.
*
* Copyright (C) 2012 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6e8d6a6f6aaf..5ec1185808e5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -192,6 +192,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
if (adapter->fw_done_rc) {
dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
adapter->fw_done_rc);
+ dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
return -1;
}
return 0;
@@ -795,9 +796,11 @@ static int ibmvnic_login(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unsigned long timeout = msecs_to_jiffies(30000);
int retry_count = 0;
+ bool retry;
int rc;
do {
+ retry = false;
if (retry_count > IBMVNIC_MAX_QUEUES) {
netdev_warn(netdev, "Login attempts exceeded\n");
return -1;
@@ -821,6 +824,9 @@ static int ibmvnic_login(struct net_device *netdev)
retry_count++;
release_sub_crqs(adapter, 1);
+ retry = true;
+ netdev_dbg(netdev,
+ "Received partial success, retrying...\n");
adapter->init_done_rc = 0;
reinit_completion(&adapter->init_done);
send_cap_queries(adapter);
@@ -848,7 +854,7 @@ static int ibmvnic_login(struct net_device *netdev)
netdev_warn(netdev, "Adapter login failed\n");
return -1;
}
- } while (adapter->init_done_rc == PARTIALSUCCESS);
+ } while (retry);
/* handle pending MAC address changes after successful login */
if (adapter->mac_change_pending) {
@@ -1821,9 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (rc)
return rc;
}
+ ibmvnic_disable_irqs(adapter);
}
-
- ibmvnic_disable_irqs(adapter);
adapter->state = VNIC_CLOSED;
if (reset_state == VNIC_CLOSED)
@@ -2617,18 +2622,21 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
{
struct device *dev = &adapter->vdev->dev;
unsigned long rc;
- u64 val;
if (scrq->hw_irq > 0x100000000ULL) {
dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
return 1;
}
- val = (0xff000000) | scrq->hw_irq;
- rc = plpar_hcall_norets(H_EOI, val);
- if (rc)
- dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
- val, rc);
+ if (adapter->resetting &&
+ adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ u64 val = (0xff000000) | scrq->hw_irq;
+
+ rc = plpar_hcall_norets(H_EOI, val);
+ if (rc)
+ dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+ val, rc);
+ }
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
@@ -4586,14 +4594,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
release_crq_queue(adapter);
}
- rc = init_stats_buffers(adapter);
- if (rc)
- return rc;
-
- rc = init_stats_token(adapter);
- if (rc)
- return rc;
-
return rc;
}
@@ -4662,13 +4662,21 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
goto ibmvnic_init_fail;
} while (rc == EAGAIN);
+ rc = init_stats_buffers(adapter);
+ if (rc)
+ goto ibmvnic_init_fail;
+
+ rc = init_stats_token(adapter);
+ if (rc)
+ goto ibmvnic_stats_fail;
+
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
rc = device_create_file(&dev->dev, &dev_attr_failover);
if (rc)
- goto ibmvnic_init_fail;
+ goto ibmvnic_dev_file_err;
netif_carrier_off(netdev);
rc = register_netdev(netdev);
@@ -4687,6 +4695,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
ibmvnic_register_fail:
device_remove_file(&dev->dev, &dev_attr_failover);
+ibmvnic_dev_file_err:
+ release_stats_token(adapter);
+
+ibmvnic_stats_fail:
+ release_stats_buffers(adapter);
+
ibmvnic_init_fail:
release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index afadba99f7b8..2ecd55856c50 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9054,7 +9054,6 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
{
const struct tc_action *a;
LIST_HEAD(actions);
- int err;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
@@ -9075,11 +9074,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
if (!dev)
return -EINVAL;
- err = handle_redirect_action(adapter, dev->ifindex, queue,
- action);
- if (err == 0)
- return err;
+ return handle_redirect_action(adapter, dev->ifindex,
+ queue, action);
}
+
+ return -EINVAL;
}
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index a822f7a56bc5..685337d58276 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -43,12 +43,12 @@
#include "fw.h"
/*
- * We allocate in as big chunks as we can, up to a maximum of 256 KB
- * per chunk.
+ * We allocate in page size (default 4KB on many archs) chunks to avoid high
+ * order memory allocations in fragmented/high usage memory situation.
*/
enum {
- MLX4_ICM_ALLOC_SIZE = 1 << 18,
- MLX4_TABLE_CHUNK_SIZE = 1 << 18
+ MLX4_ICM_ALLOC_SIZE = PAGE_SIZE,
+ MLX4_TABLE_CHUNK_SIZE = PAGE_SIZE,
};
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -398,9 +398,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u64 size;
obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
+ if (WARN_ON(!obj_per_chunk))
+ return -EINVAL;
num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
- table->icm = kcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
+ table->icm = kvzalloc(num_icm * sizeof(*table->icm), GFP_KERNEL);
if (!table->icm)
return -ENOMEM;
table->virt = virt;
@@ -446,7 +448,7 @@ err:
mlx4_free_icm(dev, table->icm[i], use_coherent);
}
- kfree(table->icm);
+ kvfree(table->icm);
return -ENOMEM;
}
@@ -462,5 +464,5 @@ void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
mlx4_free_icm(dev, table->icm[i], table->coherent);
}
- kfree(table->icm);
+ kvfree(table->icm);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 2edcce98ab2d..65482f004e50 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -172,7 +172,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
list_add_tail(&dev_ctx->list, &priv->ctx_list);
spin_unlock_irqrestore(&priv->ctx_lock, flags);
- mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n",
+ mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
dev_ctx->intf->protocol, enable ?
"enabled" : "disabled");
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 211578ffc70d..60172a38c4a4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2929,6 +2929,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create file for port %d\n", port);
devlink_port_unregister(&info->devlink_port);
info->port = -1;
+ return err;
}
sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
@@ -2950,9 +2951,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
&info->port_attr);
devlink_port_unregister(&info->devlink_port);
info->port = -1;
+ return err;
}
- return err;
+ return 0;
}
static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 3aaf4bad6c5a..427e7a31862c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
struct mlx4_qp *qp;
- spin_lock(&qp_table->lock);
+ spin_lock_irq(&qp_table->lock);
qp = __mlx4_qp_lookup(dev, qpn);
- spin_unlock(&qp_table->lock);
+ spin_unlock_irq(&qp_table->lock);
return qp;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 176645762e49..1ff0b0e93804 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -615,6 +615,45 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
}
+static __be32 mlx5e_get_fcs(struct sk_buff *skb)
+{
+ int last_frag_sz, bytes_in_prev, nr_frags;
+ u8 *fcs_p1, *fcs_p2;
+ skb_frag_t *last_frag;
+ __be32 fcs_bytes;
+
+ if (!skb_is_nonlinear(skb))
+ return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
+ last_frag_sz = skb_frag_size(last_frag);
+
+ /* If all FCS data is in last frag */
+ if (last_frag_sz >= ETH_FCS_LEN)
+ return *(__be32 *)(skb_frag_address(last_frag) +
+ last_frag_sz - ETH_FCS_LEN);
+
+ fcs_p2 = (u8 *)skb_frag_address(last_frag);
+ bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
+
+ /* Find where the other part of the FCS is - Linear or another frag */
+ if (nr_frags == 1) {
+ fcs_p1 = skb_tail_pointer(skb);
+ } else {
+ skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
+
+ fcs_p1 = skb_frag_address(prev_frag) +
+ skb_frag_size(prev_frag);
+ }
+ fcs_p1 -= bytes_in_prev;
+
+ memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
+ memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
+
+ return fcs_bytes;
+}
+
static inline void mlx5e_handle_csum(struct net_device *netdev,
struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
@@ -643,6 +682,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
skb->csum = csum_partial(skb->data + ETH_HLEN,
network_depth - ETH_HLEN,
skb->csum);
+ if (unlikely(netdev->features & NETIF_F_RXFCS))
+ skb->csum = csum_add(skb->csum,
+ (__force __wsum)mlx5e_get_fcs(skb));
rq->stats.csum_complete++;
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 0f5da499a223..fad8c2e3804e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -237,19 +237,17 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
context->buf.sg[0].data = &context->command;
spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
- list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
+ res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
+ if (!res)
+ list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
- res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
if (res) {
- mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n",
- res);
- spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
- list_del(&context->list);
- spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
+ mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
kfree(context);
return ERR_PTR(res);
}
+
/* Context will be freed by wait func after completion */
return context;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index ca38a30fbe91..adc6ab2cf429 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4433,6 +4433,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
return -EINVAL;
}
+ if (is_vlan_dev(upper_dev) &&
+ vlan_dev_vlan_id(upper_dev) == 1) {
+ NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
+ return -EINVAL;
+ }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 7ed08486ae23..c805dcbebd02 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -84,7 +84,7 @@ static int sonic_open(struct net_device *dev)
for (i = 0; i < SONIC_NUM_RRS; i++) {
dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
SONIC_RBSIZE, DMA_FROM_DEVICE);
- if (!laddr) {
+ if (dma_mapping_error(lp->device, laddr)) {
while(i > 0) { /* free any that were mapped successfully */
i--;
dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 1dc424685f4e..35fb31f682af 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -335,7 +335,7 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);
start = mem;
- while (mem - start + 8 < nfp_cpp_area_size(area)) {
+ while (mem - start + 8 <= nfp_cpp_area_size(area)) {
u8 __iomem *value;
u32 type, length;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 00f41c145d4d..820b226d6ff8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -77,7 +77,7 @@
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
/* ILT entry structure */
-#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12)
#define ILT_ENTRY_PHY_ADDR_SHIFT 0
#define ILT_ENTRY_VALID_MASK 0x1ULL
#define ILT_ENTRY_VALID_SHIFT 52
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 38502815d681..468c59d2e491 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
struct qed_ll2_tx_packet *p_pkt = NULL;
struct qed_ll2_info *p_ll2_conn;
struct qed_ll2_tx_queue *p_tx;
+ unsigned long flags = 0;
dma_addr_t tx_frag;
p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
@@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
p_tx = &p_ll2_conn->tx_queue;
+ spin_lock_irqsave(&p_tx->lock, flags);
while (!list_empty(&p_tx->active_descq)) {
p_pkt = list_first_entry(&p_tx->active_descq,
struct qed_ll2_tx_packet, list_entry);
@@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_del(&p_pkt->list_entry);
b_last_packet = list_empty(&p_tx->active_descq);
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+ spin_unlock_irqrestore(&p_tx->lock, flags);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
struct qed_ooo_buffer *p_buffer;
@@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
b_last_frag,
b_last_packet);
}
+ spin_lock_irqsave(&p_tx->lock, flags);
}
+ spin_unlock_irqrestore(&p_tx->lock, flags);
}
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
@@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ll2_rx_packet *p_pkt = NULL;
struct qed_ll2_rx_queue *p_rx;
+ unsigned long flags = 0;
p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
if (!p_ll2_conn)
@@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
p_rx = &p_ll2_conn->rx_queue;
+ spin_lock_irqsave(&p_rx->lock, flags);
while (!list_empty(&p_rx->active_descq)) {
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
if (!p_pkt)
break;
-
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
+ spin_unlock_irqrestore(&p_rx->lock, flags);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
struct qed_ooo_buffer *p_buffer;
@@ -588,7 +595,30 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
cookie,
rx_buf_addr, b_last);
}
+ spin_lock_irqsave(&p_rx->lock, flags);
}
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+}
+
+static bool
+qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
+ struct core_rx_slow_path_cqe *p_cqe)
+{
+ struct ooo_opaque *iscsi_ooo;
+ u32 cid;
+
+ if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
+ return false;
+
+ iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
+ if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
+ return false;
+
+ /* Need to make a flush */
+ cid = le32_to_cpu(iscsi_ooo->cid);
+ qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
+
+ return true;
}
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
@@ -617,6 +647,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
cqe_type = cqe->rx_cqe_sp.type;
+ if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
+ if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
+ &cqe->rx_cqe_sp))
+ continue;
+
if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
DP_NOTICE(p_hwfn,
"Got a non-regular LB LL2 completion [type 0x%02x]\n",
@@ -794,6 +829,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
int rc;
+ if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
+ return 0;
+
rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
if (rc)
return rc;
@@ -814,6 +852,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
u16 new_idx = 0, num_bds = 0;
int rc;
+ if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+ return 0;
+
new_idx = le16_to_cpu(*p_tx->p_fw_cons);
num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
@@ -1867,17 +1908,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
/* Stop Tx & Rx of connection, if needed */
if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->tx_queue.b_cb_registred = false;
+ smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
goto out;
+
qed_ll2_txq_flush(p_hwfn, connection_handle);
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
}
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->rx_queue.b_cb_registred = false;
+ smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
goto out;
+
qed_ll2_rxq_flush(p_hwfn, connection_handle);
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
}
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
@@ -1925,16 +1974,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
if (!p_ll2_conn)
return;
- if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
- p_ll2_conn->rx_queue.b_cb_registred = false;
- qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
- }
-
- if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
- p_ll2_conn->tx_queue.b_cb_registred = false;
- qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
- }
-
kfree(p_ll2_conn->tx_queue.descq_mem);
qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index a01e7d6e5442..f6655e251bbd 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1066,13 +1066,12 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
DP_INFO(edev, "Starting qede_remove\n");
+ qede_rdma_dev_remove(edev);
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);
qede_ptp_disable(edev);
- qede_rdma_dev_remove(edev);
-
edev->ops->common->set_power_state(cdev, PCI_D0);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a5b792ce2ae7..1bf930d4a1e5 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -163,7 +163,7 @@ enum {
};
/* Driver's parameters */
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_RENESAS)
#define SH_ETH_RX_ALIGN 32
#else
#define SH_ETH_RX_ALIGN 2
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index f4c0b02ddad8..59fbf74dcada 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1674,8 +1674,8 @@ static int netsec_probe(struct platform_device *pdev)
if (ret)
goto unreg_napi;
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
- dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
+ dev_warn(&pdev->dev, "Failed to set DMA mask\n");
ret = register_netdev(ndev);
if (ret) {
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index abceea802ea1..38828ab77eb9 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1873,7 +1873,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (IS_ERR(priv->txchan)) {
dev_err(&pdev->dev, "error initializing tx dma channel\n");
rc = PTR_ERR(priv->txchan);
- goto no_cpdma_chan;
+ goto err_free_dma;
}
priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
@@ -1881,14 +1881,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (IS_ERR(priv->rxchan)) {
dev_err(&pdev->dev, "error initializing rx dma channel\n");
rc = PTR_ERR(priv->rxchan);
- goto no_cpdma_chan;
+ goto err_free_txchan;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(&pdev->dev, "error getting irq res\n");
rc = -ENOENT;
- goto no_cpdma_chan;
+ goto err_free_rxchan;
}
ndev->irq = res->start;
@@ -1914,7 +1914,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, rc);
- goto no_cpdma_chan;
+ goto err_napi_del;
}
/* register the network device */
@@ -1924,7 +1924,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "error in register_netdev\n");
rc = -ENODEV;
pm_runtime_put(&pdev->dev);
- goto no_cpdma_chan;
+ goto err_napi_del;
}
@@ -1937,11 +1937,13 @@ static int davinci_emac_probe(struct platform_device *pdev)
return 0;
-no_cpdma_chan:
- if (priv->txchan)
- cpdma_chan_destroy(priv->txchan);
- if (priv->rxchan)
- cpdma_chan_destroy(priv->rxchan);
+err_napi_del:
+ netif_napi_del(&priv->napi);
+err_free_rxchan:
+ cpdma_chan_destroy(priv->rxchan);
+err_free_txchan:
+ cpdma_chan_destroy(priv->txchan);
+err_free_dma:
cpdma_ctlr_destroy(priv->dma);
no_pdata:
if (of_phy_is_fixed_link(np))
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 450eec264a5e..4377c26f714d 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -792,8 +792,10 @@ static int ipvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_CHANGEADDR:
- list_for_each_entry(ipvlan, &port->ipvlans, pnode)
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev);
+ }
break;
case NETDEV_PRE_TYPE_CHANGE:
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 6838129839ca..e757b09f1889 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev)
return rc;
/* make rcal=100, since rdb default is 000 */
- rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10);
+ rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10);
if (rc < 0)
return rc;
/* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
- rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10);
+ rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10);
if (rc < 0)
return rc;
/* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
- rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00);
+ rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00);
return 0;
}
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 5ad130c3da43..d5e0833d69b9 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
/* The register must be written to both the Shadow Register Select and
* the Shadow Read Register Selector
*/
- phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum |
+ phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
return phy_read(phydev, MII_BCM54XX_AUX_CTL);
}
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 7c73808cbbde..81cceaa412fe 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -14,11 +14,18 @@
#ifndef _LINUX_BCM_PHY_LIB_H
#define _LINUX_BCM_PHY_LIB_H
+#include <linux/brcmphy.h>
#include <linux/phy.h>
int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
+static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
+ u16 reg, u16 val)
+{
+ return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
+}
+
int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 29b1c88b55cc..01d2ff2f6241 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv {
static void r_rc_cal_reset(struct phy_device *phydev)
{
/* Reset R_CAL/RC_CAL Engine */
- bcm_phy_write_exp(phydev, 0x00b0, 0x0010);
+ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
/* Disable Reset R_AL/RC_CAL Engine */
- bcm_phy_write_exp(phydev, 0x00b0, 0x0000);
+ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
}
static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index f41b224a9cdb..ab195f0916d6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -573,9 +573,40 @@ static int ksz9031_config_init(struct phy_device *phydev)
ksz9031_of_load_skew_values(phydev, of_node,
MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
tx_data_skews, 4);
+
+ /* Silicon Errata Sheet (DS80000691D or DS80000692D):
+ * When the device links in the 1000BASE-T slave mode only,
+ * the optional 125MHz reference output clock (CLK125_NDO)
+ * has wide duty cycle variation.
+ *
+ * The optional CLK125_NDO clock does not meet the RGMII
+ * 45/55 percent (min/max) duty cycle requirement and therefore
+ * cannot be used directly by the MAC side for clocking
+ * applications that have setup/hold time requirements on
+ * rising and falling clock edges.
+ *
+ * Workaround:
+ * Force the phy to be the master to receive a stable clock
+ * which meets the duty cycle requirement.
+ */
+ if (of_property_read_bool(of_node, "micrel,force-master")) {
+ result = phy_read(phydev, MII_CTRL1000);
+ if (result < 0)
+ goto err_force_master;
+
+ /* enable master mode, config & prefer master */
+ result |= CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER;
+ result = phy_write(phydev, MII_CTRL1000, result);
+ if (result < 0)
+ goto err_force_master;
+ }
}
return ksz9031_center_flp_timing(phydev);
+
+err_force_master:
+ phydev_err(phydev, "failed to force the phy to master mode\n");
+ return result;
}
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index dc7c7ec43202..02ad03a2fab7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -605,30 +605,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (cmd == PPPIOCDETACH) {
/*
- * We have to be careful here... if the file descriptor
- * has been dup'd, we could have another process in the
- * middle of a poll using the same file *, so we had
- * better not free the interface data structures -
- * instead we fail the ioctl. Even in this case, we
- * shut down the interface if we are the owner of it.
- * Actually, we should get rid of PPPIOCDETACH, userland
- * (i.e. pppd) could achieve the same effect by closing
- * this fd and reopening /dev/ppp.
+ * PPPIOCDETACH is no longer supported as it was heavily broken,
+ * and is only known to have been used by pppd older than
+ * ppp-2.4.2 (released November 2003).
*/
+ pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
+ current->comm, current->pid);
err = -EINVAL;
- if (pf->kind == INTERFACE) {
- ppp = PF_TO_PPP(pf);
- rtnl_lock();
- if (file == ppp->owner)
- unregister_netdevice(ppp->dev);
- rtnl_unlock();
- }
- if (atomic_long_read(&file->f_count) < 2) {
- ppp_release(NULL, file);
- err = 0;
- } else
- pr_warn("PPPIOCDETACH file->f_count=%ld\n",
- atomic_long_read(&file->f_count));
goto out;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ef33950a45d9..23e9eb66197f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -681,15 +681,6 @@ static void tun_queue_purge(struct tun_file *tfile)
skb_queue_purge(&tfile->sk.sk_error_queue);
}
-static void tun_cleanup_tx_ring(struct tun_file *tfile)
-{
- if (tfile->tx_ring.queue) {
- ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
- xdp_rxq_info_unreg(&tfile->xdp_rxq);
- memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
- }
-}
-
static void __tun_detach(struct tun_file *tfile, bool clean)
{
struct tun_file *ntfile;
@@ -736,7 +727,9 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}
- tun_cleanup_tx_ring(tfile);
+ if (tun)
+ xdp_rxq_info_unreg(&tfile->xdp_rxq);
+ ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
sock_put(&tfile->sk);
}
}
@@ -783,14 +776,14 @@ static void tun_detach_all(struct net_device *dev)
tun_napi_del(tun, tfile);
/* Drop read queue */
tun_queue_purge(tfile);
+ xdp_rxq_info_unreg(&tfile->xdp_rxq);
sock_put(&tfile->sk);
- tun_cleanup_tx_ring(tfile);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
tun_enable_queue(tfile);
tun_queue_purge(tfile);
+ xdp_rxq_info_unreg(&tfile->xdp_rxq);
sock_put(&tfile->sk);
- tun_cleanup_tx_ring(tfile);
}
BUG_ON(tun->numdisabled != 0);
@@ -834,7 +827,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
}
if (!tfile->detached &&
- ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
+ ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
+ GFP_KERNEL, tun_ptr_free)) {
err = -ENOMEM;
goto out;
}
@@ -1429,6 +1423,13 @@ static void tun_net_init(struct net_device *dev)
dev->max_mtu = MAX_MTU - dev->hard_header_len;
}
+static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
+{
+ struct sock *sk = tfile->socket.sk;
+
+ return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
+}
+
/* Character device part */
/* Poll */
@@ -1451,10 +1452,14 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
if (!ptr_ring_empty(&tfile->tx_ring))
mask |= EPOLLIN | EPOLLRDNORM;
- if (tun->dev->flags & IFF_UP &&
- (sock_writeable(sk) ||
- (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
- sock_writeable(sk))))
+ /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
+ * guarantee EPOLLOUT to be raised by either here or
+ * tun_sock_write_space(). Then process could get notification
+ * after it writes to a down device and meets -EIO.
+ */
+ if (tun_sock_writeable(tun, tfile) ||
+ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+ tun_sock_writeable(tun, tfile)))
mask |= EPOLLOUT | EPOLLWRNORM;
if (tun->dev->reg_state != NETREG_REGISTERED)
@@ -1645,7 +1650,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
else
*skb_xdp = 0;
- preempt_disable();
+ local_bh_disable();
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog && !*skb_xdp) {
@@ -1670,7 +1675,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
if (err)
goto err_redirect;
rcu_read_unlock();
- preempt_enable();
+ local_bh_enable();
return NULL;
case XDP_TX:
get_page(alloc_frag->page);
@@ -1679,7 +1684,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
goto err_redirect;
tun_xdp_flush(tun->dev);
rcu_read_unlock();
- preempt_enable();
+ local_bh_enable();
return NULL;
case XDP_PASS:
delta = orig_data - xdp.data;
@@ -1698,7 +1703,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
skb = build_skb(buf, buflen);
if (!skb) {
rcu_read_unlock();
- preempt_enable();
+ local_bh_enable();
return ERR_PTR(-ENOMEM);
}
@@ -1708,7 +1713,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
alloc_frag->offset += buflen;
rcu_read_unlock();
- preempt_enable();
+ local_bh_enable();
return skb;
@@ -1716,7 +1721,7 @@ err_redirect:
put_page(alloc_frag->page);
err_xdp:
rcu_read_unlock();
- preempt_enable();
+ local_bh_enable();
this_cpu_inc(tun->pcpu_stats->rx_dropped);
return NULL;
}
@@ -1912,16 +1917,19 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
struct bpf_prog *xdp_prog;
int ret;
+ local_bh_disable();
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
ret = do_xdp_generic(xdp_prog, skb);
if (ret != XDP_PASS) {
rcu_read_unlock();
+ local_bh_enable();
return total_len;
}
}
rcu_read_unlock();
+ local_bh_enable();
}
rcu_read_lock();
@@ -3219,6 +3227,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
&tun_proto, 0);
if (!tfile)
return -ENOMEM;
+ if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
+ sk_free(&tfile->sk);
+ return -ENOMEM;
+ }
+
RCU_INIT_POINTER(tfile->tun, NULL);
tfile->flags = 0;
tfile->ifindex = 0;
@@ -3239,8 +3252,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
- memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
-
return 0;
}
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 7220cd620717..0362acd5cdca 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -609,7 +609,7 @@ static const struct driver_info cdc_mbim_info_ndp_to_end = {
*/
static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = {
.description = "CDC MBIM",
- .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
+ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
.bind = cdc_mbim_bind,
.unbind = cdc_mbim_unbind,
.manage_power = cdc_mbim_manage_power,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 42565dd33aa6..094680871687 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1103,6 +1103,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
+ {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
{QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 770422e953f7..032e1ac10a30 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -707,6 +707,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
void *data;
u32 act;
+ /* Transient failure which in theory could occur if
+ * in-flight packets from before XDP was enabled reach
+ * the receive path after XDP is loaded.
+ */
+ if (unlikely(hdr->hdr.gso_type))
+ goto err_xdp;
+
/* This happens when rx buffer size is underestimated
* or headroom is not enough because of the buffer
* was refilled before XDP is set. This should only
@@ -727,14 +734,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp_page = page;
}
- /* Transient failure which in theory could occur if
- * in-flight packets from before XDP was enabled reach
- * the receive path after XDP is loaded. In practice I
- * was not able to create this condition.
- */
- if (unlikely(hdr->hdr.gso_type))
- goto err_xdp;
-
/* Allow consuming headroom but reserve enough space to push
* the descriptor on if we get an XDP_TX return code.
*/
@@ -775,7 +774,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
*xdp_xmit = true;
if (unlikely(xdp_page != page))
- goto err_xdp;
+ put_page(page);
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
@@ -787,7 +786,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
*xdp_xmit = true;
if (unlikely(xdp_page != page))
- goto err_xdp;
+ put_page(page);
rcu_read_unlock();
goto xdp_xmit;
default:
@@ -875,7 +874,7 @@ err_xdp:
rcu_read_unlock();
err_skb:
put_page(page);
- while (--num_buf) {
+ while (num_buf-- > 1) {
buf = virtqueue_get_buf(rq->vq, &len);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 9ebe2a689966..27a9bb8c9611 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
+ /* Prevent any &gdesc->tcd field from being (speculatively)
+ * read before (&gdesc->tcd)->gen is read.
+ */
+ dma_rmb();
+
completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
&gdesc->tcd), tq, adapter->pdev,
adapter);
@@ -1103,6 +1108,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
gdesc->txd.tci = skb_vlan_tag_get(skb);
}
+ /* Ensure that the write to (&gdesc->txd)->gen will be observed after
+ * all other writes to &gdesc->txd.
+ */
+ dma_wmb();
+
/* finally flips the GEN bit of the SOP desc. */
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
VMXNET3_TXD_GEN);
@@ -1298,6 +1308,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
*/
break;
}
+
+ /* Prevent any rcd field from being (speculatively) read before
+ * rcd->gen is read.
+ */
+ dma_rmb();
+
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
rcd->rqID != rq->dataRingQid);
idx = rcd->rxdIdx;
@@ -1528,6 +1544,12 @@ rcd_done:
ring->next2comp = idx;
num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
ring = rq->rx_ring + ring_idx;
+
+ /* Ensure that the writes to rxd->gen bits will be observed
+ * after all other writes to rxd objects.
+ */
+ dma_wmb();
+
while (num_to_alloc) {
vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
&rxCmdDesc);
@@ -2688,7 +2710,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
/* ==================== initialization and cleanup routines ============ */
static int
-vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
+vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
{
int err;
unsigned long mmio_start, mmio_len;
@@ -2700,30 +2722,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
return err;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
- dev_err(&pdev->dev,
- "pci_set_consistent_dma_mask failed\n");
- err = -EIO;
- goto err_set_mask;
- }
- *dma64 = true;
- } else {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- dev_err(&pdev->dev,
- "pci_set_dma_mask failed\n");
- err = -EIO;
- goto err_set_mask;
- }
- *dma64 = false;
- }
-
err = pci_request_selected_regions(pdev, (1 << 2) - 1,
vmxnet3_driver_name);
if (err) {
dev_err(&pdev->dev,
"Failed to request region for adapter: error %d\n", err);
- goto err_set_mask;
+ goto err_enable_device;
}
pci_set_master(pdev);
@@ -2751,7 +2755,7 @@ err_bar1:
iounmap(adapter->hw_addr0);
err_ioremap:
pci_release_selected_regions(pdev, (1 << 2) - 1);
-err_set_mask:
+err_enable_device:
pci_disable_device(pdev);
return err;
}
@@ -3254,7 +3258,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
#endif
};
int err;
- bool dma64 = false; /* stupid gcc */
+ bool dma64;
u32 ver;
struct net_device *netdev;
struct vmxnet3_adapter *adapter;
@@ -3300,6 +3304,24 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+ dev_err(&pdev->dev,
+ "pci_set_consistent_dma_mask failed\n");
+ err = -EIO;
+ goto err_set_mask;
+ }
+ dma64 = true;
+ } else {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(&pdev->dev,
+ "pci_set_dma_mask failed\n");
+ err = -EIO;
+ goto err_set_mask;
+ }
+ dma64 = false;
+ }
+
spin_lock_init(&adapter->cmd_lock);
adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
sizeof(struct vmxnet3_adapter),
@@ -3307,7 +3329,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
dev_err(&pdev->dev, "Failed to map dma\n");
err = -EFAULT;
- goto err_dma_map;
+ goto err_set_mask;
}
adapter->shared = dma_alloc_coherent(
&adapter->pdev->dev,
@@ -3358,7 +3380,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
}
#endif /* VMXNET3_RSS */
- err = vmxnet3_alloc_pci_resources(adapter, &dma64);
+ err = vmxnet3_alloc_pci_resources(adapter);
if (err < 0)
goto err_alloc_pci;
@@ -3504,7 +3526,7 @@ err_alloc_queue_desc:
err_alloc_shared:
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
-err_dma_map:
+err_set_mask:
free_netdev(netdev);
return err;
}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index a3326463b71f..a2c554f8a61b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,12 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.16.0-k"
-/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00
+/* Each byte of this 32-bit integer encodes a version number in
+ * VMXNET3_DRIVER_VERSION_STRING.
+ */
+#define VMXNET3_DRIVER_VERSION_NUM 0x01041000
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f8a0234d332c..5517ea4c2aa0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1590,14 +1590,13 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int max_irqs, num_irqs, i, ret, nr_online_cpus;
+ int max_irqs, num_irqs, i, ret;
u16 pci_cmd;
if (!trans->cfg->mq_rx_supported)
goto enable_msi;
- nr_online_cpus = num_online_cpus();
- max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
+ max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
for (i = 0; i < max_irqs; i++)
trans_pcie->msix_entries[i].entry = i;
@@ -1623,16 +1622,17 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
* Two interrupts less: non rx causes shared with FBQ and RSS.
* More than two interrupts: we will use fewer RSS queues.
*/
- if (num_irqs <= nr_online_cpus) {
+ if (num_irqs <= max_irqs - 2) {
trans_pcie->trans->num_rx_queues = num_irqs + 1;
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
IWL_SHARED_IRQ_FIRST_RSS;
- } else if (num_irqs == nr_online_cpus + 1) {
+ } else if (num_irqs == max_irqs - 1) {
trans_pcie->trans->num_rx_queues = num_irqs;
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
} else {
trans_pcie->trans->num_rx_queues = num_irqs - 1;
}
+ WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
trans_pcie->alloc_vecs = num_irqs;
trans_pcie->msix_enabled = true;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4a017a0d71ea..920c23e542a5 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3340,7 +3340,7 @@ out_err:
static int hwsim_dump_radio_nl(struct sk_buff *skb,
struct netlink_callback *cb)
{
- int last_idx = cb->args[0];
+ int last_idx = cb->args[0] - 1;
struct mac80211_hwsim_data *data = NULL;
int res = 0;
void *hdr;
@@ -3368,7 +3368,7 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
last_idx = data->idx;
}
- cb->args[0] = last_idx;
+ cb->args[0] = last_idx + 1;
/* list changed, but no new element sent, set interrupted flag */
if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index a6884e73d2ab..7ddee980048b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -372,16 +372,15 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
/*
* Determine IFS values
- * - Use TXOP_BACKOFF for probe and management frames except beacons
+ * - Use TXOP_BACKOFF for management frames except beacons
* - Use TXOP_SIFS for fragment bursts
* - Use TXOP_HTTXOP for everything else
*
* Note: rt2800 devices won't use CTS protection (if used)
* for frames not transmitted with TXOP_HTTXOP
*/
- if ((ieee80211_is_mgmt(hdr->frame_control) &&
- !ieee80211_is_beacon(hdr->frame_control)) ||
- (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
+ if (ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_beacon(hdr->frame_control))
txdesc->u.ht.txop = TXOP_BACKOFF;
else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
txdesc->u.ht.txop = TXOP_SIFS;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 99b857e5a7a9..04a20da76786 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -101,6 +101,15 @@ static void nvme_ns_remove(struct nvme_ns *ns);
static int nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_queue_scan(struct nvme_ctrl *ctrl)
+{
+ /*
+ * Only new queue scan work when admin and IO queues are both alive
+ */
+ if (ctrl->state == NVME_CTRL_LIVE)
+ queue_work(nvme_wq, &ctrl->scan_work);
+}
+
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
@@ -244,9 +253,6 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq);
void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
- if (!blk_mq_request_started(req))
- return;
-
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
"Cancelling I/O %d", req->tag);
@@ -1033,6 +1039,21 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
+#define NVME_AEN_SUPPORTED \
+ (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT)
+
+static void nvme_enable_aen(struct nvme_ctrl *ctrl)
+{
+ u32 result;
+ int status;
+
+ status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
+ ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+ if (status)
+ dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
+ ctrl->oaes & NVME_AEN_SUPPORTED);
+}
+
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
struct nvme_user_io io;
@@ -1351,13 +1372,19 @@ static void nvme_set_chunk_size(struct nvme_ns *ns)
blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}
-static void nvme_config_discard(struct nvme_ctrl *ctrl,
- unsigned stream_alignment, struct request_queue *queue)
+static void nvme_config_discard(struct nvme_ns *ns)
{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ struct request_queue *queue = ns->queue;
u32 size = queue_logical_block_size(queue);
- if (stream_alignment)
- size *= stream_alignment;
+ if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
+ blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
+ return;
+ }
+
+ if (ctrl->nr_streams && ns->sws && ns->sgs)
+ size *= ns->sws * ns->sgs;
BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
NVME_DSM_MAX_RANGES);
@@ -1365,9 +1392,12 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl,
queue->limits.discard_alignment = 0;
queue->limits.discard_granularity = size;
+ /* If discard is already enabled, don't reset queue limits */
+ if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
+ return;
+
blk_queue_max_discard_sectors(queue, UINT_MAX);
blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, queue);
if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
@@ -1411,10 +1441,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
{
sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
unsigned short bs = 1 << ns->lba_shift;
- unsigned stream_alignment = 0;
-
- if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
- stream_alignment = ns->sws * ns->sgs;
blk_mq_freeze_queue(disk->queue);
blk_integrity_unregister(disk);
@@ -1428,10 +1454,9 @@ static void nvme_update_disk_info(struct gendisk *disk,
nvme_init_integrity(disk, ns->ms, ns->pi_type);
if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk))
capacity = 0;
- set_capacity(disk, capacity);
- if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
- nvme_config_discard(ns->ctrl, stream_alignment, disk->queue);
+ set_capacity(disk, capacity);
+ nvme_config_discard(ns);
blk_mq_unfreeze_queue(disk->queue);
}
@@ -1447,8 +1472,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->lba_shift == 0)
ns->lba_shift = 9;
ns->noiob = le16_to_cpu(id->noiob);
- ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
+ ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
/* the PI implementation requires metadata equal t10 pi tuple size */
if (ns->ms == sizeof(struct t10_pi_tuple))
ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
@@ -1577,7 +1602,7 @@ static int nvme_pr_reserve(struct block_device *bdev, u64 key,
static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
enum pr_type type, bool abort)
{
- u32 cdw10 = nvme_pr_type(type) << 8 | abort ? 2 : 1;
+ u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}
@@ -1589,7 +1614,7 @@ static int nvme_pr_clear(struct block_device *bdev, u64 key)
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
- u32 cdw10 = nvme_pr_type(type) << 8 | key ? 1 << 3 : 0;
+ u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
@@ -2183,7 +2208,8 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
* Verify that the subsystem actually supports multiple
* controllers, else bail out.
*/
- if (nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
+ if (!ctrl->opts->discovery_nqn &&
+ nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
dev_err(ctrl->device,
"ignoring ctrl due to duplicate subnqn (%s).\n",
found->subnqn);
@@ -2314,7 +2340,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
ret = nvme_get_effects_log(ctrl);
if (ret < 0)
- return ret;
+ goto out_free;
}
if (!ctrl->identified) {
@@ -2345,6 +2371,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->oacs = le16_to_cpu(id->oacs);
ctrl->oncs = le16_to_cpup(&id->oncs);
+ ctrl->oaes = le32_to_cpu(id->oaes);
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
ctrl->cntlid = le16_to_cpup(&id->cntlid);
@@ -3170,6 +3197,42 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
nvme_remove_invalid_namespaces(ctrl, nn);
}
+static bool nvme_scan_changed_ns_log(struct nvme_ctrl *ctrl)
+{
+ size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
+ __le32 *log;
+ int error, i;
+ bool ret = false;
+
+ log = kzalloc(log_size, GFP_KERNEL);
+ if (!log)
+ return false;
+
+ error = nvme_get_log(ctrl, NVME_LOG_CHANGED_NS, log, log_size);
+ if (error) {
+ dev_warn(ctrl->device,
+ "reading changed ns log failed: %d\n", error);
+ goto out_free_log;
+ }
+
+ if (log[0] == cpu_to_le32(0xffffffff))
+ goto out_free_log;
+
+ for (i = 0; i < NVME_MAX_CHANGED_NAMESPACES; i++) {
+ u32 nsid = le32_to_cpu(log[i]);
+
+ if (nsid == 0)
+ break;
+ dev_info(ctrl->device, "rescanning namespace %d.\n", nsid);
+ nvme_validate_ns(ctrl, nsid);
+ }
+ ret = true;
+
+out_free_log:
+ kfree(log);
+ return ret;
+}
+
static void nvme_scan_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl =
@@ -3182,6 +3245,12 @@ static void nvme_scan_work(struct work_struct *work)
WARN_ON_ONCE(!ctrl->tagset);
+ if (test_and_clear_bit(EVENT_NS_CHANGED, &ctrl->events)) {
+ if (nvme_scan_changed_ns_log(ctrl))
+ goto out_sort_namespaces;
+ dev_info(ctrl->device, "rescanning namespaces.\n");
+ }
+
if (nvme_identify_ctrl(ctrl, &id))
return;
@@ -3189,26 +3258,17 @@ static void nvme_scan_work(struct work_struct *work)
if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
if (!nvme_scan_ns_list(ctrl, nn))
- goto done;
+ goto out_free_id;
}
nvme_scan_ns_sequential(ctrl, nn);
- done:
+out_free_id:
+ kfree(id);
+out_sort_namespaces:
down_write(&ctrl->namespaces_rwsem);
list_sort(NULL, &ctrl->namespaces, ns_cmp);
up_write(&ctrl->namespaces_rwsem);
- kfree(id);
}
-void nvme_queue_scan(struct nvme_ctrl *ctrl)
-{
- /*
- * Only new queue scan work when admin and IO queues are both alive
- */
- if (ctrl->state == NVME_CTRL_LIVE)
- queue_work(nvme_wq, &ctrl->scan_work);
-}
-EXPORT_SYMBOL_GPL(nvme_queue_scan);
-
/*
* This function iterates the namespace list unlocked to allow recovery from
* controller failure. It is up to the caller to ensure the namespace list is
@@ -3322,8 +3382,23 @@ static void nvme_fw_act_work(struct work_struct *work)
nvme_get_fw_slot_info(ctrl);
}
+static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+{
+ switch ((result & 0xff00) >> 8) {
+ case NVME_AER_NOTICE_NS_CHANGED:
+ set_bit(EVENT_NS_CHANGED, &ctrl->events);
+ nvme_queue_scan(ctrl);
+ break;
+ case NVME_AER_NOTICE_FW_ACT_STARTING:
+ queue_work(nvme_wq, &ctrl->fw_act_work);
+ break;
+ default:
+ dev_warn(ctrl->device, "async event result %08x\n", result);
+ }
+}
+
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
- union nvme_result *res)
+ volatile union nvme_result *res)
{
u32 result = le32_to_cpu(res->u32);
@@ -3331,6 +3406,9 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
return;
switch (result & 0x7) {
+ case NVME_AER_NOTICE:
+ nvme_handle_aen_notice(ctrl, result);
+ break;
case NVME_AER_ERROR:
case NVME_AER_SMART:
case NVME_AER_CSS:
@@ -3340,18 +3418,6 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
default:
break;
}
-
- switch (result & 0xff07) {
- case NVME_AER_NOTICE_NS_CHANGED:
- dev_info(ctrl->device, "rescanning\n");
- nvme_queue_scan(ctrl);
- break;
- case NVME_AER_NOTICE_FW_ACT_STARTING:
- queue_work(nvme_wq, &ctrl->fw_act_work);
- break;
- default:
- dev_warn(ctrl->device, "async event result %08x\n", result);
- }
queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
@@ -3374,6 +3440,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
+ nvme_enable_aen(ctrl);
queue_work(nvme_wq, &ctrl->async_event_work);
nvme_start_queues(ctrl);
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 7ae732a77fe8..5f5f7067c41d 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -57,7 +57,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
goto out_unlock;
kref_init(&host->ref);
- memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+ strlcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
@@ -545,71 +545,54 @@ blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
return BLK_STS_OK;
switch (ctrl->state) {
- case NVME_CTRL_DELETING:
- goto reject_io;
-
case NVME_CTRL_NEW:
case NVME_CTRL_CONNECTING:
+ case NVME_CTRL_DELETING:
+ /*
+ * This is the case of starting a new or deleting an association
+ * but connectivity was lost before it was fully created or torn
+ * down. We need to error the commands used to initialize the
+ * controller so the reconnect can go into a retry attempt. The
+ * commands should all be marked REQ_FAILFAST_DRIVER, which will
+ * hit the reject path below. Anything else will be queued while
+ * the state settles.
+ */
if (!is_connected)
- /*
- * This is the case of starting a new
- * association but connectivity was lost
- * before it was fully created. We need to
- * error the commands used to initialize the
- * controller so the reconnect can go into a
- * retry attempt. The commands should all be
- * marked REQ_FAILFAST_DRIVER, which will hit
- * the reject path below. Anything else will
- * be queued while the state settles.
- */
- goto reject_or_queue_io;
-
- if ((queue_live &&
- !(nvme_req(rq)->flags & NVME_REQ_USERCMD)) ||
- (!queue_live && blk_rq_is_passthrough(rq) &&
- cmd->common.opcode == nvme_fabrics_command &&
- cmd->fabrics.fctype == nvme_fabrics_type_connect))
- /*
- * If queue is live, allow only commands that
- * are internally generated pass through. These
- * are commands on the admin queue to initialize
- * the controller. This will reject any ioctl
- * admin cmds received while initializing.
- *
- * If the queue is not live, allow only a
- * connect command. This will reject any ioctl
- * admin cmd as well as initialization commands
- * if the controller reverted the queue to non-live.
- */
+ break;
+
+ /*
+ * If queue is live, allow only commands that are internally
+ * generated pass through. These are commands on the admin
+ * queue to initialize the controller. This will reject any
+ * ioctl admin cmds received while initializing.
+ */
+ if (queue_live && !(nvme_req(rq)->flags & NVME_REQ_USERCMD))
return BLK_STS_OK;
/*
- * fall-thru to the reject_or_queue_io clause
+ * If the queue is not live, allow only a connect command. This
+ * will reject any ioctl admin cmd as well as initialization
+ * commands if the controller reverted the queue to non-live.
*/
+ if (!queue_live && blk_rq_is_passthrough(rq) &&
+ cmd->common.opcode == nvme_fabrics_command &&
+ cmd->fabrics.fctype == nvme_fabrics_type_connect)
+ return BLK_STS_OK;
break;
-
- /* these cases fall-thru
- * case NVME_CTRL_LIVE:
- * case NVME_CTRL_RESETTING:
- */
default:
break;
}
-reject_or_queue_io:
/*
- * Any other new io is something we're not in a state to send
- * to the device. Default action is to busy it and retry it
- * after the controller state is recovered. However, anything
- * marked for failfast or nvme multipath is immediately failed.
- * Note: commands used to initialize the controller will be
- * marked for failfast.
+ * Any other new io is something we're not in a state to send to the
+ * device. Default action is to busy it and retry it after the
+ * controller state is recovered. However, anything marked for failfast
+ * or nvme multipath is immediately failed. Note: commands used to
+ * initialize the controller will be marked for failfast.
* Note: nvme cli/ioctl commands are marked for failfast.
*/
if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
-
-reject_io:
nvme_req(rq)->status = NVME_SC_ABORT_REQ;
return BLK_STS_IOERR;
}
@@ -689,10 +672,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->discovery_nqn =
!(strcmp(opts->subsysnqn,
NVME_DISC_SUBSYS_NAME));
- if (opts->discovery_nqn) {
- opts->kato = 0;
- opts->nr_io_queues = 0;
- }
break;
case NVMF_OPT_TRADDR:
p = match_strdup(args);
@@ -851,6 +830,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
}
+ if (opts->discovery_nqn) {
+ opts->kato = 0;
+ opts->nr_io_queues = 0;
+ opts->duplicate_connect = true;
+ }
if (ctrl_loss_tmo < 0)
opts->max_reconnects = -1;
else
@@ -983,16 +967,6 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
goto out_module_put;
}
- if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
- dev_warn(ctrl->device,
- "controller returned incorrect NQN: \"%s\".\n",
- ctrl->subsys->subnqn);
- module_put(ops->module);
- up_read(&nvmf_transports_rwsem);
- nvme_delete_ctrl_sync(ctrl);
- return ERR_PTR(-EINVAL);
- }
-
module_put(ops->module);
up_read(&nvmf_transports_rwsem);
return ctrl;
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index ef46c915b7b5..0cf0460a5c92 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -139,7 +139,9 @@ static inline bool
nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts)
{
- if (strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
+ if (ctrl->state == NVME_CTRL_DELETING ||
+ ctrl->state == NVME_CTRL_DEAD ||
+ strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
memcmp(&opts->host->id, &ctrl->opts->host->id, sizeof(uuid_t)))
return false;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 6cb26bcf6ec0..0bad65803271 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1686,16 +1686,6 @@ done:
goto check_error;
}
- /*
- * Force failures of commands if we're killing the controller
- * or have an error on a command used to create an new association
- */
- if (status &&
- (blk_queue_dying(rq->q) ||
- ctrl->ctrl.state == NVME_CTRL_NEW ||
- ctrl->ctrl.state == NVME_CTRL_CONNECTING))
- status |= cpu_to_le16(NVME_SC_DNR << 1);
-
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
nvme_end_request(rq, status, result);
@@ -2403,9 +2393,6 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
- if (!blk_mq_request_started(req))
- return;
-
__nvme_fc_abort_op(ctrl, op);
}
@@ -3284,6 +3271,8 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
}
spin_unlock_irqrestore(&nvme_fc_lock, flags);
+ pr_warn("%s: %s - %s combination not found\n",
+ __func__, opts->traddr, opts->host_traddr);
return ERR_PTR(-ENOENT);
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 17d2f7cf3fed..de24fe77c80b 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -22,6 +22,7 @@
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
+#include <linux/rcupdate.h>
extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
@@ -180,6 +181,7 @@ struct nvme_ctrl {
u16 kas;
u8 npss;
u8 apsta;
+ u32 oaes;
u32 aen_result;
unsigned int shutdown_timeout;
unsigned int kato;
@@ -192,6 +194,8 @@ struct nvme_ctrl {
struct delayed_work ka_work;
struct nvme_command ka_cmd;
struct work_struct fw_act_work;
+#define EVENT_NS_CHANGED (1 << 0)
+ unsigned long events;
/* Power saving configuration */
u64 ps_max_latency_us;
@@ -398,14 +402,13 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);
-void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
bool send);
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
- union nvme_result *res);
+ volatile union nvme_result *res);
void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
@@ -454,7 +457,7 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
struct nvme_ns_head *head = ns->head;
- if (head && ns == srcu_dereference(head->current_path, &head->srcu))
+ if (head && ns == rcu_access_pointer(head->current_path))
rcu_assign_pointer(head->current_path, NULL);
}
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 17a0190bd88f..e526437bacbf 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -13,6 +13,7 @@
*/
#include <linux/aer.h>
+#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
@@ -68,7 +69,6 @@ MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
struct nvme_dev;
struct nvme_queue;
-static void nvme_process_cq(struct nvme_queue *nvmeq);
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
/*
@@ -147,9 +147,10 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
struct nvme_queue {
struct device *q_dmadev;
struct nvme_dev *dev;
- spinlock_t q_lock;
+ spinlock_t sq_lock;
struct nvme_command *sq_cmds;
struct nvme_command __iomem *sq_cmds_io;
+ spinlock_t cq_lock ____cacheline_aligned_in_smp;
volatile struct nvme_completion *cqes;
struct blk_mq_tags **tags;
dma_addr_t sq_dma_addr;
@@ -159,9 +160,9 @@ struct nvme_queue {
s16 cq_vector;
u16 sq_tail;
u16 cq_head;
+ u16 last_cq_head;
u16 qid;
u8 cq_phase;
- u8 cqe_seen;
u32 *dbbuf_sq_db;
u32 *dbbuf_cq_db;
u32 *dbbuf_sq_ei;
@@ -420,28 +421,25 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
}
/**
- * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
* @nvmeq: The queue to use
* @cmd: The command to send
- *
- * Safe to use from interrupt context
*/
-static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
- struct nvme_command *cmd)
+static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
- u16 tail = nvmeq->sq_tail;
-
+ spin_lock(&nvmeq->sq_lock);
if (nvmeq->sq_cmds_io)
- memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
+ memcpy_toio(&nvmeq->sq_cmds_io[nvmeq->sq_tail], cmd,
+ sizeof(*cmd));
else
- memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+ memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
- if (++tail == nvmeq->q_depth)
- tail = 0;
- if (nvme_dbbuf_update_and_check_event(tail, nvmeq->dbbuf_sq_db,
- nvmeq->dbbuf_sq_ei))
- writel(tail, nvmeq->q_db);
- nvmeq->sq_tail = tail;
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
+ nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+ spin_unlock(&nvmeq->sq_lock);
}
static void **nvme_pci_iod_list(struct request *req)
@@ -872,6 +870,13 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_command cmnd;
blk_status_t ret;
+ /*
+ * We should not need to do this, but we're still using this to
+ * ensure we can drain requests on a dying queue.
+ */
+ if (unlikely(nvmeq->cq_vector < 0))
+ return BLK_STS_IOERR;
+
ret = nvme_setup_cmd(ns, req, &cmnd);
if (ret)
return ret;
@@ -887,16 +892,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
blk_mq_start_request(req);
-
- spin_lock_irq(&nvmeq->q_lock);
- if (unlikely(nvmeq->cq_vector < 0)) {
- ret = BLK_STS_IOERR;
- spin_unlock_irq(&nvmeq->q_lock);
- goto out_cleanup_iod;
- }
- __nvme_submit_cmd(nvmeq, &cmnd);
- nvme_process_cq(nvmeq);
- spin_unlock_irq(&nvmeq->q_lock);
+ nvme_submit_cmd(nvmeq, &cmnd);
return BLK_STS_OK;
out_cleanup_iod:
nvme_free_iod(dev, req);
@@ -914,10 +910,10 @@ static void nvme_pci_complete_rq(struct request *req)
}
/* We read the CQE phase first to check if the rest of the entry is valid */
-static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
- u16 phase)
+static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
- return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
+ return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
+ nvmeq->cq_phase;
}
static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
@@ -931,9 +927,9 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
}
}
-static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
- struct nvme_completion *cqe)
+static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
+ volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
struct request *req;
if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
@@ -956,83 +952,87 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
return;
}
- nvmeq->cqe_seen = 1;
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
nvme_end_request(req, cqe->status, cqe->result);
}
-static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
- struct nvme_completion *cqe)
+static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
{
- if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
- *cqe = nvmeq->cqes[nvmeq->cq_head];
+ while (start != end) {
+ nvme_handle_cqe(nvmeq, start);
+ if (++start == nvmeq->q_depth)
+ start = 0;
+ }
+}
- if (++nvmeq->cq_head == nvmeq->q_depth) {
- nvmeq->cq_head = 0;
- nvmeq->cq_phase = !nvmeq->cq_phase;
- }
- return true;
+static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
+{
+ if (++nvmeq->cq_head == nvmeq->q_depth) {
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase = !nvmeq->cq_phase;
}
- return false;
}
-static void nvme_process_cq(struct nvme_queue *nvmeq)
+static inline bool nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
+ u16 *end, int tag)
{
- struct nvme_completion cqe;
- int consumed = 0;
+ bool found = false;
- while (nvme_read_cqe(nvmeq, &cqe)) {
- nvme_handle_cqe(nvmeq, &cqe);
- consumed++;
+ *start = nvmeq->cq_head;
+ while (!found && nvme_cqe_pending(nvmeq)) {
+ if (nvmeq->cqes[nvmeq->cq_head].command_id == tag)
+ found = true;
+ nvme_update_cq_head(nvmeq);
}
+ *end = nvmeq->cq_head;
- if (consumed)
+ if (*start != *end)
nvme_ring_cq_doorbell(nvmeq);
+ return found;
}
static irqreturn_t nvme_irq(int irq, void *data)
{
- irqreturn_t result;
struct nvme_queue *nvmeq = data;
- spin_lock(&nvmeq->q_lock);
- nvme_process_cq(nvmeq);
- result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
- nvmeq->cqe_seen = 0;
- spin_unlock(&nvmeq->q_lock);
- return result;
+ irqreturn_t ret = IRQ_NONE;
+ u16 start, end;
+
+ spin_lock(&nvmeq->cq_lock);
+ if (nvmeq->cq_head != nvmeq->last_cq_head)
+ ret = IRQ_HANDLED;
+ nvme_process_cq(nvmeq, &start, &end, -1);
+ nvmeq->last_cq_head = nvmeq->cq_head;
+ spin_unlock(&nvmeq->cq_lock);
+
+ if (start != end) {
+ nvme_complete_cqes(nvmeq, start, end);
+ return IRQ_HANDLED;
+ }
+
+ return ret;
}
static irqreturn_t nvme_irq_check(int irq, void *data)
{
struct nvme_queue *nvmeq = data;
- if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+ if (nvme_cqe_pending(nvmeq))
return IRQ_WAKE_THREAD;
return IRQ_NONE;
}
static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
{
- struct nvme_completion cqe;
- int found = 0, consumed = 0;
+ u16 start, end;
+ bool found;
- if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+ if (!nvme_cqe_pending(nvmeq))
return 0;
- spin_lock_irq(&nvmeq->q_lock);
- while (nvme_read_cqe(nvmeq, &cqe)) {
- nvme_handle_cqe(nvmeq, &cqe);
- consumed++;
-
- if (tag == cqe.command_id) {
- found = 1;
- break;
- }
- }
-
- if (consumed)
- nvme_ring_cq_doorbell(nvmeq);
- spin_unlock_irq(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->cq_lock);
+ found = nvme_process_cq(nvmeq, &start, &end, tag);
+ spin_unlock_irq(&nvmeq->cq_lock);
+ nvme_complete_cqes(nvmeq, start, end);
return found;
}
@@ -1052,10 +1052,7 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
memset(&c, 0, sizeof(c));
c.common.opcode = nvme_admin_async_event;
c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
-
- spin_lock_irq(&nvmeq->q_lock);
- __nvme_submit_cmd(nvmeq, &c);
- spin_unlock_irq(&nvmeq->q_lock);
+ nvme_submit_cmd(nvmeq, &c);
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1070,7 +1067,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
}
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
- struct nvme_queue *nvmeq)
+ struct nvme_queue *nvmeq, s16 vector)
{
struct nvme_command c;
int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
@@ -1085,7 +1082,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
c.create_cq.cqid = cpu_to_le16(qid);
c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
c.create_cq.cq_flags = cpu_to_le16(flags);
- c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
+ c.create_cq.irq_vector = cpu_to_le16(vector);
return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}
@@ -1208,7 +1205,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
nvme_warn_reset(dev, csts);
nvme_dev_disable(dev, false);
nvme_reset_ctrl(&dev->ctrl);
- return BLK_EH_HANDLED;
+ return BLK_EH_DONE;
}
/*
@@ -1218,24 +1215,24 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, completion polled\n",
req->tag, nvmeq->qid);
- return BLK_EH_HANDLED;
+ return BLK_EH_DONE;
}
/*
* Shutdown immediately if controller times out while starting. The
* reset work will see the pci device disabled when it gets the forced
* cancellation error. All outstanding requests are completed on
- * shutdown, so we return BLK_EH_HANDLED.
+ * shutdown, so we return BLK_EH_DONE.
*/
switch (dev->ctrl.state) {
case NVME_CTRL_CONNECTING:
case NVME_CTRL_RESETTING:
- dev_warn(dev->ctrl.device,
+ dev_warn_ratelimited(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
- return BLK_EH_HANDLED;
+ return BLK_EH_DONE;
default:
break;
}
@@ -1252,12 +1249,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
nvme_dev_disable(dev, false);
nvme_reset_ctrl(&dev->ctrl);
- /*
- * Mark the request as handled, since the inline shutdown
- * forces all outstanding requests to complete.
- */
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
- return BLK_EH_HANDLED;
+ return BLK_EH_DONE;
}
if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
@@ -1321,15 +1314,21 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
int vector;
- spin_lock_irq(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->cq_lock);
if (nvmeq->cq_vector == -1) {
- spin_unlock_irq(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->cq_lock);
return 1;
}
vector = nvmeq->cq_vector;
nvmeq->dev->online_queues--;
nvmeq->cq_vector = -1;
- spin_unlock_irq(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->cq_lock);
+
+ /*
+ * Ensure that nvme_queue_rq() sees it ->cq_vector == -1 without
+ * having to grab the lock.
+ */
+ mb();
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
@@ -1342,15 +1341,18 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
struct nvme_queue *nvmeq = &dev->queues[0];
+ u16 start, end;
if (shutdown)
nvme_shutdown_ctrl(&dev->ctrl);
else
nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
- spin_lock_irq(&nvmeq->q_lock);
- nvme_process_cq(nvmeq);
- spin_unlock_irq(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->cq_lock);
+ nvme_process_cq(nvmeq, &start, &end, -1);
+ spin_unlock_irq(&nvmeq->cq_lock);
+
+ nvme_complete_cqes(nvmeq, start, end);
}
static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
@@ -1408,7 +1410,8 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
nvmeq->q_dmadev = dev->dev;
nvmeq->dev = dev;
- spin_lock_init(&nvmeq->q_lock);
+ spin_lock_init(&nvmeq->sq_lock);
+ spin_lock_init(&nvmeq->cq_lock);
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
@@ -1444,7 +1447,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
struct nvme_dev *dev = nvmeq->dev;
- spin_lock_irq(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->cq_lock);
nvmeq->sq_tail = 0;
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
@@ -1452,13 +1455,14 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
nvme_dbbuf_init(dev, nvmeq, qid);
dev->online_queues++;
- spin_unlock_irq(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->cq_lock);
}
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
struct nvme_dev *dev = nvmeq->dev;
int result;
+ s16 vector;
if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
@@ -1471,15 +1475,21 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
* A queue's vector matches the queue identifier unless the controller
* has only one vector available.
*/
- nvmeq->cq_vector = dev->num_vecs == 1 ? 0 : qid;
- result = adapter_alloc_cq(dev, qid, nvmeq);
+ vector = dev->num_vecs == 1 ? 0 : qid;
+ result = adapter_alloc_cq(dev, qid, nvmeq, vector);
if (result < 0)
- goto release_vector;
+ goto out;
result = adapter_alloc_sq(dev, qid, nvmeq);
if (result < 0)
goto release_cq;
+ /*
+ * Set cq_vector after alloc cq/sq, otherwise nvme_suspend_queue will
+ * invoke free_irq for it and cause a 'Trying to free already-free IRQ
+ * xxx' warning if the create CQ/SQ command times out.
+ */
+ nvmeq->cq_vector = vector;
nvme_init_queue(nvmeq, qid);
result = queue_request_irq(nvmeq);
if (result < 0)
@@ -1487,13 +1497,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
return result;
- release_sq:
+release_sq:
+ nvmeq->cq_vector = -1;
dev->online_queues--;
adapter_delete_sq(dev, qid);
- release_cq:
+release_cq:
adapter_delete_cq(dev, qid);
- release_vector:
- nvmeq->cq_vector = -1;
+out:
return result;
}
@@ -1997,19 +2007,22 @@ static void nvme_del_queue_end(struct request *req, blk_status_t error)
static void nvme_del_cq_end(struct request *req, blk_status_t error)
{
struct nvme_queue *nvmeq = req->end_io_data;
+ u16 start, end;
if (!error) {
unsigned long flags;
/*
- * We might be called with the AQ q_lock held
- * and the I/O queue q_lock should always
+ * We might be called with the AQ cq_lock held
+ * and the I/O queue cq_lock should always
* nest inside the AQ one.
*/
- spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
+ spin_lock_irqsave_nested(&nvmeq->cq_lock, flags,
SINGLE_DEPTH_NESTING);
- nvme_process_cq(nvmeq);
- spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+ nvme_process_cq(nvmeq, &start, &end, -1);
+ spin_unlock_irqrestore(&nvmeq->cq_lock, flags);
+
+ nvme_complete_cqes(nvmeq, start, end);
}
nvme_del_queue_end(req, error);
@@ -2497,6 +2510,15 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
return 0;
}
+static void nvme_async_probe(void *data, async_cookie_t cookie)
+{
+ struct nvme_dev *dev = data;
+
+ nvme_reset_ctrl_sync(&dev->ctrl);
+ flush_work(&dev->ctrl.scan_work);
+ nvme_put_ctrl(&dev->ctrl);
+}
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int node, result = -ENOMEM;
@@ -2541,7 +2563,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
- nvme_reset_ctrl(&dev->ctrl);
+ nvme_get_ctrl(&dev->ctrl);
+ async_schedule(nvme_async_probe, dev);
return 0;
@@ -2685,6 +2708,9 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
static void nvme_error_resume(struct pci_dev *pdev)
{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ flush_work(&dev->ctrl.reset_work);
pci_cleanup_aer_uncorrect_error_status(pdev);
}
@@ -2714,6 +2740,8 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_MEDIUM_PRIO_SQ },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
+ { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
@@ -2728,6 +2756,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
.driver_data = NVME_QUIRK_LIGHTNVM, },
+ { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
+ .driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 1eb4438a8763..7b3f08410430 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -778,7 +778,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (error) {
dev_err(ctrl->ctrl.device,
"prop_get NVME_REG_CAP failed\n");
- goto out_cleanup_queue;
+ goto out_stop_queue;
}
ctrl->ctrl.sqsize =
@@ -786,23 +786,25 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
if (error)
- goto out_cleanup_queue;
+ goto out_stop_queue;
ctrl->ctrl.max_hw_sectors =
(ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
error = nvme_init_identify(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_stop_queue;
error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
&ctrl->async_event_sqe, sizeof(struct nvme_command),
DMA_TO_DEVICE);
if (error)
- goto out_cleanup_queue;
+ goto out_stop_queue;
return 0;
+out_stop_queue:
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
out_cleanup_queue:
if (new)
blk_cleanup_queue(ctrl->ctrl.admin_q);
@@ -1598,7 +1600,7 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
/* fail with DNR on cmd timeout */
nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
- return BLK_EH_HANDLED;
+ return BLK_EH_DONE;
}
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index ea91fccd1bc0..01390f0e1671 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -148,8 +148,8 @@ TRACE_EVENT(nvme_complete_rq,
__entry->flags = nvme_req(req)->flags;
__entry->status = nvme_req(req)->status;
),
- TP_printk("cmdid=%u, qid=%d, res=%llu, retries=%u, flags=0x%x, status=%u",
- __entry->cid, __entry->qid, __entry->result,
+ TP_printk("qid=%d, cmdid=%u, res=%llu, retries=%u, flags=0x%x, status=%u",
+ __entry->qid, __entry->cid, __entry->result,
__entry->retries, __entry->flags, __entry->status)
);
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index 488250189c99..8118c93391c6 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -6,8 +6,8 @@ obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o
obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
-nvmet-y += core.o configfs.o admin-cmd.o io-cmd.o fabrics-cmd.o \
- discovery.o
+nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
+ discovery.o io-cmd-file.o io-cmd-bdev.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 5e0e9fcc0d4d..ead8fbe6922e 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -32,6 +32,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
return len;
}
+static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
+{
+ nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
+}
+
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
@@ -45,6 +50,10 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
return NVME_SC_INVALID_NS;
}
+ /* we don't have the right data for file backed ns */
+ if (!ns->bdev)
+ goto out;
+
host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
@@ -54,6 +63,7 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
put_unaligned_le64(host_writes, &slog->host_writes[0]);
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
+out:
nvmet_put_namespace(ns);
return NVME_SC_SUCCESS;
@@ -71,6 +81,9 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
rcu_read_lock();
list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ /* we don't have the right data for file backed ns */
+ if (!ns->bdev)
+ continue;
host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
data_units_read +=
part_stat_read(ns->bdev->bd_part, sectors[READ]);
@@ -89,74 +102,50 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
return NVME_SC_SUCCESS;
}
-static u16 nvmet_get_smart_log(struct nvmet_req *req,
- struct nvme_smart_log *slog)
+static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
- u16 status;
+ struct nvme_smart_log *log;
+ u16 status = NVME_SC_INTERNAL;
+
+ if (req->data_len != sizeof(*log))
+ goto out;
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log)
+ goto out;
- WARN_ON(req == NULL || slog == NULL);
if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
- status = nvmet_get_smart_log_all(req, slog);
+ status = nvmet_get_smart_log_all(req, log);
else
- status = nvmet_get_smart_log_nsid(req, slog);
- return status;
+ status = nvmet_get_smart_log_nsid(req, log);
+ if (status)
+ goto out;
+
+ status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+out:
+ nvmet_req_complete(req, status);
}
-static void nvmet_execute_get_log_page(struct nvmet_req *req)
+static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
- struct nvme_smart_log *smart_log;
- size_t data_len = nvmet_get_log_page_len(req->cmd);
- void *buf;
- u16 status = 0;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 status = NVME_SC_INTERNAL;
+ size_t len;
- buf = kzalloc(data_len, GFP_KERNEL);
- if (!buf) {
- status = NVME_SC_INTERNAL;
+ if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
goto out;
- }
- switch (req->cmd->get_log_page.lid) {
- case NVME_LOG_ERROR:
- /*
- * We currently never set the More bit in the status field,
- * so all error log entries are invalid and can be zeroed out.
- * This is called a minum viable implementation (TM) of this
- * mandatory log page.
- */
- break;
- case NVME_LOG_SMART:
- /*
- * XXX: fill out actual smart log
- *
- * We might have a hard time coming up with useful values for
- * many of the fields, and even when we have useful data
- * available (e.g. units or commands read/written) those aren't
- * persistent over power loss.
- */
- if (data_len != sizeof(*smart_log)) {
- status = NVME_SC_INTERNAL;
- goto err;
- }
- smart_log = buf;
- status = nvmet_get_smart_log(req, smart_log);
- if (status)
- goto err;
- break;
- case NVME_LOG_FW_SLOT:
- /*
- * We only support a single firmware slot which always is
- * active, so we can zero out the whole firmware slot log and
- * still claim to fully implement this mandatory log page.
- */
- break;
- default:
- BUG();
- }
-
- status = nvmet_copy_to_sgl(req, 0, buf, data_len);
-
-err:
- kfree(buf);
+ mutex_lock(&ctrl->lock);
+ if (ctrl->nr_changed_ns == U32_MAX)
+ len = sizeof(__le32);
+ else
+ len = ctrl->nr_changed_ns * sizeof(__le32);
+ status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
+ if (!status)
+ status = nvmet_zero_sgl(req, len, req->data_len - len);
+ ctrl->nr_changed_ns = 0;
+ clear_bit(NVME_AEN_CFG_NS_ATTR, &ctrl->aen_masked);
+ mutex_unlock(&ctrl->lock);
out:
nvmet_req_complete(req, status);
}
@@ -201,7 +190,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->ver = cpu_to_le32(ctrl->subsys->ver);
/* XXX: figure out what to do about RTD3R/RTD3 */
- id->oaes = cpu_to_le32(1 << 8);
+ id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
id->ctratt = cpu_to_le32(1 << 0);
id->oacs = 0;
@@ -447,6 +436,16 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
nvmet_set_result(req, req->sq->ctrl->kato);
break;
+ case NVME_FEAT_ASYNC_EVENT:
+ val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
+ if (val32 & ~NVMET_AEN_CFG_ALL) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+
+ WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
+ nvmet_set_result(req, val32);
+ break;
case NVME_FEAT_HOST_ID:
status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
break;
@@ -485,9 +484,10 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
break;
case NVME_FEAT_WRITE_ATOMIC:
break;
+#endif
case NVME_FEAT_ASYNC_EVENT:
+ nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
break;
-#endif
case NVME_FEAT_VOLATILE_WC:
nvmet_set_result(req, 1);
break;
@@ -548,8 +548,6 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
struct nvme_command *cmd = req->cmd;
u16 ret;
- req->ns = NULL;
-
ret = nvmet_check_ctrl_status(req, cmd);
if (unlikely(ret))
return ret;
@@ -560,9 +558,28 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
switch (cmd->get_log_page.lid) {
case NVME_LOG_ERROR:
+ /*
+ * We currently never set the More bit in the status
+ * field, so all error log entries are invalid and can
+ * be zeroed out. This is called a minum viable
+ * implementation (TM) of this mandatory log page.
+ */
+ req->execute = nvmet_execute_get_log_page_noop;
+ return 0;
case NVME_LOG_SMART:
+ req->execute = nvmet_execute_get_log_page_smart;
+ return 0;
case NVME_LOG_FW_SLOT:
- req->execute = nvmet_execute_get_log_page;
+ /*
+ * We only support a single firmware slot which always
+ * is active, so we can zero out the whole firmware slot
+ * log and still claim to fully implement this mandatory
+ * log page.
+ */
+ req->execute = nvmet_execute_get_log_page_noop;
+ return 0;
+ case NVME_LOG_CHANGED_NS:
+ req->execute = nvmet_execute_get_log_changed_ns;
return 0;
}
break;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index e95424f172fd..a03da764ecae 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -57,6 +57,13 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
return 0;
}
+u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
+{
+ if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len)
+ return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ return 0;
+}
+
static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
struct nvmet_ns *ns;
@@ -137,6 +144,51 @@ static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
schedule_work(&ctrl->async_event_work);
}
+static bool nvmet_aen_disabled(struct nvmet_ctrl *ctrl, u32 aen)
+{
+ if (!(READ_ONCE(ctrl->aen_enabled) & aen))
+ return true;
+ return test_and_set_bit(aen, &ctrl->aen_masked);
+}
+
+static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
+{
+ u32 i;
+
+ mutex_lock(&ctrl->lock);
+ if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
+ goto out_unlock;
+
+ for (i = 0; i < ctrl->nr_changed_ns; i++) {
+ if (ctrl->changed_ns_list[i] == nsid)
+ goto out_unlock;
+ }
+
+ if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
+ ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
+ ctrl->nr_changed_ns = U32_MAX;
+ goto out_unlock;
+ }
+
+ ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
+out_unlock:
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
+{
+ struct nvmet_ctrl *ctrl;
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
+ if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_NS_ATTR))
+ continue;
+ nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ NVME_AER_NOTICE_NS_CHANGED,
+ NVME_LOG_CHANGED_NS);
+ }
+}
+
int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
int ret = 0;
@@ -271,33 +323,31 @@ void nvmet_put_namespace(struct nvmet_ns *ns)
percpu_ref_put(&ns->ref);
}
+static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
+{
+ nvmet_bdev_ns_disable(ns);
+ nvmet_file_ns_disable(ns);
+}
+
int nvmet_ns_enable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
- struct nvmet_ctrl *ctrl;
int ret = 0;
mutex_lock(&subsys->lock);
if (ns->enabled)
goto out_unlock;
- ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
- NULL);
- if (IS_ERR(ns->bdev)) {
- pr_err("failed to open block device %s: (%ld)\n",
- ns->device_path, PTR_ERR(ns->bdev));
- ret = PTR_ERR(ns->bdev);
- ns->bdev = NULL;
+ ret = nvmet_bdev_ns_enable(ns);
+ if (ret)
+ ret = nvmet_file_ns_enable(ns);
+ if (ret)
goto out_unlock;
- }
-
- ns->size = i_size_read(ns->bdev->bd_inode);
- ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
0, GFP_KERNEL);
if (ret)
- goto out_blkdev_put;
+ goto out_dev_put;
if (ns->nsid > subsys->max_nsid)
subsys->max_nsid = ns->nsid;
@@ -320,24 +370,20 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
list_add_tail_rcu(&ns->dev_link, &old->dev_link);
}
- list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
-
+ nvmet_ns_changed(subsys, ns->nsid);
ns->enabled = true;
ret = 0;
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
-out_blkdev_put:
- blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
- ns->bdev = NULL;
+out_dev_put:
+ nvmet_ns_dev_disable(ns);
goto out_unlock;
}
void nvmet_ns_disable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
- struct nvmet_ctrl *ctrl;
mutex_lock(&subsys->lock);
if (!ns->enabled)
@@ -363,11 +409,8 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
percpu_ref_exit(&ns->ref);
mutex_lock(&subsys->lock);
- list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
-
- if (ns->bdev)
- blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
+ nvmet_ns_changed(subsys, ns->nsid);
+ nvmet_ns_dev_disable(ns);
out_unlock:
mutex_unlock(&subsys->lock);
}
@@ -499,6 +542,25 @@ int nvmet_sq_init(struct nvmet_sq *sq)
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
+static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+ u16 ret;
+
+ ret = nvmet_check_ctrl_status(req, cmd);
+ if (unlikely(ret))
+ return ret;
+
+ req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
+ if (unlikely(!req->ns))
+ return NVME_SC_INVALID_NS | NVME_SC_DNR;
+
+ if (req->ns->file)
+ return nvmet_file_parse_io_cmd(req);
+ else
+ return nvmet_bdev_parse_io_cmd(req);
+}
+
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
@@ -710,15 +772,14 @@ out:
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
- pr_err("got io cmd %d while CC.EN == 0 on qid = %d\n",
+ pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
cmd->common.opcode, req->sq->qid);
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
}
if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
- pr_err("got io cmd %d while CSTS.RDY == 0 on qid = %d\n",
+ pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
cmd->common.opcode, req->sq->qid);
- req->ns = NULL;
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
}
return 0;
@@ -809,12 +870,18 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
+ WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
+
+ ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
+ sizeof(__le32), GFP_KERNEL);
+ if (!ctrl->changed_ns_list)
+ goto out_free_ctrl;
ctrl->cqs = kcalloc(subsys->max_qid + 1,
sizeof(struct nvmet_cq *),
GFP_KERNEL);
if (!ctrl->cqs)
- goto out_free_ctrl;
+ goto out_free_changed_ns_list;
ctrl->sqs = kcalloc(subsys->max_qid + 1,
sizeof(struct nvmet_sq *),
@@ -872,6 +939,8 @@ out_free_sqs:
kfree(ctrl->sqs);
out_free_cqs:
kfree(ctrl->cqs);
+out_free_changed_ns_list:
+ kfree(ctrl->changed_ns_list);
out_free_ctrl:
kfree(ctrl);
out_put_subsystem:
@@ -898,6 +967,7 @@ static void nvmet_ctrl_free(struct kref *ref)
kfree(ctrl->sqs);
kfree(ctrl->cqs);
+ kfree(ctrl->changed_ns_list);
kfree(ctrl);
nvmet_subsys_put(subsys);
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 231e04e0a496..08656b849bd6 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -187,8 +187,6 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
- req->ns = NULL;
-
if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
pr_err("got cmd %d while not ready\n",
cmd->common.opcode);
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 19e9e42ae943..d84ae004cb85 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -77,8 +77,6 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
- req->ns = NULL;
-
switch (cmd->fabrics.fctype) {
case nvme_fabrics_type_property_set:
req->data_len = 0;
@@ -242,8 +240,6 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
- req->ns = NULL;
-
if (cmd->common.opcode != nvme_fabrics_command) {
pr_err("invalid command 0x%x on unconnected queue.\n",
cmd->fabrics.opcode);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 33ee8d3145f8..408279cb6f2c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -31,7 +31,7 @@
/* *************************** Data Structures/Defines ****************** */
-#define NVMET_LS_CTX_COUNT 4
+#define NVMET_LS_CTX_COUNT 256
/* for this implementation, assume small single frame rqst/rsp */
#define NVME_FC_MAX_LS_BUFFER_SIZE 2048
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd-bdev.c
index cd2344179673..e0b0f7df70c2 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -16,6 +16,34 @@
#include <linux/module.h>
#include "nvmet.h"
+int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
+{
+ int ret;
+
+ ns->bdev = blkdev_get_by_path(ns->device_path,
+ FMODE_READ | FMODE_WRITE, NULL);
+ if (IS_ERR(ns->bdev)) {
+ ret = PTR_ERR(ns->bdev);
+ if (ret != -ENOTBLK) {
+ pr_err("failed to open block device %s: (%ld)\n",
+ ns->device_path, PTR_ERR(ns->bdev));
+ }
+ ns->bdev = NULL;
+ return ret;
+ }
+ ns->size = i_size_read(ns->bdev->bd_inode);
+ ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+ return 0;
+}
+
+void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
+{
+ if (ns->bdev) {
+ blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
+ ns->bdev = NULL;
+ }
+}
+
static void nvmet_bio_done(struct bio *bio)
{
struct nvmet_req *req = bio->bi_private;
@@ -23,20 +51,14 @@ static void nvmet_bio_done(struct bio *bio)
nvmet_req_complete(req,
bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
- if (bio != &req->inline_bio)
+ if (bio != &req->b.inline_bio)
bio_put(bio);
}
-static inline u32 nvmet_rw_len(struct nvmet_req *req)
-{
- return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
- req->ns->blksize_shift;
-}
-
-static void nvmet_execute_rw(struct nvmet_req *req)
+static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
int sg_cnt = req->sg_cnt;
- struct bio *bio = &req->inline_bio;
+ struct bio *bio = &req->b.inline_bio;
struct scatterlist *sg;
sector_t sector;
blk_qc_t cookie;
@@ -89,9 +111,9 @@ static void nvmet_execute_rw(struct nvmet_req *req)
blk_poll(bdev_get_queue(req->ns->bdev), cookie);
}
-static void nvmet_execute_flush(struct nvmet_req *req)
+static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
- struct bio *bio = &req->inline_bio;
+ struct bio *bio = &req->b.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
bio_set_dev(bio, req->ns->bdev);
@@ -102,7 +124,7 @@ static void nvmet_execute_flush(struct nvmet_req *req)
submit_bio(bio);
}
-static u16 nvmet_discard_range(struct nvmet_ns *ns,
+static u16 nvmet_bdev_discard_range(struct nvmet_ns *ns,
struct nvme_dsm_range *range, struct bio **bio)
{
int ret;
@@ -116,7 +138,7 @@ static u16 nvmet_discard_range(struct nvmet_ns *ns,
return 0;
}
-static void nvmet_execute_discard(struct nvmet_req *req)
+static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
struct nvme_dsm_range range;
struct bio *bio = NULL;
@@ -129,7 +151,7 @@ static void nvmet_execute_discard(struct nvmet_req *req)
if (status)
break;
- status = nvmet_discard_range(req->ns, &range, &bio);
+ status = nvmet_bdev_discard_range(req->ns, &range, &bio);
if (status)
break;
}
@@ -148,11 +170,11 @@ static void nvmet_execute_discard(struct nvmet_req *req)
}
}
-static void nvmet_execute_dsm(struct nvmet_req *req)
+static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
switch (le32_to_cpu(req->cmd->dsm.attributes)) {
case NVME_DSMGMT_AD:
- nvmet_execute_discard(req);
+ nvmet_bdev_execute_discard(req);
return;
case NVME_DSMGMT_IDR:
case NVME_DSMGMT_IDW:
@@ -163,7 +185,7 @@ static void nvmet_execute_dsm(struct nvmet_req *req)
}
}
-static void nvmet_execute_write_zeroes(struct nvmet_req *req)
+static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
struct bio *bio = NULL;
@@ -189,38 +211,27 @@ static void nvmet_execute_write_zeroes(struct nvmet_req *req)
}
}
-u16 nvmet_parse_io_cmd(struct nvmet_req *req)
+u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
- u16 ret;
-
- ret = nvmet_check_ctrl_status(req, cmd);
- if (unlikely(ret)) {
- req->ns = NULL;
- return ret;
- }
-
- req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
- if (unlikely(!req->ns))
- return NVME_SC_INVALID_NS | NVME_SC_DNR;
switch (cmd->common.opcode) {
case nvme_cmd_read:
case nvme_cmd_write:
- req->execute = nvmet_execute_rw;
+ req->execute = nvmet_bdev_execute_rw;
req->data_len = nvmet_rw_len(req);
return 0;
case nvme_cmd_flush:
- req->execute = nvmet_execute_flush;
+ req->execute = nvmet_bdev_execute_flush;
req->data_len = 0;
return 0;
case nvme_cmd_dsm:
- req->execute = nvmet_execute_dsm;
+ req->execute = nvmet_bdev_execute_dsm;
req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
sizeof(struct nvme_dsm_range);
return 0;
case nvme_cmd_write_zeroes:
- req->execute = nvmet_execute_write_zeroes;
+ req->execute = nvmet_bdev_execute_write_zeroes;
return 0;
default:
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
new file mode 100644
index 000000000000..8c42b3a8c420
--- /dev/null
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe Over Fabrics Target File I/O commands implementation.
+ * Copyright (c) 2017-2018 Western Digital Corporation or its
+ * affiliates.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/uio.h>
+#include <linux/falloc.h>
+#include <linux/file.h>
+#include "nvmet.h"
+
+#define NVMET_MAX_MPOOL_BVEC 16
+#define NVMET_MIN_MPOOL_OBJ 16
+
+void nvmet_file_ns_disable(struct nvmet_ns *ns)
+{
+ if (ns->file) {
+ mempool_destroy(ns->bvec_pool);
+ ns->bvec_pool = NULL;
+ kmem_cache_destroy(ns->bvec_cache);
+ ns->bvec_cache = NULL;
+ fput(ns->file);
+ ns->file = NULL;
+ }
+}
+
+int nvmet_file_ns_enable(struct nvmet_ns *ns)
+{
+ int ret;
+ struct kstat stat;
+
+ ns->file = filp_open(ns->device_path,
+ O_RDWR | O_LARGEFILE | O_DIRECT, 0);
+ if (IS_ERR(ns->file)) {
+ pr_err("failed to open file %s: (%ld)\n",
+ ns->device_path, PTR_ERR(ns->file));
+ return PTR_ERR(ns->file);
+ }
+
+ ret = vfs_getattr(&ns->file->f_path,
+ &stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
+ if (ret)
+ goto err;
+
+ ns->size = stat.size;
+ ns->blksize_shift = file_inode(ns->file)->i_blkbits;
+
+ ns->bvec_cache = kmem_cache_create("nvmet-bvec",
+ NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!ns->bvec_cache) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
+ mempool_free_slab, ns->bvec_cache);
+
+ if (!ns->bvec_pool) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return ret;
+err:
+ ns->size = 0;
+ ns->blksize_shift = 0;
+ nvmet_file_ns_disable(ns);
+ return ret;
+}
+
+static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
+{
+ bv->bv_page = sg_page_iter_page(iter);
+ bv->bv_offset = iter->sg->offset;
+ bv->bv_len = PAGE_SIZE - iter->sg->offset;
+}
+
+static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
+ unsigned long nr_segs, size_t count)
+{
+ struct kiocb *iocb = &req->f.iocb;
+ ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
+ struct iov_iter iter;
+ int ki_flags = 0, rw;
+ ssize_t ret;
+
+ if (req->cmd->rw.opcode == nvme_cmd_write) {
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
+ ki_flags = IOCB_DSYNC;
+ call_iter = req->ns->file->f_op->write_iter;
+ rw = WRITE;
+ } else {
+ call_iter = req->ns->file->f_op->read_iter;
+ rw = READ;
+ }
+
+ iov_iter_bvec(&iter, ITER_BVEC | rw, req->f.bvec, nr_segs, count);
+
+ iocb->ki_pos = pos;
+ iocb->ki_filp = req->ns->file;
+ iocb->ki_flags = IOCB_DIRECT | ki_flags;
+
+ ret = call_iter(iocb, &iter);
+
+ if (ret != -EIOCBQUEUED && iocb->ki_complete)
+ iocb->ki_complete(iocb, ret, 0);
+
+ return ret;
+}
+
+static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
+{
+ struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
+
+ if (req->f.bvec != req->inline_bvec) {
+ if (likely(req->f.mpool_alloc == false))
+ kfree(req->f.bvec);
+ else
+ mempool_free(req->f.bvec, req->ns->bvec_pool);
+ }
+
+ nvmet_req_complete(req, ret != req->data_len ?
+ NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+}
+
+static void nvmet_file_execute_rw(struct nvmet_req *req)
+{
+ ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
+ struct sg_page_iter sg_pg_iter;
+ unsigned long bv_cnt = 0;
+ bool is_sync = false;
+ size_t len = 0, total_len = 0;
+ ssize_t ret = 0;
+ loff_t pos;
+
+ if (!req->sg_cnt || !nr_bvec) {
+ nvmet_req_complete(req, 0);
+ return;
+ }
+
+ if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
+ req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
+ GFP_KERNEL);
+ else
+ req->f.bvec = req->inline_bvec;
+
+ req->f.mpool_alloc = false;
+ if (unlikely(!req->f.bvec)) {
+ /* fallback under memory pressure */
+ req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
+ req->f.mpool_alloc = true;
+ if (nr_bvec > NVMET_MAX_MPOOL_BVEC)
+ is_sync = true;
+ }
+
+ pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
+
+ memset(&req->f.iocb, 0, sizeof(struct kiocb));
+ for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
+ nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
+ len += req->f.bvec[bv_cnt].bv_len;
+ total_len += req->f.bvec[bv_cnt].bv_len;
+ bv_cnt++;
+
+ WARN_ON_ONCE((nr_bvec - 1) < 0);
+
+ if (unlikely(is_sync) &&
+ (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
+ ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len);
+ if (ret < 0)
+ goto out;
+ pos += len;
+ bv_cnt = 0;
+ len = 0;
+ }
+ nr_bvec--;
+ }
+
+ if (WARN_ON_ONCE(total_len != req->data_len))
+ ret = -EIO;
+out:
+ if (unlikely(is_sync || ret)) {
+ nvmet_file_io_done(&req->f.iocb, ret < 0 ? ret : total_len, 0);
+ return;
+ }
+ req->f.iocb.ki_complete = nvmet_file_io_done;
+ nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
+}
+
+static void nvmet_file_flush_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+ int ret;
+
+ ret = vfs_fsync(req->ns->file, 1);
+
+ nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+}
+
+static void nvmet_file_execute_flush(struct nvmet_req *req)
+{
+ INIT_WORK(&req->f.work, nvmet_file_flush_work);
+ schedule_work(&req->f.work);
+}
+
+static void nvmet_file_execute_discard(struct nvmet_req *req)
+{
+ int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+ struct nvme_dsm_range range;
+ loff_t offset;
+ loff_t len;
+ int i, ret;
+
+ for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
+ if (nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+ sizeof(range)))
+ break;
+ offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
+ len = le32_to_cpu(range.nlb) << req->ns->blksize_shift;
+ ret = vfs_fallocate(req->ns->file, mode, offset, len);
+ if (ret)
+ break;
+ }
+
+ nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+}
+
+static void nvmet_file_dsm_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+ switch (le32_to_cpu(req->cmd->dsm.attributes)) {
+ case NVME_DSMGMT_AD:
+ nvmet_file_execute_discard(req);
+ return;
+ case NVME_DSMGMT_IDR:
+ case NVME_DSMGMT_IDW:
+ default:
+ /* Not supported yet */
+ nvmet_req_complete(req, 0);
+ return;
+ }
+}
+
+static void nvmet_file_execute_dsm(struct nvmet_req *req)
+{
+ INIT_WORK(&req->f.work, nvmet_file_dsm_work);
+ schedule_work(&req->f.work);
+}
+
+static void nvmet_file_write_zeroes_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+ struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
+ int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
+ loff_t offset;
+ loff_t len;
+ int ret;
+
+ offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
+ len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
+ req->ns->blksize_shift);
+
+ ret = vfs_fallocate(req->ns->file, mode, offset, len);
+ nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+}
+
+static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
+{
+ INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
+ schedule_work(&req->f.work);
+}
+
+u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->common.opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ req->execute = nvmet_file_execute_rw;
+ req->data_len = nvmet_rw_len(req);
+ return 0;
+ case nvme_cmd_flush:
+ req->execute = nvmet_file_execute_flush;
+ req->data_len = 0;
+ return 0;
+ case nvme_cmd_dsm:
+ req->execute = nvmet_file_execute_dsm;
+ req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
+ sizeof(struct nvme_dsm_range);
+ return 0;
+ case nvme_cmd_write_zeroes:
+ req->execute = nvmet_file_execute_write_zeroes;
+ req->data_len = 0;
+ return 0;
+ default:
+ pr_err("unhandled cmd for file ns %d on qid %d\n",
+ cmd->common.opcode, req->sq->qid);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 27a8561c0cb9..1304ec3a7ede 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -45,6 +45,7 @@ struct nvme_loop_ctrl {
struct nvme_ctrl ctrl;
struct nvmet_ctrl *target_ctrl;
+ struct nvmet_port *port;
};
static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
@@ -63,7 +64,8 @@ struct nvme_loop_queue {
unsigned long flags;
};
-static struct nvmet_port *nvmet_loop_port;
+static LIST_HEAD(nvme_loop_ports);
+static DEFINE_MUTEX(nvme_loop_ports_mutex);
static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);
@@ -146,7 +148,7 @@ nvme_loop_timeout(struct request *rq, bool reserved)
/* fail with DNR on admin cmd timeout */
nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
- return BLK_EH_HANDLED;
+ return BLK_EH_DONE;
}
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -169,12 +171,12 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(req);
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
- iod->req.port = nvmet_loop_port;
+ iod->req.port = queue->ctrl->port;
if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
&queue->nvme_sq, &nvme_loop_ops))
return BLK_STS_OK;
- if (blk_rq_payload_bytes(req)) {
+ if (blk_rq_nr_phys_segments(req)) {
iod->sg_table.sgl = iod->first_sgl;
if (sg_alloc_table_chained(&iod->sg_table,
blk_rq_nr_phys_segments(req),
@@ -517,6 +519,7 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
.free_ctrl = nvme_loop_free_ctrl,
.submit_async_event = nvme_loop_submit_async_event,
.delete_ctrl = nvme_loop_delete_ctrl_host,
+ .get_address = nvmf_get_address,
};
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -565,6 +568,23 @@ out_destroy_queues:
return ret;
}
+static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
+{
+ struct nvmet_port *p, *found = NULL;
+
+ mutex_lock(&nvme_loop_ports_mutex);
+ list_for_each_entry(p, &nvme_loop_ports, entry) {
+ /* if no transport address is specified use the first port */
+ if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
+ strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
+ continue;
+ found = p;
+ break;
+ }
+ mutex_unlock(&nvme_loop_ports_mutex);
+ return found;
+}
+
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
struct nvmf_ctrl_options *opts)
{
@@ -589,6 +609,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
+ ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
GFP_KERNEL);
@@ -646,27 +667,17 @@ out_put_ctrl:
static int nvme_loop_add_port(struct nvmet_port *port)
{
- /*
- * XXX: disalow adding more than one port so
- * there is no connection rejections when a
- * a subsystem is assigned to a port for which
- * loop doesn't have a pointer.
- * This scenario would be possible if we allowed
- * more than one port to be added and a subsystem
- * was assigned to a port other than nvmet_loop_port.
- */
-
- if (nvmet_loop_port)
- return -EPERM;
-
- nvmet_loop_port = port;
+ mutex_lock(&nvme_loop_ports_mutex);
+ list_add_tail(&port->entry, &nvme_loop_ports);
+ mutex_unlock(&nvme_loop_ports_mutex);
return 0;
}
static void nvme_loop_remove_port(struct nvmet_port *port)
{
- if (port == nvmet_loop_port)
- nvmet_loop_port = NULL;
+ mutex_lock(&nvme_loop_ports_mutex);
+ list_del_init(&port->entry);
+ mutex_unlock(&nvme_loop_ports_mutex);
}
static const struct nvmet_fabrics_ops nvme_loop_ops = {
@@ -682,6 +693,7 @@ static struct nvmf_transport_ops nvme_loop_transport = {
.name = "loop",
.module = THIS_MODULE,
.create_ctrl = nvme_loop_create_ctrl,
+ .allowed_opts = NVMF_OPT_TRADDR,
};
static int __init nvme_loop_init_module(void)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 15fd84ab21f8..480dfe10fad9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -30,6 +30,21 @@
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
+
+/*
+ * Supported optional AENs:
+ */
+#define NVMET_AEN_CFG_OPTIONAL \
+ NVME_AEN_CFG_NS_ATTR
+
+/*
+ * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
+ */
+#define NVMET_AEN_CFG_ALL \
+ (NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
+ NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
+ NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)
+
/* Helper Macros when NVMe error is NVME_SC_CONNECT_INVALID_PARAM
* The 16 bit shift is to set IATTR bit to 1, which means offending
* offset starts in the data section of connect()
@@ -43,6 +58,7 @@ struct nvmet_ns {
struct list_head dev_link;
struct percpu_ref ref;
struct block_device *bdev;
+ struct file *file;
u32 nsid;
u32 blksize_shift;
loff_t size;
@@ -57,6 +73,8 @@ struct nvmet_ns {
struct config_group group;
struct completion disable_done;
+ mempool_t *bvec_pool;
+ struct kmem_cache *bvec_cache;
};
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
@@ -82,7 +100,7 @@ struct nvmet_sq {
/**
* struct nvmet_port - Common structure to keep port
* information for the target.
- * @entry: List head for holding a list of these elements.
+ * @entry: Entry into referrals or transport list.
* @disc_addr: Address information is stored in a format defined
* for a discovery log page entry.
* @group: ConfigFS group for this element's folder.
@@ -120,6 +138,8 @@ struct nvmet_ctrl {
u16 cntlid;
u32 kato;
+ u32 aen_enabled;
+ unsigned long aen_masked;
struct nvmet_req *async_event_cmds[NVMET_ASYNC_EVENTS];
unsigned int nr_async_event_cmds;
struct list_head async_events;
@@ -132,6 +152,9 @@ struct nvmet_ctrl {
const struct nvmet_fabrics_ops *ops;
+ __le32 *changed_ns_list;
+ u32 nr_changed_ns;
+
char subsysnqn[NVMF_NQN_FIELD_LEN];
char hostnqn[NVMF_NQN_FIELD_LEN];
};
@@ -222,8 +245,18 @@ struct nvmet_req {
struct nvmet_cq *cq;
struct nvmet_ns *ns;
struct scatterlist *sg;
- struct bio inline_bio;
struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
+ union {
+ struct {
+ struct bio inline_bio;
+ } b;
+ struct {
+ bool mpool_alloc;
+ struct kiocb iocb;
+ struct bio_vec *bvec;
+ struct work_struct work;
+ } f;
+ };
int sg_cnt;
/* data length as parsed from the command: */
size_t data_len;
@@ -263,7 +296,8 @@ struct nvmet_async_event {
};
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
-u16 nvmet_parse_io_cmd(struct nvmet_req *req);
+u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
+u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
@@ -316,6 +350,7 @@ u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
size_t len);
+u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
u32 nvmet_get_log_page_len(struct nvme_command *cmd);
@@ -338,4 +373,14 @@ extern struct rw_semaphore nvmet_config_sem;
bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
const char *hostnqn);
+int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
+int nvmet_file_ns_enable(struct nvmet_ns *ns);
+void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
+void nvmet_file_ns_disable(struct nvmet_ns *ns);
+
+static inline u32 nvmet_rw_len(struct nvmet_req *req)
+{
+ return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
+ req->ns->blksize_shift;
+}
#endif /* _NVMET_H */
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 21a3e93417c4..d29cedb3f23b 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1169,7 +1169,7 @@ void * ccio_get_iommu(const struct parisc_device *dev)
* to/from certain pages. To avoid this happening, we mark these pages
* as `used', and ensure that nothing will try to allocate from them.
*/
-void ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
+void __init ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
{
unsigned int idx;
struct parisc_device *dev = parisc_parent(cujo);
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index e7bbdf947bbc..8350ca2311c7 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -91,6 +91,8 @@ static int send_command(struct cros_ec_device *ec_dev,
usleep_range(10000, 11000);
ret = (*xfer_fxn)(ec_dev, status_msg);
+ if (ret == -EAGAIN)
+ continue;
if (ret < 0)
break;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index bc309c5327ff..566644bb496a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -168,8 +168,8 @@ config DELL_WMI
depends on DMI
depends on INPUT
depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on DELL_SMBIOS
select DELL_WMI_DESCRIPTOR
- select DELL_SMBIOS
select INPUT_SPARSEKMAP
---help---
Say Y here if you want to support WMI-based hotkeys on Dell laptops.
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a32c5c00e0e7..ffffb9909ae1 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -163,6 +163,16 @@ MODULE_LICENSE("GPL");
static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
+static bool ashs_present(void)
+{
+ int i = 0;
+ while (ashs_ids[i]) {
+ if (acpi_dev_found(ashs_ids[i++]))
+ return true;
+ }
+ return false;
+}
+
struct bios_args {
u32 arg0;
u32 arg1;
@@ -1025,6 +1035,9 @@ static int asus_new_rfkill(struct asus_wmi *asus,
static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
{
+ if (asus->driver->wlan_ctrl_by_user && ashs_present())
+ return;
+
asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
@@ -2121,16 +2134,6 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
return 0;
}
-static bool ashs_present(void)
-{
- int i = 0;
- while (ashs_ids[i]) {
- if (acpi_dev_found(ashs_ids[i++]))
- return true;
- }
- return false;
-}
-
/*
* WMI Driver
*/
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index 360e06b20c53..ac18f2f27881 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -110,7 +110,7 @@ static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */
UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */
UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC) */
- UNIPHIER_RESETX(12, 0x200c, 5), /* GIO (PCIe, USB3) */
+ UNIPHIER_RESETX(14, 0x200c, 5), /* USB30 */
UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */
UNIPHIER_RESETX(17, 0x200c, 13), /* USB30-PHY1 */
UNIPHIER_RESETX(18, 0x200c, 14), /* USB30-PHY2 */
@@ -127,8 +127,8 @@ static const struct uniphier_reset_data uniphier_pxs3_sys_reset_data[] = {
UNIPHIER_RESETX(6, 0x200c, 9), /* Ether0 */
UNIPHIER_RESETX(7, 0x200c, 10), /* Ether1 */
UNIPHIER_RESETX(8, 0x200c, 12), /* STDMAC */
- UNIPHIER_RESETX(12, 0x200c, 4), /* USB30 link (GIO0) */
- UNIPHIER_RESETX(13, 0x200c, 5), /* USB31 link (GIO1) */
+ UNIPHIER_RESETX(12, 0x200c, 4), /* USB30 link */
+ UNIPHIER_RESETX(13, 0x200c, 5), /* USB31 link */
UNIPHIER_RESETX(16, 0x200c, 16), /* USB30-PHY0 */
UNIPHIER_RESETX(17, 0x200c, 18), /* USB30-PHY1 */
UNIPHIER_RESETX(18, 0x200c, 20), /* USB30-PHY2 */
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 04143c08bd6e..cb9b685118da 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3034,7 +3034,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
cqr->callback_data = req;
cqr->status = DASD_CQR_FILLED;
cqr->dq = dq;
- req->completion_data = cqr;
+ *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
+
blk_mq_start_request(req);
spin_lock(&block->queue_lock);
list_add_tail(&cqr->blocklist, &block->ccw_queue);
@@ -3053,19 +3054,20 @@ out:
*
* Return values:
* BLK_EH_RESET_TIMER if the request should be left running
- * BLK_EH_NOT_HANDLED if the request is handled or terminated
+ * BLK_EH_DONE if the request is handled or terminated
* by the driver.
*/
enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
{
- struct dasd_ccw_req *cqr = req->completion_data;
struct dasd_block *block = req->q->queuedata;
struct dasd_device *device;
+ struct dasd_ccw_req *cqr;
unsigned long flags;
int rc = 0;
+ cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
if (!cqr)
- return BLK_EH_NOT_HANDLED;
+ return BLK_EH_DONE;
spin_lock_irqsave(&cqr->dq->lock, flags);
device = cqr->startdev ? cqr->startdev : block->base;
@@ -3124,7 +3126,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
spin_unlock(&block->queue_lock);
spin_unlock_irqrestore(&cqr->dq->lock, flags);
- return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+ return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
}
static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -3169,6 +3171,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
int rc;
block->tag_set.ops = &dasd_mq_ops;
+ block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 439991d71b14..4c14ce428e92 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -141,7 +141,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
int i;
for (i = 0; i < nr_queues; i++) {
- q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+ q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
if (!q)
return -ENOMEM;
@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
{
struct ciw *ciw;
struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
- int rc;
memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
- rc = -EINVAL;
- goto out_err;
+ return -EINVAL;
}
irq_ptr->equeue = *ciw;
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
- rc = -EINVAL;
- goto out_err;
+ return -EINVAL;
}
irq_ptr->aqueue = *ciw;
@@ -512,9 +509,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
init_data->cdev->handler = qdio_int_handler;
spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev));
return 0;
-out_err:
- qdio_release_memory(irq_ptr);
- return rc;
}
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 2c7550797ec2..dce92b2a895d 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -715,6 +715,10 @@ void cp_free(struct channel_program *cp)
* and stores the result to ccwchain list. @cp must have been
* initialized by a previous call with cp_init(). Otherwise, undefined
* behavior occurs.
+ * For each chain composing the channel program:
+ * - On entry ch_len holds the count of CCWs to be translated.
+ * - On exit ch_len is adjusted to the count of successfully translated CCWs.
+ * This allows cp_free to find in ch_len the count of CCWs to free in a chain.
*
* The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced
* as helpers to do ccw chain translation inside the kernel. Basically
@@ -749,11 +753,18 @@ int cp_prefetch(struct channel_program *cp)
for (idx = 0; idx < len; idx++) {
ret = ccwchain_fetch_one(chain, idx, cp);
if (ret)
- return ret;
+ goto out_err;
}
}
return 0;
+out_err:
+ /* Only cleanup the chain elements that were actually translated. */
+ chain->ch_len = idx;
+ list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
+ chain->ch_len = 0;
+ }
+ return ret;
}
/**
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index a8b831000b2d..18c4f933e8b9 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -4,7 +4,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#define KMSG_COMPONENT "zfcp"
@@ -308,6 +308,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
+/**
+ * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
+ * @tag: identifier for event
+ * @adapter: adapter on which the erp_action should run
+ * @port: remote port involved in the erp_action
+ * @sdev: scsi device involved in the erp_action
+ * @want: wanted erp_action
+ * @need: required erp_action
+ *
+ * The adapter->erp_lock must not be held.
+ */
+void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+ struct zfcp_port *port, struct scsi_device *sdev,
+ u8 want, u8 need)
+{
+ unsigned long flags;
+
+ read_lock_irqsave(&adapter->erp_lock, flags);
+ zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
+ read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
/**
* zfcp_dbf_rec_run_lvl - trace event related to running recovery
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index bf8ea4df2bb8..e5eed8aac0ce 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -4,7 +4,7 @@
*
* External function declarations.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2018
*/
#ifndef ZFCP_EXT_H
@@ -35,6 +35,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+ struct zfcp_port *port,
+ struct scsi_device *sdev, u8 want, u8 need);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
struct zfcp_erp_action *erp);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 4d2ba5682493..22f9562f415c 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -4,7 +4,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#define KMSG_COMPONENT "zfcp"
@@ -618,9 +618,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
ids.port_id = port->d_id;
ids.roles = FC_RPORT_ROLE_FCP_TARGET;
- zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+ zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
if (!rport) {
dev_err(&port->adapter->ccw_device->dev,
@@ -642,9 +642,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
struct fc_rport *rport = port->rport;
if (rport) {
- zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+ zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
fc_remote_port_delete(rport);
port->rport = NULL;
}
diff --git a/drivers/sbus/char/Kconfig b/drivers/sbus/char/Kconfig
index bf3c5f735614..89edd13fd572 100644
--- a/drivers/sbus/char/Kconfig
+++ b/drivers/sbus/char/Kconfig
@@ -28,13 +28,6 @@ config TADPOLE_TS102_UCTRL
events, and can also notice the attachment/detachment of external
monitors and mice.
-config SUN_JSFLASH
- tristate "JavaStation OS Flash SIMM"
- depends on SPARC32
- help
- If you say Y here, you will be able to boot from your JavaStation's
- Flash memory.
-
config BBC_I2C
tristate "UltraSPARC-III bootbus i2c controller driver"
depends on PCI && SPARC64
diff --git a/drivers/sbus/char/Makefile b/drivers/sbus/char/Makefile
index 8c48ed96683f..44347c918f6b 100644
--- a/drivers/sbus/char/Makefile
+++ b/drivers/sbus/char/Makefile
@@ -15,6 +15,5 @@ obj-$(CONFIG_DISPLAY7SEG) += display7seg.o
obj-$(CONFIG_OBP_FLASH) += flash.o
obj-$(CONFIG_SUN_OPENPROMIO) += openprom.o
obj-$(CONFIG_TADPOLE_TS102_UCTRL) += uctrl.o
-obj-$(CONFIG_SUN_JSFLASH) += jsflash.o
obj-$(CONFIG_BBC_I2C) += bbc.o
obj-$(CONFIG_ORACLE_DAX) += oradax.o
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
deleted file mode 100644
index 14f377ac1280..000000000000
--- a/drivers/sbus/char/jsflash.c
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * drivers/sbus/char/jsflash.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds (drivers/char/mem.c)
- * Copyright (C) 1997 Eddie C. Dost (drivers/sbus/char/flash.c)
- * Copyright (C) 1997-2000 Pavel Machek <pavel@ucw.cz> (drivers/block/nbd.c)
- * Copyright (C) 1999-2000 Pete Zaitcev
- *
- * This driver is used to program OS into a Flash SIMM on
- * Krups and Espresso platforms.
- *
- * TODO: do not allow erase/programming if file systems are mounted.
- * TODO: Erase/program both banks of a 8MB SIMM.
- *
- * It is anticipated that programming an OS Flash will be a routine
- * procedure. In the same time it is exceedingly dangerous because
- * a user can program its OBP flash with OS image and effectively
- * kill the machine.
- *
- * This driver uses an interface different from Eddie's flash.c
- * as a silly safeguard.
- *
- * XXX The flash.c manipulates page caching characteristics in a certain
- * dubious way; also it assumes that remap_pfn_range() can remap
- * PCI bus locations, which may be false. ioremap() must be used
- * instead. We should discuss this.
- */
-
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/miscdevice.h>
-#include <linux/fcntl.h>
-#include <linux/poll.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/genhd.h>
-#include <linux/blkdev.h>
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/io.h>
-#include <asm/pcic.h>
-#include <asm/oplib.h>
-
-#include <asm/jsflash.h> /* ioctl arguments. <linux/> ?? */
-#define JSFIDSZ (sizeof(struct jsflash_ident_arg))
-#define JSFPRGSZ (sizeof(struct jsflash_program_arg))
-
-/*
- * Our device numbers have no business in system headers.
- * The only thing a user knows is the device name /dev/jsflash.
- *
- * Block devices are laid out like this:
- * minor+0 - Bootstrap, for 8MB SIMM 0x20400000[0x800000]
- * minor+1 - Filesystem to mount, normally 0x20400400[0x7ffc00]
- * minor+2 - Whole flash area for any case... 0x20000000[0x01000000]
- * Total 3 minors per flash device.
- *
- * It is easier to have static size vectors, so we define
- * a total minor range JSF_MAX, which must cover all minors.
- */
-/* character device */
-#define JSF_MINOR 178 /* 178 is registered with hpa */
-/* block device */
-#define JSF_MAX 3 /* 3 minors wasted total so far. */
-#define JSF_NPART 3 /* 3 minors per flash device */
-#define JSF_PART_BITS 2 /* 2 bits of minors to cover JSF_NPART */
-#define JSF_PART_MASK 0x3 /* 2 bits mask */
-
-static DEFINE_MUTEX(jsf_mutex);
-
-/*
- * Access functions.
- * We could ioremap(), but it's easier this way.
- */
-static unsigned int jsf_inl(unsigned long addr)
-{
- unsigned long retval;
-
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r" (retval) :
- "r" (addr), "i" (ASI_M_BYPASS));
- return retval;
-}
-
-static void jsf_outl(unsigned long addr, __u32 data)
-{
-
- __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
- "r" (data), "r" (addr), "i" (ASI_M_BYPASS) :
- "memory");
-}
-
-/*
- * soft carrier
- */
-
-struct jsfd_part {
- unsigned long dbase;
- unsigned long dsize;
-};
-
-struct jsflash {
- unsigned long base;
- unsigned long size;
- unsigned long busy; /* In use? */
- struct jsflash_ident_arg id;
- /* int mbase; */ /* Minor base, typically zero */
- struct jsfd_part dv[JSF_NPART];
-};
-
-/*
- * We do not map normal memory or obio as a safety precaution.
- * But offsets are real, for ease of userland programming.
- */
-#define JSF_BASE_TOP 0x30000000
-#define JSF_BASE_ALL 0x20000000
-
-#define JSF_BASE_JK 0x20400000
-
-/*
- */
-static struct gendisk *jsfd_disk[JSF_MAX];
-
-/*
- * Let's pretend we may have several of these...
- */
-static struct jsflash jsf0;
-
-/*
- * Wait for AMD to finish its embedded algorithm.
- * We use the Toggle bit DQ6 (0x40) because it does not
- * depend on the data value as /DATA bit DQ7 does.
- *
- * XXX Do we need any timeout here? So far it never hanged, beware broken hw.
- */
-static void jsf_wait(unsigned long p) {
- unsigned int x1, x2;
-
- for (;;) {
- x1 = jsf_inl(p);
- x2 = jsf_inl(p);
- if ((x1 & 0x40404040) == (x2 & 0x40404040)) return;
- }
-}
-
-/*
- * Programming will only work if Flash is clean,
- * we leave it to the programmer application.
- *
- * AMD must be programmed one byte at a time;
- * thus, Simple Tech SIMM must be written 4 bytes at a time.
- *
- * Write waits for the chip to become ready after the write
- * was finished. This is done so that application would read
- * consistent data after the write is done.
- */
-static void jsf_write4(unsigned long fa, u32 data) {
-
- jsf_outl(fa, 0xAAAAAAAA); /* Unlock 1 Write 1 */
- jsf_outl(fa, 0x55555555); /* Unlock 1 Write 2 */
- jsf_outl(fa, 0xA0A0A0A0); /* Byte Program */
- jsf_outl(fa, data);
-
- jsf_wait(fa);
-}
-
-/*
- */
-static void jsfd_read(char *buf, unsigned long p, size_t togo) {
- union byte4 {
- char s[4];
- unsigned int n;
- } b;
-
- while (togo >= 4) {
- togo -= 4;
- b.n = jsf_inl(p);
- memcpy(buf, b.s, 4);
- p += 4;
- buf += 4;
- }
-}
-
-static int jsfd_queue;
-
-static struct request *jsfd_next_request(void)
-{
- struct request_queue *q;
- struct request *rq;
- int old_pos = jsfd_queue;
-
- do {
- q = jsfd_disk[jsfd_queue]->queue;
- if (++jsfd_queue == JSF_MAX)
- jsfd_queue = 0;
- if (q) {
- rq = blk_fetch_request(q);
- if (rq)
- return rq;
- }
- } while (jsfd_queue != old_pos);
-
- return NULL;
-}
-
-static void jsfd_request(void)
-{
- struct request *req;
-
- req = jsfd_next_request();
- while (req) {
- struct jsfd_part *jdp = req->rq_disk->private_data;
- unsigned long offset = blk_rq_pos(req) << 9;
- size_t len = blk_rq_cur_bytes(req);
- blk_status_t err = BLK_STS_IOERR;
-
- if ((offset + len) > jdp->dsize)
- goto end;
-
- if (rq_data_dir(req) != READ) {
- printk(KERN_ERR "jsfd: write\n");
- goto end;
- }
-
- if ((jdp->dbase & 0xff000000) != 0x20000000) {
- printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase);
- goto end;
- }
-
- jsfd_read(bio_data(req->bio), jdp->dbase + offset, len);
- err = BLK_STS_OK;
- end:
- if (!__blk_end_request_cur(req, err))
- req = jsfd_next_request();
- }
-}
-
-static void jsfd_do_request(struct request_queue *q)
-{
- jsfd_request();
-}
-
-/*
- * The memory devices use the full 32/64 bits of the offset, and so we cannot
- * check against negative addresses: they are ok. The return value is weird,
- * though, in that case (0).
- *
- * also note that seeking relative to the "end of file" isn't supported:
- * it has no meaning, so it returns -EINVAL.
- */
-static loff_t jsf_lseek(struct file * file, loff_t offset, int orig)
-{
- loff_t ret;
-
- mutex_lock(&jsf_mutex);
- switch (orig) {
- case 0:
- file->f_pos = offset;
- ret = file->f_pos;
- break;
- case 1:
- file->f_pos += offset;
- ret = file->f_pos;
- break;
- default:
- ret = -EINVAL;
- }
- mutex_unlock(&jsf_mutex);
- return ret;
-}
-
-/*
- * OS SIMM Cannot be read in other size but a 32bits word.
- */
-static ssize_t jsf_read(struct file * file, char __user * buf,
- size_t togo, loff_t *ppos)
-{
- unsigned long p = *ppos;
- char __user *tmp = buf;
-
- union byte4 {
- char s[4];
- unsigned int n;
- } b;
-
- if (p < JSF_BASE_ALL || p >= JSF_BASE_TOP) {
- return 0;
- }
-
- if ((p + togo) < p /* wrap */
- || (p + togo) >= JSF_BASE_TOP) {
- togo = JSF_BASE_TOP - p;
- }
-
- if (p < JSF_BASE_ALL && togo != 0) {
-#if 0 /* __bzero XXX */
- size_t x = JSF_BASE_ALL - p;
- if (x > togo) x = togo;
- clear_user(tmp, x);
- tmp += x;
- p += x;
- togo -= x;
-#else
- /*
- * Implementation of clear_user() calls __bzero
- * without regard to modversions,
- * so we cannot build a module.
- */
- return 0;
-#endif
- }
-
- while (togo >= 4) {
- togo -= 4;
- b.n = jsf_inl(p);
- if (copy_to_user(tmp, b.s, 4))
- return -EFAULT;
- tmp += 4;
- p += 4;
- }
-
- /*
- * XXX Small togo may remain if 1 byte is ordered.
- * It would be nice if we did a word size read and unpacked it.
- */
-
- *ppos = p;
- return tmp-buf;
-}
-
-static ssize_t jsf_write(struct file * file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- return -ENOSPC;
-}
-
-/*
- */
-static int jsf_ioctl_erase(unsigned long arg)
-{
- unsigned long p;
-
- /* p = jsf0.base; hits wrong bank */
- p = 0x20400000;
-
- jsf_outl(p, 0xAAAAAAAA); /* Unlock 1 Write 1 */
- jsf_outl(p, 0x55555555); /* Unlock 1 Write 2 */
- jsf_outl(p, 0x80808080); /* Erase setup */
- jsf_outl(p, 0xAAAAAAAA); /* Unlock 2 Write 1 */
- jsf_outl(p, 0x55555555); /* Unlock 2 Write 2 */
- jsf_outl(p, 0x10101010); /* Chip erase */
-
-#if 0
- /*
- * This code is ok, except that counter based timeout
- * has no place in this world. Let's just drop timeouts...
- */
- {
- int i;
- __u32 x;
- for (i = 0; i < 1000000; i++) {
- x = jsf_inl(p);
- if ((x & 0x80808080) == 0x80808080) break;
- }
- if ((x & 0x80808080) != 0x80808080) {
- printk("jsf0: erase timeout with 0x%08x\n", x);
- } else {
- printk("jsf0: erase done with 0x%08x\n", x);
- }
- }
-#else
- jsf_wait(p);
-#endif
-
- return 0;
-}
-
-/*
- * Program a block of flash.
- * Very simple because we can do it byte by byte anyway.
- */
-static int jsf_ioctl_program(void __user *arg)
-{
- struct jsflash_program_arg abuf;
- char __user *uptr;
- unsigned long p;
- unsigned int togo;
- union {
- unsigned int n;
- char s[4];
- } b;
-
- if (copy_from_user(&abuf, arg, JSFPRGSZ))
- return -EFAULT;
- p = abuf.off;
- togo = abuf.size;
- if ((togo & 3) || (p & 3)) return -EINVAL;
-
- uptr = (char __user *) (unsigned long) abuf.data;
- while (togo != 0) {
- togo -= 4;
- if (copy_from_user(&b.s[0], uptr, 4))
- return -EFAULT;
- jsf_write4(p, b.n);
- p += 4;
- uptr += 4;
- }
-
- return 0;
-}
-
-static long jsf_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
- mutex_lock(&jsf_mutex);
- int error = -ENOTTY;
- void __user *argp = (void __user *)arg;
-
- if (!capable(CAP_SYS_ADMIN)) {
- mutex_unlock(&jsf_mutex);
- return -EPERM;
- }
- switch (cmd) {
- case JSFLASH_IDENT:
- if (copy_to_user(argp, &jsf0.id, JSFIDSZ)) {
- mutex_unlock(&jsf_mutex);
- return -EFAULT;
- }
- break;
- case JSFLASH_ERASE:
- error = jsf_ioctl_erase(arg);
- break;
- case JSFLASH_PROGRAM:
- error = jsf_ioctl_program(argp);
- break;
- }
-
- mutex_unlock(&jsf_mutex);
- return error;
-}
-
-static int jsf_mmap(struct file * file, struct vm_area_struct * vma)
-{
- return -ENXIO;
-}
-
-static int jsf_open(struct inode * inode, struct file * filp)
-{
- mutex_lock(&jsf_mutex);
- if (jsf0.base == 0) {
- mutex_unlock(&jsf_mutex);
- return -ENXIO;
- }
- if (test_and_set_bit(0, (void *)&jsf0.busy) != 0) {
- mutex_unlock(&jsf_mutex);
- return -EBUSY;
- }
-
- mutex_unlock(&jsf_mutex);
- return 0; /* XXX What security? */
-}
-
-static int jsf_release(struct inode *inode, struct file *file)
-{
- jsf0.busy = 0;
- return 0;
-}
-
-static const struct file_operations jsf_fops = {
- .owner = THIS_MODULE,
- .llseek = jsf_lseek,
- .read = jsf_read,
- .write = jsf_write,
- .unlocked_ioctl = jsf_ioctl,
- .mmap = jsf_mmap,
- .open = jsf_open,
- .release = jsf_release,
-};
-
-static struct miscdevice jsf_dev = { JSF_MINOR, "jsflash", &jsf_fops };
-
-static const struct block_device_operations jsfd_fops = {
- .owner = THIS_MODULE,
-};
-
-static int jsflash_init(void)
-{
- int rc;
- struct jsflash *jsf;
- phandle node;
- char banner[128];
- struct linux_prom_registers reg0;
-
- node = prom_getchild(prom_root_node);
- node = prom_searchsiblings(node, "flash-memory");
- if (node != 0 && (s32)node != -1) {
- if (prom_getproperty(node, "reg",
- (char *)&reg0, sizeof(reg0)) == -1) {
- printk("jsflash: no \"reg\" property\n");
- return -ENXIO;
- }
- if (reg0.which_io != 0) {
- printk("jsflash: bus number nonzero: 0x%x:%x\n",
- reg0.which_io, reg0.phys_addr);
- return -ENXIO;
- }
- /*
- * Flash may be somewhere else, for instance on Ebus.
- * So, don't do the following check for IIep flash space.
- */
-#if 0
- if ((reg0.phys_addr >> 24) != 0x20) {
- printk("jsflash: suspicious address: 0x%x:%x\n",
- reg0.which_io, reg0.phys_addr);
- return -ENXIO;
- }
-#endif
- if ((int)reg0.reg_size <= 0) {
- printk("jsflash: bad size 0x%x\n", (int)reg0.reg_size);
- return -ENXIO;
- }
- } else {
- /* XXX Remove this code once PROLL ID12 got widespread */
- printk("jsflash: no /flash-memory node, use PROLL >= 12\n");
- prom_getproperty(prom_root_node, "banner-name", banner, 128);
- if (strcmp (banner, "JavaStation-NC") != 0 &&
- strcmp (banner, "JavaStation-E") != 0) {
- return -ENXIO;
- }
- reg0.which_io = 0;
- reg0.phys_addr = 0x20400000;
- reg0.reg_size = 0x00800000;
- }
-
- /* Let us be really paranoid for modifications to probing code. */
- if (sparc_cpu_model != sun4m) {
- /* We must be on sun4m because we use MMU Bypass ASI. */
- return -ENXIO;
- }
-
- if (jsf0.base == 0) {
- jsf = &jsf0;
-
- jsf->base = reg0.phys_addr;
- jsf->size = reg0.reg_size;
-
- /* XXX Redo the userland interface. */
- jsf->id.off = JSF_BASE_ALL;
- jsf->id.size = 0x01000000; /* 16M - all segments */
- strcpy(jsf->id.name, "Krups_all");
-
- jsf->dv[0].dbase = jsf->base;
- jsf->dv[0].dsize = jsf->size;
- jsf->dv[1].dbase = jsf->base + 1024;
- jsf->dv[1].dsize = jsf->size - 1024;
- jsf->dv[2].dbase = JSF_BASE_ALL;
- jsf->dv[2].dsize = 0x01000000;
-
- printk("Espresso Flash @0x%lx [%d MB]\n", jsf->base,
- (int) (jsf->size / (1024*1024)));
- }
-
- if ((rc = misc_register(&jsf_dev)) != 0) {
- printk(KERN_ERR "jsf: unable to get misc minor %d\n",
- JSF_MINOR);
- jsf0.base = 0;
- return rc;
- }
-
- return 0;
-}
-
-static int jsfd_init(void)
-{
- static DEFINE_SPINLOCK(lock);
- struct jsflash *jsf;
- struct jsfd_part *jdp;
- int err;
- int i;
-
- if (jsf0.base == 0)
- return -ENXIO;
-
- err = -ENOMEM;
- for (i = 0; i < JSF_MAX; i++) {
- struct gendisk *disk = alloc_disk(1);
- if (!disk)
- goto out;
- disk->queue = blk_init_queue(jsfd_do_request, &lock);
- if (!disk->queue) {
- put_disk(disk);
- goto out;
- }
- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
- jsfd_disk[i] = disk;
- }
-
- if (register_blkdev(JSFD_MAJOR, "jsfd")) {
- err = -EIO;
- goto out;
- }
-
- for (i = 0; i < JSF_MAX; i++) {
- struct gendisk *disk = jsfd_disk[i];
- if ((i & JSF_PART_MASK) >= JSF_NPART) continue;
- jsf = &jsf0; /* actually, &jsfv[i >> JSF_PART_BITS] */
- jdp = &jsf->dv[i&JSF_PART_MASK];
-
- disk->major = JSFD_MAJOR;
- disk->first_minor = i;
- sprintf(disk->disk_name, "jsfd%d", i);
- disk->fops = &jsfd_fops;
- set_capacity(disk, jdp->dsize >> 9);
- disk->private_data = jdp;
- add_disk(disk);
- set_disk_ro(disk, 1);
- }
- return 0;
-out:
- while (i--)
- put_disk(jsfd_disk[i]);
- return err;
-}
-
-MODULE_LICENSE("GPL");
-
-static int __init jsflash_init_module(void) {
- int rc;
-
- if ((rc = jsflash_init()) == 0) {
- jsfd_init();
- return 0;
- }
- return rc;
-}
-
-static void __exit jsflash_cleanup_module(void)
-{
- int i;
-
- for (i = 0; i < JSF_MAX; i++) {
- if ((i & JSF_PART_MASK) >= JSF_NPART) continue;
- del_gendisk(jsfd_disk[i]);
- blk_cleanup_queue(jsfd_disk[i]->queue);
- put_disk(jsfd_disk[i]);
- }
- if (jsf0.busy)
- printk("jsf0: cleaning busy unit\n");
- jsf0.base = 0;
- jsf0.busy = 0;
-
- misc_deregister(&jsf_dev);
- unregister_blkdev(JSFD_MAJOR, "jsfd");
-}
-
-module_init(jsflash_init_module);
-module_exit(jsflash_cleanup_module);
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e29f9b8fd66d..56c940394729 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -182,7 +182,7 @@ zalon7xx-objs := zalon.o ncr53c8xx.o
NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
# Files generated that shall be removed upon make clean
-clean-files := 53c700_d.h 53c700_u.h
+clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 0156c9623c35..d62ddd63f4fe 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -724,6 +724,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
int wait;
unsigned long flags = 0;
unsigned long mflags = 0;
+ struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
+ fibptr->hw_fib_va;
fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
if (callback) {
@@ -734,11 +736,9 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
wait = 1;
- if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
- struct aac_hba_cmd_req *hbacmd =
- (struct aac_hba_cmd_req *)fibptr->hw_fib_va;
+ hbacmd->iu_type = command;
- hbacmd->iu_type = command;
+ if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
/* bit1 of request_id must be 0 */
hbacmd->request_id =
cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index c35f05c4c6bb..85604795d8ee 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -3882,7 +3882,7 @@ static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
u8 b, t;
unsigned long flags;
- enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
+ enum blk_eh_timer_return retval = BLK_EH_DONE;
TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
b = scp->device->channel;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 15a2fef51e38..71bdc0b52cf9 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1963,7 +1963,7 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
- enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
+ enum blk_eh_timer_return rc = BLK_EH_DONE;
struct iscsi_task *task = NULL, *running_task;
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
@@ -1982,7 +1982,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
* Raced with completion. Blk layer has taken ownership
* so let timeout code complete it now.
*/
- rc = BLK_EH_HANDLED;
+ rc = BLK_EH_DONE;
goto done;
}
@@ -1997,7 +1997,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
if (unlikely(system_state != SYSTEM_RUNNING)) {
sc->result = DID_NO_CONNECT << 16;
ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
- rc = BLK_EH_HANDLED;
+ rc = BLK_EH_DONE;
goto done;
}
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index b89c6e6c0589..ce656c466ca9 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2772,7 +2772,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
if (time_after(jiffies, scmd->jiffies_at_alloc +
(scmd_timeout * 2) * HZ)) {
- return BLK_EH_NOT_HANDLED;
+ return BLK_EH_DONE;
}
instance = (struct megasas_instance *)scmd->device->host->hostdata;
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index fe97401ad192..afd27165cd93 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2155,7 +2155,7 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
mvumi_return_cmd(mhba, cmd);
spin_unlock_irqrestore(mhba->shost->host_lock, flags);
- return BLK_EH_NOT_HANDLED;
+ return BLK_EH_DONE;
}
static int
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index e18877177f1b..5a33e1ad9881 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -99,7 +99,7 @@ static int _osd_get_print_system_info(struct osd_dev *od,
int nelem = ARRAY_SIZE(get_attrs), a = 0;
int ret;
- or = osd_start_request(od, GFP_KERNEL);
+ or = osd_start_request(od);
if (!or)
return -ENOMEM;
@@ -409,16 +409,15 @@ static void _osd_request_free(struct osd_request *or)
kfree(or);
}
-struct osd_request *osd_start_request(struct osd_dev *dev, gfp_t gfp)
+struct osd_request *osd_start_request(struct osd_dev *dev)
{
struct osd_request *or;
- or = _osd_request_alloc(gfp);
+ or = _osd_request_alloc(GFP_KERNEL);
if (!or)
return NULL;
or->osd_dev = dev;
- or->alloc_flags = gfp;
or->timeout = dev->def_timeout;
or->retries = OSD_REQ_RETRIES;
@@ -546,7 +545,7 @@ static int _osd_realloc_seg(struct osd_request *or,
if (seg->alloc_size >= max_bytes)
return 0;
- buff = krealloc(seg->buff, max_bytes, or->alloc_flags);
+ buff = krealloc(seg->buff, max_bytes, GFP_KERNEL);
if (!buff) {
OSD_ERR("Failed to Realloc %d-bytes was-%d\n", max_bytes,
seg->alloc_size);
@@ -728,7 +727,7 @@ static int _osd_req_list_objects(struct osd_request *or,
_osd_req_encode_olist(or, list);
WARN_ON(or->in.bio);
- bio = bio_map_kern(q, list, len, or->alloc_flags);
+ bio = bio_map_kern(q, list, len, GFP_KERNEL);
if (IS_ERR(bio)) {
OSD_ERR("!!! Failed to allocate list_objects BIO\n");
return PTR_ERR(bio);
@@ -1190,14 +1189,14 @@ static int _req_append_segment(struct osd_request *or,
pad_buff = io->pad_buff;
ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
- or->alloc_flags);
+ GFP_KERNEL);
if (ret)
return ret;
io->total_bytes += padding;
}
ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
- or->alloc_flags);
+ GFP_KERNEL);
if (ret)
return ret;
@@ -1564,14 +1563,14 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
* osd_finalize_request and helpers
*/
static struct request *_make_request(struct request_queue *q, bool has_write,
- struct _osd_io_info *oii, gfp_t flags)
+ struct _osd_io_info *oii)
{
struct request *req;
struct bio *bio = oii->bio;
int ret;
req = blk_get_request(q, has_write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
- flags);
+ 0);
if (IS_ERR(req))
return req;
@@ -1589,13 +1588,12 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
static int _init_blk_request(struct osd_request *or,
bool has_in, bool has_out)
{
- gfp_t flags = or->alloc_flags;
struct scsi_device *scsi_device = or->osd_dev->scsi_device;
struct request_queue *q = scsi_device->request_queue;
struct request *req;
int ret;
- req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
+ req = _make_request(q, has_out, has_out ? &or->out : &or->in);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto out;
@@ -1611,7 +1609,7 @@ static int _init_blk_request(struct osd_request *or,
or->out.req = req;
if (has_in) {
/* allocate bidi request */
- req = _make_request(q, false, &or->in, flags);
+ req = _make_request(q, false, &or->in);
if (IS_ERR(req)) {
OSD_DEBUG("blk_get_request for bidi failed\n");
ret = PTR_ERR(req);
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 20ec1c01dbd5..2bbe797f8c3d 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -368,7 +368,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
int write = (data_direction == DMA_TO_DEVICE);
req = blk_get_request(SRpnt->stp->device->request_queue,
- write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
+ write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
if (IS_ERR(req))
return DRIVER_ERROR << 24;
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 94c14ce94da2..0e13349dce57 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1848,7 +1848,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
struct iscsi_cls_session *session;
struct iscsi_session *sess;
unsigned long flags;
- enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
+ enum blk_eh_timer_return ret = BLK_EH_DONE;
session = starget_to_session(scsi_target(sc->device));
sess = session->dd_data;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 946039117bf4..9c02ba2e7ef3 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -282,7 +282,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
enum blk_eh_timer_return scsi_times_out(struct request *req)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
- enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
+ enum blk_eh_timer_return rtn = BLK_EH_DONE;
struct Scsi_Host *host = scmd->device->host;
trace_scsi_dispatch_cmd_timeout(scmd);
@@ -294,7 +294,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
if (host->hostt->eh_timed_out)
rtn = host->hostt->eh_timed_out(scmd);
- if (rtn == BLK_EH_NOT_HANDLED) {
+ if (rtn == BLK_EH_DONE) {
if (scsi_abort_command(scmd) != SUCCESS) {
set_host_byte(scmd, DID_TIME_OUT);
scsi_eh_scmd_add(scmd);
@@ -1933,11 +1933,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
struct request *req;
struct scsi_request *rq;
- /*
- * blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a
- * request becomes available
- */
- req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, GFP_KERNEL);
+ req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, 0);
if (IS_ERR(req))
return;
rq = scsi_req(req);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e9b4f279d29c..f125fd71c0f2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -265,7 +265,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
struct scsi_request *rq;
int ret = DRIVER_ERROR << 24;
- req = blk_get_request_flags(sdev->request_queue,
+ req = blk_get_request(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
if (IS_ERR(req))
@@ -273,7 +273,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
rq = scsi_req(req);
if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
- buffer, bufflen, __GFP_RECLAIM))
+ buffer, bufflen, GFP_NOIO))
goto out;
rq->cmd_len = COMMAND_SIZE(cmd[0]);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index be3be0f9cb2d..1da3d71e9f61 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2087,7 +2087,7 @@ fc_eh_timed_out(struct scsi_cmnd *scmd)
if (rport->port_state == FC_PORTSTATE_BLOCKED)
return BLK_EH_RESET_TIMER;
- return BLK_EH_NOT_HANDLED;
+ return BLK_EH_DONE;
}
EXPORT_SYMBOL(fc_eh_timed_out);
@@ -3591,10 +3591,9 @@ fc_bsg_job_timeout(struct request *req)
}
/* the blk_end_sync_io() doesn't check the error */
- if (!inflight)
- return BLK_EH_NOT_HANDLED;
- else
- return BLK_EH_HANDLED;
+ if (inflight)
+ blk_mq_complete_request(req);
+ return BLK_EH_DONE;
}
/**
@@ -3781,8 +3780,7 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
snprintf(bsg_name, sizeof(bsg_name),
"fc_host%d", shost->host_no);
- q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size,
- NULL);
+ q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size);
if (IS_ERR(q)) {
dev_err(dev,
"fc_host%d: bsg interface failed to initialize - setup queue\n",
@@ -3827,8 +3825,8 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
if (!i->f->bsg_request)
return -ENOTSUPP;
- q = bsg_setup_queue(dev, NULL, fc_bsg_dispatch, i->f->dd_bsg_size,
- NULL);
+ q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch,
+ i->f->dd_bsg_size);
if (IS_ERR(q)) {
dev_err(dev, "failed to setup bsg queue\n");
return PTR_ERR(q);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 65f6c94f2e9b..6fd2fe210fc3 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1542,7 +1542,7 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
return -ENOTSUPP;
snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
- q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0, NULL);
+ q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0);
if (IS_ERR(q)) {
shost_printk(KERN_ERR, shost, "bsg interface failed to "
"initialize - no request queue\n");
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 08acbabfae07..e2953b416746 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -187,16 +187,6 @@ static int sas_smp_dispatch(struct bsg_job *job)
return 0;
}
-static void sas_host_release(struct device *dev)
-{
- struct Scsi_Host *shost = dev_to_shost(dev);
- struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
- struct request_queue *q = sas_host->q;
-
- if (q)
- blk_cleanup_queue(q);
-}
-
static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
{
struct request_queue *q;
@@ -208,7 +198,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
if (rphy) {
q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev),
- sas_smp_dispatch, 0, NULL);
+ sas_smp_dispatch, 0);
if (IS_ERR(q))
return PTR_ERR(q);
rphy->q = q;
@@ -217,7 +207,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
snprintf(name, sizeof(name), "sas_host%d", shost->host_no);
q = bsg_setup_queue(&shost->shost_gendev, name,
- sas_smp_dispatch, 0, sas_host_release);
+ sas_smp_dispatch, 0);
if (IS_ERR(q))
return PTR_ERR(q);
to_sas_host_attrs(shost)->q = q;
@@ -260,8 +250,11 @@ static int sas_host_remove(struct transport_container *tc, struct device *dev,
struct Scsi_Host *shost = dev_to_shost(dev);
struct request_queue *q = to_sas_host_attrs(shost)->q;
- if (q)
+ if (q) {
bsg_unregister_queue(q);
+ blk_cleanup_queue(q);
+ }
+
return 0;
}
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 36f6190931bc..4e46fdb2d7c9 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -51,6 +51,8 @@ struct srp_internal {
struct transport_container rport_attr_cont;
};
+static int scsi_is_srp_rport(const struct device *dev);
+
#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
@@ -60,9 +62,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
return dev_to_shost(r->dev.parent);
}
+static int find_child_rport(struct device *dev, void *data)
+{
+ struct device **child = data;
+
+ if (scsi_is_srp_rport(dev)) {
+ WARN_ON_ONCE(*child);
+ *child = dev;
+ }
+ return 0;
+}
+
static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
{
- return transport_class_to_srp_rport(&shost->shost_gendev);
+ struct device *child = NULL;
+
+ WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
+ find_child_rport) < 0);
+ return child ? dev_to_rport(child) : NULL;
}
/**
@@ -587,7 +604,7 @@ EXPORT_SYMBOL(srp_reconnect_rport);
*
* If a timeout occurs while an rport is in the blocked state, ask the SCSI
* EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
- * handle the timeout (BLK_EH_NOT_HANDLED).
+ * handle the timeout (BLK_EH_DONE).
*
* Note: This function is called from soft-IRQ context and with the request
* queue lock held.
@@ -600,9 +617,10 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
struct srp_rport *rport = shost_to_rport(shost);
pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
- return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
+ return rport && rport->fast_io_fail_tmo < 0 &&
+ rport->dev_loss_tmo < 0 &&
i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
- BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+ BLK_EH_RESET_TIMER : BLK_EH_DONE;
}
EXPORT_SYMBOL(srp_timed_out);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8ff687158704..6fc58e2c99d3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1714,7 +1714,7 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
* does not sleep except under memory pressure.
*/
rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
if (IS_ERR(rq)) {
kfree(long_cmdp);
return PTR_ERR(rq);
@@ -1893,7 +1893,7 @@ retry:
num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz;
- schp->pages[k] = alloc_pages(gfp_mask, order);
+ schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
if (!schp->pages[k])
goto out;
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 2a21f2d48592..35fab1e18adc 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -188,9 +188,13 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
struct scsi_device *SDev;
struct scsi_sense_hdr sshdr;
int result, err = 0, retries = 0;
+ unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL;
SDev = cd->device;
+ if (cgc->sense)
+ senseptr = sense_buffer;
+
retry:
if (!scsi_block_when_processing_errors(SDev)) {
err = -ENODEV;
@@ -198,10 +202,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
}
result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
- cgc->buffer, cgc->buflen,
- (unsigned char *)cgc->sense, &sshdr,
+ cgc->buffer, cgc->buflen, senseptr, &sshdr,
cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
+ if (cgc->sense)
+ memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense));
+
/* Minimal error checking. Ignore cases we know about, and report the rest. */
if (driver_byte(result) != 0) {
switch (sshdr.sense_key) {
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 6c399480783d..a427ce9497be 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -545,7 +545,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
req = blk_get_request(SRpnt->stp->device->request_queue,
data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
if (IS_ERR(req))
return DRIVER_ERROR << 24;
rq = scsi_req(req);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 00e79057f870..d0a1674915a1 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -6497,12 +6497,12 @@ static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
bool found = false;
if (!scmd || !scmd->device || !scmd->device->host)
- return BLK_EH_NOT_HANDLED;
+ return BLK_EH_DONE;
host = scmd->device->host;
hba = shost_priv(host);
if (!hba)
- return BLK_EH_NOT_HANDLED;
+ return BLK_EH_DONE;
spin_lock_irqsave(host->host_lock, flags);
@@ -6520,7 +6520,7 @@ static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
* SCSI command was not actually dispatched to UFS driver, otherwise
* let SCSI layer handle the error as usual.
*/
- return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
+ return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c374e3b5c678..777e5f1e52d1 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -609,7 +609,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
break;
case BTSTAT_ABORTQUEUE:
- cmd->result = (DID_ABORT << 16);
+ cmd->result = (DID_BUS_BUSY << 16);
break;
case BTSTAT_SCSIPARITY:
diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c
index 8d8659463b3e..feeb17cebc25 100644
--- a/drivers/soc/lantiq/gphy.c
+++ b/drivers/soc/lantiq/gphy.c
@@ -30,7 +30,6 @@ struct xway_gphy_priv {
struct clk *gphy_clk_gate;
struct reset_control *gphy_reset;
struct reset_control *gphy_reset2;
- struct notifier_block gphy_reboot_nb;
void __iomem *membase;
char *fw_name;
};
@@ -64,24 +63,6 @@ static const struct of_device_id xway_gphy_match[] = {
};
MODULE_DEVICE_TABLE(of, xway_gphy_match);
-static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb)
-{
- return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb);
-}
-
-static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb,
- unsigned long code, void *unused)
-{
- struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb);
-
- if (priv) {
- reset_control_assert(priv->gphy_reset);
- reset_control_assert(priv->gphy_reset2);
- }
-
- return NOTIFY_DONE;
-}
-
static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv,
dma_addr_t *dev_addr)
{
@@ -205,14 +186,6 @@ static int xway_gphy_probe(struct platform_device *pdev)
reset_control_deassert(priv->gphy_reset);
reset_control_deassert(priv->gphy_reset2);
- /* assert the gphy reset because it can hang after a reboot: */
- priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify;
- priv->gphy_reboot_nb.priority = -1;
-
- ret = register_reboot_notifier(&priv->gphy_reboot_nb);
- if (ret)
- dev_warn(dev, "Failed to register reboot notifier\n");
-
platform_set_drvdata(pdev, priv);
return ret;
@@ -220,21 +193,12 @@ static int xway_gphy_probe(struct platform_device *pdev)
static int xway_gphy_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
struct xway_gphy_priv *priv = platform_get_drvdata(pdev);
- int ret;
-
- reset_control_assert(priv->gphy_reset);
- reset_control_assert(priv->gphy_reset2);
iowrite32be(0, priv->membase);
clk_disable_unprepare(priv->gphy_clk_gate);
- ret = unregister_reboot_notifier(&priv->gphy_reboot_nb);
- if (ret)
- dev_warn(dev, "Failed to unregister reboot notifier\n");
-
return 0;
}
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 1596d35498c5..6573152ce893 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -490,7 +490,7 @@ static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
{
- if (!has_bspi(qspi) || (qspi->bspi_enabled))
+ if (!has_bspi(qspi))
return;
qspi->bspi_enabled = 1;
@@ -505,7 +505,7 @@ static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
{
- if (!has_bspi(qspi) || (!qspi->bspi_enabled))
+ if (!has_bspi(qspi))
return;
qspi->bspi_enabled = 0;
@@ -519,16 +519,19 @@ static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
{
- u32 data = 0;
+ u32 rd = 0;
+ u32 wr = 0;
- if (qspi->curr_cs == cs)
- return;
if (qspi->base[CHIP_SELECT]) {
- data = bcm_qspi_read(qspi, CHIP_SELECT, 0);
- data = (data & ~0xff) | (1 << cs);
- bcm_qspi_write(qspi, CHIP_SELECT, 0, data);
+ rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
+ wr = (rd & ~0xff) | (1 << cs);
+ if (rd == wr)
+ return;
+ bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
usleep_range(10, 20);
}
+
+ dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
qspi->curr_cs = cs;
}
@@ -755,8 +758,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
}
mspi_cdram = MSPI_CDRAM_CONT_BIT;
- mspi_cdram |= (~(1 << spi->chip_select) &
- MSPI_CDRAM_PCS);
+
+ if (has_bspi(qspi))
+ mspi_cdram &= ~1;
+ else
+ mspi_cdram |= (~(1 << spi->chip_select) &
+ MSPI_CDRAM_PCS);
+
mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
MSPI_CDRAM_BITSE_BIT);
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 1431cb98fe40..3094d818cf06 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -184,6 +184,11 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
irqreturn_t ret = IRQ_NONE;
+ /* IRQ may be shared, so return if our interrupts are disabled */
+ if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
+ (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
+ return ret;
+
/* check if we have data to read */
while (bs->rx_len &&
(!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 5c9516ae4942..4a001634023e 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -313,6 +313,14 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
(xspi->tx_bytes > 0)) {
+
+ /* When xspi in busy condition, bytes may send failed,
+ * then spi control did't work thoroughly, add one byte delay
+ */
+ if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
+ CDNS_SPI_IXR_TXFULL)
+ usleep_range(10, 20);
+
if (xspi->txbuf)
cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
else
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 6f57592a7f95..a056ee88a960 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1701,7 +1701,7 @@ static struct platform_driver spi_imx_driver = {
};
module_platform_driver(spi_imx_driver);
-MODULE_DESCRIPTION("SPI Master Controller driver");
+MODULE_DESCRIPTION("SPI Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 513ec6c6e25b..0ae7defd3492 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -38,7 +38,7 @@ struct driver_data {
/* SSP register addresses */
void __iomem *ioaddr;
- u32 ssdr_physical;
+ phys_addr_t ssdr_physical;
/* SSP masks*/
u32 dma_cr1;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index ae086aab57d5..8171eedbfc90 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -283,6 +283,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
}
k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
+ brps = min_t(int, brps, 32);
scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
sh_msiof_write(p, TSCR, scr);
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 9371651d8017..c574dd210500 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -117,7 +117,7 @@ config SSB_SERIAL
config SSB_DRIVER_PCICORE_POSSIBLE
bool
- depends on SSB_PCIHOST && SSB = y
+ depends on SSB_PCIHOST
default y
config SSB_DRIVER_PCICORE
@@ -131,7 +131,7 @@ config SSB_DRIVER_PCICORE
config SSB_PCICORE_HOSTMODE
bool "Hostmode support for SSB PCI core"
- depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS
+ depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS && SSB = y
help
PCIcore hostmode operation (external PCI bus).
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 60429011292a..ce1321a5cb7b 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -94,8 +94,8 @@ static int iblock_configure_device(struct se_device *dev)
return -EINVAL;
}
- ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
- if (!ib_dev->ibd_bio_set) {
+ ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ if (ret) {
pr_err("IBLOCK: Unable to create bioset\n");
goto out;
}
@@ -141,7 +141,7 @@ static int iblock_configure_device(struct se_device *dev)
bi = bdev_get_integrity(bd);
if (bi) {
- struct bio_set *bs = ib_dev->ibd_bio_set;
+ struct bio_set *bs = &ib_dev->ibd_bio_set;
if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
!strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
@@ -164,7 +164,7 @@ static int iblock_configure_device(struct se_device *dev)
goto out_blkdev_put;
}
pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
- bs->bio_integrity_pool);
+ &bs->bio_integrity_pool);
}
dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
}
@@ -174,8 +174,7 @@ static int iblock_configure_device(struct se_device *dev)
out_blkdev_put:
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
- bioset_free(ib_dev->ibd_bio_set);
- ib_dev->ibd_bio_set = NULL;
+ bioset_exit(&ib_dev->ibd_bio_set);
out:
return ret;
}
@@ -199,8 +198,7 @@ static void iblock_destroy_device(struct se_device *dev)
if (ib_dev->ibd_bd != NULL)
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
- if (ib_dev->ibd_bio_set != NULL)
- bioset_free(ib_dev->ibd_bio_set);
+ bioset_exit(&ib_dev->ibd_bio_set);
}
static unsigned long long iblock_emulate_read_cap_with_block_size(
@@ -332,7 +330,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
if (sg_num > BIO_MAX_PAGES)
sg_num = BIO_MAX_PAGES;
- bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+ bio = bio_alloc_bioset(GFP_NOIO, sg_num, &ib_dev->ibd_bio_set);
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
return NULL;
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index b4aeb2584ad4..9cc3843404d4 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -22,7 +22,7 @@ struct iblock_dev {
struct se_device dev;
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
u32 ibd_flags;
- struct bio_set *ibd_bio_set;
+ struct bio_set ibd_bio_set;
struct block_device *ibd_bd;
bool ibd_readonly;
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 6cb933ecc084..668934ea74cb 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -986,8 +986,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req = blk_get_request(pdv->pdv_sd->request_queue,
cmd->data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
- GFP_KERNEL);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
if (IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed\n");
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 4ad89ea71a70..4f26bdc3d1dc 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -2121,6 +2121,8 @@ static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
if (val >= 0) {
udev->qfull_time_out = val * MSEC_PER_SEC;
+ } else if (val == -1) {
+ udev->qfull_time_out = val;
} else {
printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
return -EINVAL;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 0124a91c8d71..dd46b758852a 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -238,6 +238,17 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
if (IS_ERR(shm))
return PTR_ERR(shm);
+ /*
+ * Ensure offset + size does not overflow offset
+ * and does not overflow the size of the referred
+ * shared memory object.
+ */
+ if ((ip.a + ip.b) < ip.a ||
+ (ip.a + ip.b) > shm->size) {
+ tee_shm_put(shm);
+ return -EINVAL;
+ }
+
params[n].u.memref.shm_offs = ip.a;
params[n].u.memref.size = ip.b;
params[n].u.memref.shm = shm;
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 556960a1bab3..07d3be6f0780 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -360,9 +360,10 @@ int tee_shm_get_fd(struct tee_shm *shm)
if (!(shm->flags & TEE_SHM_DMA_BUF))
return -EINVAL;
+ get_dma_buf(shm->dmabuf);
fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
- if (fd >= 0)
- get_dma_buf(shm->dmabuf);
+ if (fd < 0)
+ dma_buf_put(shm->dmabuf);
return fd;
}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 2d2ceda9aa26..500911f16498 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1255,7 +1255,7 @@ static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
/* Map empty entries to null UUID */
uuid[0] = 0;
uuid[1] = 0;
- } else {
+ } else if (uuid[0] != 0 || uuid[1] != 0) {
/* Upper two DWs are always one's */
uuid[2] = 0xffffffff;
uuid[3] = 0xffffffff;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 72ebbc908e19..32cd52ca8318 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -354,7 +354,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
slot_id = 0;
for (i = 0; i < MAX_HC_SLOTS; i++) {
- if (!xhci->devs[i])
+ if (!xhci->devs[i] || !xhci->devs[i]->udev)
continue;
speed = xhci->devs[i]->udev->speed;
if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index e7f99d55922a..15a42cee0a9c 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2524,8 +2524,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
{
struct musb *musb = hcd_to_musb(hcd);
u8 devctl;
+ int ret;
- musb_port_suspend(musb, true);
+ ret = musb_port_suspend(musb, true);
+ if (ret)
+ return ret;
if (!is_host_active(musb))
return 0;
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 72392bbcd0a4..2999845632ce 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -67,7 +67,7 @@ extern void musb_host_rx(struct musb *, u8);
extern void musb_root_disconnect(struct musb *musb);
extern void musb_host_resume_root_hub(struct musb *musb);
extern void musb_host_poke_root_hub(struct musb *musb);
-extern void musb_port_suspend(struct musb *musb, bool do_suspend);
+extern int musb_port_suspend(struct musb *musb, bool do_suspend);
extern void musb_port_reset(struct musb *musb, bool do_reset);
extern void musb_host_finish_resume(struct work_struct *work);
#else
@@ -99,7 +99,10 @@ static inline void musb_root_disconnect(struct musb *musb) {}
static inline void musb_host_resume_root_hub(struct musb *musb) {}
static inline void musb_host_poll_rh_status(struct musb *musb) {}
static inline void musb_host_poke_root_hub(struct musb *musb) {}
-static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {}
+static inline int musb_port_suspend(struct musb *musb, bool do_suspend)
+{
+ return 0;
+}
static inline void musb_port_reset(struct musb *musb, bool do_reset) {}
static inline void musb_host_finish_resume(struct work_struct *work) {}
#endif
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 5165d2b07ade..2f8dd9826e94 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -48,14 +48,14 @@ void musb_host_finish_resume(struct work_struct *work)
spin_unlock_irqrestore(&musb->lock, flags);
}
-void musb_port_suspend(struct musb *musb, bool do_suspend)
+int musb_port_suspend(struct musb *musb, bool do_suspend)
{
struct usb_otg *otg = musb->xceiv->otg;
u8 power;
void __iomem *mbase = musb->mregs;
if (!is_host_active(musb))
- return;
+ return 0;
/* NOTE: this doesn't necessarily put PHY into low power mode,
* turning off its clock; that's a function of PHY integration and
@@ -66,16 +66,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
if (do_suspend) {
int retries = 10000;
- power &= ~MUSB_POWER_RESUME;
- power |= MUSB_POWER_SUSPENDM;
- musb_writeb(mbase, MUSB_POWER, power);
+ if (power & MUSB_POWER_RESUME)
+ return -EBUSY;
- /* Needed for OPT A tests */
- power = musb_readb(mbase, MUSB_POWER);
- while (power & MUSB_POWER_SUSPENDM) {
+ if (!(power & MUSB_POWER_SUSPENDM)) {
+ power |= MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER, power);
+
+ /* Needed for OPT A tests */
power = musb_readb(mbase, MUSB_POWER);
- if (retries-- < 1)
- break;
+ while (power & MUSB_POWER_SUSPENDM) {
+ power = musb_readb(mbase, MUSB_POWER);
+ if (retries-- < 1)
+ break;
+ }
}
musb_dbg(musb, "Root port suspended, power %02x", power);
@@ -111,6 +115,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
schedule_delayed_work(&musb->finish_resume_work,
msecs_to_jiffies(USB_RESUME_TIMEOUT));
}
+ return 0;
}
void musb_port_reset(struct musb *musb, bool do_reset)
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 14a72357800a..35618ceb2791 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -73,6 +73,7 @@ struct bus_id_priv {
struct stub_device *sdev;
struct usb_device *udev;
char shutdown_busid;
+ spinlock_t busid_lock;
};
/* stub_priv is allocated from stub_priv_cache */
@@ -83,6 +84,7 @@ extern struct usb_device_driver stub_driver;
/* stub_main.c */
struct bus_id_priv *get_busid_priv(const char *busid);
+void put_busid_priv(struct bus_id_priv *bid);
int del_match_busid(char *busid);
void stub_device_cleanup_urbs(struct stub_device *sdev);
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index dd8ef36ab10e..c0d6ff1baa72 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -300,9 +300,9 @@ static int stub_probe(struct usb_device *udev)
struct stub_device *sdev = NULL;
const char *udev_busid = dev_name(&udev->dev);
struct bus_id_priv *busid_priv;
- int rc;
+ int rc = 0;
- dev_dbg(&udev->dev, "Enter\n");
+ dev_dbg(&udev->dev, "Enter probe\n");
/* check we should claim or not by busid_table */
busid_priv = get_busid_priv(udev_busid);
@@ -317,13 +317,15 @@ static int stub_probe(struct usb_device *udev)
* other matched drivers by the driver core.
* See driver_probe_device() in driver/base/dd.c
*/
- return -ENODEV;
+ rc = -ENODEV;
+ goto call_put_busid_priv;
}
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
udev_busid);
- return -ENODEV;
+ rc = -ENODEV;
+ goto call_put_busid_priv;
}
if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
@@ -331,13 +333,16 @@ static int stub_probe(struct usb_device *udev)
"%s is attached on vhci_hcd... skip!\n",
udev_busid);
- return -ENODEV;
+ rc = -ENODEV;
+ goto call_put_busid_priv;
}
/* ok, this is my device */
sdev = stub_device_alloc(udev);
- if (!sdev)
- return -ENOMEM;
+ if (!sdev) {
+ rc = -ENOMEM;
+ goto call_put_busid_priv;
+ }
dev_info(&udev->dev,
"usbip-host: register new device (bus %u dev %u)\n",
@@ -369,7 +374,9 @@ static int stub_probe(struct usb_device *udev)
}
busid_priv->status = STUB_BUSID_ALLOC;
- return 0;
+ rc = 0;
+ goto call_put_busid_priv;
+
err_files:
usb_hub_release_port(udev->parent, udev->portnum,
(struct usb_dev_state *) udev);
@@ -379,6 +386,9 @@ err_port:
busid_priv->sdev = NULL;
stub_device_free(sdev);
+
+call_put_busid_priv:
+ put_busid_priv(busid_priv);
return rc;
}
@@ -404,7 +414,7 @@ static void stub_disconnect(struct usb_device *udev)
struct bus_id_priv *busid_priv;
int rc;
- dev_dbg(&udev->dev, "Enter\n");
+ dev_dbg(&udev->dev, "Enter disconnect\n");
busid_priv = get_busid_priv(udev_busid);
if (!busid_priv) {
@@ -417,7 +427,7 @@ static void stub_disconnect(struct usb_device *udev)
/* get stub_device */
if (!sdev) {
dev_err(&udev->dev, "could not get device");
- return;
+ goto call_put_busid_priv;
}
dev_set_drvdata(&udev->dev, NULL);
@@ -432,12 +442,12 @@ static void stub_disconnect(struct usb_device *udev)
(struct usb_dev_state *) udev);
if (rc) {
dev_dbg(&udev->dev, "unable to release port\n");
- return;
+ goto call_put_busid_priv;
}
/* If usb reset is called from event handler */
if (usbip_in_eh(current))
- return;
+ goto call_put_busid_priv;
/* shutdown the current connection */
shutdown_busid(busid_priv);
@@ -448,12 +458,11 @@ static void stub_disconnect(struct usb_device *udev)
busid_priv->sdev = NULL;
stub_device_free(sdev);
- if (busid_priv->status == STUB_BUSID_ALLOC) {
+ if (busid_priv->status == STUB_BUSID_ALLOC)
busid_priv->status = STUB_BUSID_ADDED;
- } else {
- busid_priv->status = STUB_BUSID_OTHER;
- del_match_busid((char *)udev_busid);
- }
+
+call_put_busid_priv:
+ put_busid_priv(busid_priv);
}
#ifdef CONFIG_PM
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index d41d0cdeec0f..bf8a5feb0ee9 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -14,6 +14,7 @@
#define DRIVER_DESC "USB/IP Host Driver"
struct kmem_cache *stub_priv_cache;
+
/*
* busid_tables defines matching busids that usbip can grab. A user can change
* dynamically what device is locally used and what device is exported to a
@@ -25,6 +26,8 @@ static spinlock_t busid_table_lock;
static void init_busid_table(void)
{
+ int i;
+
/*
* This also sets the bus_table[i].status to
* STUB_BUSID_OTHER, which is 0.
@@ -32,6 +35,9 @@ static void init_busid_table(void)
memset(busid_table, 0, sizeof(busid_table));
spin_lock_init(&busid_table_lock);
+
+ for (i = 0; i < MAX_BUSID; i++)
+ spin_lock_init(&busid_table[i].busid_lock);
}
/*
@@ -43,15 +49,20 @@ static int get_busid_idx(const char *busid)
int i;
int idx = -1;
- for (i = 0; i < MAX_BUSID; i++)
+ for (i = 0; i < MAX_BUSID; i++) {
+ spin_lock(&busid_table[i].busid_lock);
if (busid_table[i].name[0])
if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
idx = i;
+ spin_unlock(&busid_table[i].busid_lock);
break;
}
+ spin_unlock(&busid_table[i].busid_lock);
+ }
return idx;
}
+/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
struct bus_id_priv *get_busid_priv(const char *busid)
{
int idx;
@@ -59,13 +70,22 @@ struct bus_id_priv *get_busid_priv(const char *busid)
spin_lock(&busid_table_lock);
idx = get_busid_idx(busid);
- if (idx >= 0)
+ if (idx >= 0) {
bid = &(busid_table[idx]);
+ /* get busid_lock before returning */
+ spin_lock(&bid->busid_lock);
+ }
spin_unlock(&busid_table_lock);
return bid;
}
+void put_busid_priv(struct bus_id_priv *bid)
+{
+ if (bid)
+ spin_unlock(&bid->busid_lock);
+}
+
static int add_match_busid(char *busid)
{
int i;
@@ -78,15 +98,19 @@ static int add_match_busid(char *busid)
goto out;
}
- for (i = 0; i < MAX_BUSID; i++)
+ for (i = 0; i < MAX_BUSID; i++) {
+ spin_lock(&busid_table[i].busid_lock);
if (!busid_table[i].name[0]) {
strlcpy(busid_table[i].name, busid, BUSID_SIZE);
if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
(busid_table[i].status != STUB_BUSID_REMOV))
busid_table[i].status = STUB_BUSID_ADDED;
ret = 0;
+ spin_unlock(&busid_table[i].busid_lock);
break;
}
+ spin_unlock(&busid_table[i].busid_lock);
+ }
out:
spin_unlock(&busid_table_lock);
@@ -107,6 +131,8 @@ int del_match_busid(char *busid)
/* found */
ret = 0;
+ spin_lock(&busid_table[idx].busid_lock);
+
if (busid_table[idx].status == STUB_BUSID_OTHER)
memset(busid_table[idx].name, 0, BUSID_SIZE);
@@ -114,6 +140,7 @@ int del_match_busid(char *busid)
(busid_table[idx].status != STUB_BUSID_ADDED))
busid_table[idx].status = STUB_BUSID_REMOV;
+ spin_unlock(&busid_table[idx].busid_lock);
out:
spin_unlock(&busid_table_lock);
@@ -126,9 +153,12 @@ static ssize_t match_busid_show(struct device_driver *drv, char *buf)
char *out = buf;
spin_lock(&busid_table_lock);
- for (i = 0; i < MAX_BUSID; i++)
+ for (i = 0; i < MAX_BUSID; i++) {
+ spin_lock(&busid_table[i].busid_lock);
if (busid_table[i].name[0])
out += sprintf(out, "%s ", busid_table[i].name);
+ spin_unlock(&busid_table[i].busid_lock);
+ }
spin_unlock(&busid_table_lock);
out += sprintf(out, "\n");
@@ -169,6 +199,51 @@ static ssize_t match_busid_store(struct device_driver *dev, const char *buf,
}
static DRIVER_ATTR_RW(match_busid);
+static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
+{
+ int ret;
+
+ /* device_attach() callers should hold parent lock for USB */
+ if (busid_priv->udev->dev.parent)
+ device_lock(busid_priv->udev->dev.parent);
+ ret = device_attach(&busid_priv->udev->dev);
+ if (busid_priv->udev->dev.parent)
+ device_unlock(busid_priv->udev->dev.parent);
+ if (ret < 0) {
+ dev_err(&busid_priv->udev->dev, "rebind failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static void stub_device_rebind(void)
+{
+#if IS_MODULE(CONFIG_USBIP_HOST)
+ struct bus_id_priv *busid_priv;
+ int i;
+
+ /* update status to STUB_BUSID_OTHER so probe ignores the device */
+ spin_lock(&busid_table_lock);
+ for (i = 0; i < MAX_BUSID; i++) {
+ if (busid_table[i].name[0] &&
+ busid_table[i].shutdown_busid) {
+ busid_priv = &(busid_table[i]);
+ busid_priv->status = STUB_BUSID_OTHER;
+ }
+ }
+ spin_unlock(&busid_table_lock);
+
+ /* now run rebind - no need to hold locks. driver files are removed */
+ for (i = 0; i < MAX_BUSID; i++) {
+ if (busid_table[i].name[0] &&
+ busid_table[i].shutdown_busid) {
+ busid_priv = &(busid_table[i]);
+ do_rebind(busid_table[i].name, busid_priv);
+ }
+ }
+#endif
+}
+
static ssize_t rebind_store(struct device_driver *dev, const char *buf,
size_t count)
{
@@ -186,16 +261,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
if (!bid)
return -ENODEV;
- /* device_attach() callers should hold parent lock for USB */
- if (bid->udev->dev.parent)
- device_lock(bid->udev->dev.parent);
- ret = device_attach(&bid->udev->dev);
- if (bid->udev->dev.parent)
- device_unlock(bid->udev->dev.parent);
- if (ret < 0) {
- dev_err(&bid->udev->dev, "rebind failed\n");
+ /* mark the device for deletion so probe ignores it during rescan */
+ bid->status = STUB_BUSID_OTHER;
+ /* release the busid lock */
+ put_busid_priv(bid);
+
+ ret = do_rebind((char *) buf, bid);
+ if (ret < 0)
return ret;
- }
+
+ /* delete device from busid_table */
+ del_match_busid((char *) buf);
return count;
}
@@ -317,6 +393,9 @@ static void __exit usbip_host_exit(void)
*/
usb_deregister_device_driver(&stub_driver);
+ /* initiate scan to attach devices */
+ stub_device_rebind();
+
kmem_cache_destroy(stub_priv_cache);
}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 5c212bf29640..3c082451ab1a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -404,6 +404,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
{
unsigned long pfn = 0;
long ret, pinned = 0, lock_acct = 0;
+ bool rsvd;
dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
/* This code path is only user initiated */
@@ -414,23 +415,14 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
if (ret)
return ret;
- if (is_invalid_reserved_pfn(*pfn_base)) {
- struct vm_area_struct *vma;
-
- down_read(&current->mm->mmap_sem);
- vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);
- pinned = min_t(long, npage, vma_pages(vma));
- up_read(&current->mm->mmap_sem);
- return pinned;
- }
-
pinned++;
+ rsvd = is_invalid_reserved_pfn(*pfn_base);
/*
* Reserved pages aren't counted against the user, externally pinned
* pages are already counted against the user.
*/
- if (!vfio_find_vpfn(dma, iova)) {
+ if (!rsvd && !vfio_find_vpfn(dma, iova)) {
if (!lock_cap && current->mm->locked_vm + 1 > limit) {
put_pfn(*pfn_base, dma->prot);
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
@@ -450,12 +442,13 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
if (ret)
break;
- if (pfn != *pfn_base + pinned) {
+ if (pfn != *pfn_base + pinned ||
+ rsvd != is_invalid_reserved_pfn(pfn)) {
put_pfn(pfn, dma->prot);
break;
}
- if (!vfio_find_vpfn(dma, iova)) {
+ if (!rsvd && !vfio_find_vpfn(dma, iova)) {
if (!lock_cap &&
current->mm->locked_vm + lock_acct + 1 > limit) {
put_pfn(pfn, dma->prot);
@@ -473,8 +466,10 @@ out:
unpin_out:
if (ret) {
- for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
- put_pfn(pfn, dma->prot);
+ if (!rsvd) {
+ for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
+ put_pfn(pfn, dma->prot);
+ }
return ret;
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 986058a57917..eeaf6739215f 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -105,7 +105,9 @@ struct vhost_net_virtqueue {
/* vhost zerocopy support fields below: */
/* last used idx for outstanding DMA zerocopy buffers */
int upend_idx;
- /* first used idx for DMA done zerocopy buffers */
+ /* For TX, first used idx for DMA done zerocopy buffers
+ * For RX, number of batched heads
+ */
int done_idx;
/* an array of userspace buffers info */
struct ubuf_info *ubuf_info;
@@ -626,6 +628,18 @@ static int sk_has_rx_data(struct sock *sk)
return skb_queue_empty(&sk->sk_receive_queue);
}
+static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ struct vhost_dev *dev = vq->dev;
+
+ if (!nvq->done_idx)
+ return;
+
+ vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
+ nvq->done_idx = 0;
+}
+
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
{
struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
@@ -635,6 +649,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
int len = peek_head_len(rvq, sk);
if (!len && vq->busyloop_timeout) {
+ /* Flush batched heads first */
+ vhost_rx_signal_used(rvq);
/* Both tx vq and rx socket were polled here */
mutex_lock_nested(&vq->mutex, 1);
vhost_disable_notify(&net->dev, vq);
@@ -762,7 +778,7 @@ static void handle_rx(struct vhost_net *net)
};
size_t total_len = 0;
int err, mergeable;
- s16 headcount, nheads = 0;
+ s16 headcount;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
struct socket *sock;
@@ -790,8 +806,8 @@ static void handle_rx(struct vhost_net *net)
while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
- headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
- &in, vq_log, &log,
+ headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
+ vhost_len, &in, vq_log, &log,
likely(mergeable) ? UIO_MAXIOV : 1);
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
@@ -862,12 +878,9 @@ static void handle_rx(struct vhost_net *net)
vhost_discard_vq_desc(vq, headcount);
goto out;
}
- nheads += headcount;
- if (nheads > VHOST_RX_BATCH) {
- vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
- nheads);
- nheads = 0;
- }
+ nvq->done_idx += headcount;
+ if (nvq->done_idx > VHOST_RX_BATCH)
+ vhost_rx_signal_used(nvq);
if (unlikely(vq_log))
vhost_log_write(vq, vq_log, log, vhost_len);
total_len += vhost_len;
@@ -878,9 +891,7 @@ static void handle_rx(struct vhost_net *net)
}
vhost_net_enable_vq(net, vq);
out:
- if (nheads)
- vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
- nheads);
+ vhost_rx_signal_used(nvq);
mutex_unlock(&vq->mutex);
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index f3bd8e941224..f0be5f35ab28 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -981,6 +981,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
{
int ret = 0;
+ mutex_lock(&dev->mutex);
vhost_dev_lock_vqs(dev);
switch (msg->type) {
case VHOST_IOTLB_UPDATE:
@@ -1016,6 +1017,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
}
vhost_dev_unlock_vqs(dev);
+ mutex_unlock(&dev->mutex);
+
return ret;
}
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index e1c60899fdbc..a6f9ba85dc4b 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -351,7 +351,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
* physical address */
phys = xen_bus_to_phys(dev_addr);
- if (((dev_addr + size - 1 > dma_mask)) ||
+ if (((dev_addr + size - 1 <= dma_mask)) ||
range_straddles_page_boundary(phys, size))
xen_destroy_contiguous_region(phys, order);