-rw-r--r--  Documentation/ABI/testing/procfs-diskstats  5
-rw-r--r--  Documentation/ABI/testing/sysfs-block  6
-rw-r--r--  Documentation/admin-guide/device-mapper/dm-integrity.rst  5
-rw-r--r--  Documentation/admin-guide/device-mapper/dm-raid.rst  2
-rw-r--r--  Documentation/admin-guide/iostats.rst  9
-rw-r--r--  Documentation/block/stat.rst  14
-rw-r--r--  Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt  53
-rw-r--r--  Documentation/devicetree/bindings/mtd/intel,ixp4xx-flash.txt  22
-rw-r--r--  Documentation/devicetree/bindings/security/tpm/google,cr50.txt  19
-rw-r--r--  Documentation/driver-api/libata.rst  14
-rw-r--r--  MAINTAINERS  30
-rw-r--r--  Makefile  2
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h  17
-rw-r--r--  arch/arm64/include/asm/uaccess.h  27
-rw-r--r--  arch/arm64/lib/clear_user.S  2
-rw-r--r--  arch/arm64/lib/copy_from_user.S  2
-rw-r--r--  arch/arm64/lib/copy_in_user.S  2
-rw-r--r--  arch/arm64/lib/copy_to_user.S  2
-rw-r--r--  arch/arm64/lib/uaccess_flushcache.c  6
-rw-r--r--  block/Kconfig  4
-rw-r--r--  block/Kconfig.iosched  1
-rw-r--r--  block/Makefile  1
-rw-r--r--  block/bfq-cgroup.c  85
-rw-r--r--  block/bfq-iosched.c  4
-rw-r--r--  block/bfq-iosched.h  10
-rw-r--r--  block/blk-cgroup-rwstat.c  129
-rw-r--r--  block/blk-cgroup-rwstat.h  149
-rw-r--r--  block/blk-cgroup.c  304
-rw-r--r--  block/blk-core.c  16
-rw-r--r--  block/blk-exec.c  2
-rw-r--r--  block/blk-flush.c  15
-rw-r--r--  block/blk-merge.c  17
-rw-r--r--  block/blk-mq-sysfs.c  31
-rw-r--r--  block/blk-mq-tag.c  8
-rw-r--r--  block/blk-mq-tag.h  1
-rw-r--r--  block/blk-mq.c  136
-rw-r--r--  block/blk-mq.h  9
-rw-r--r--  block/blk-softirq.c  4
-rw-r--r--  block/blk-stat.c  7
-rw-r--r--  block/blk-sysfs.c  8
-rw-r--r--  block/blk-throttle.c  71
-rw-r--r--  block/blk-zoned.c  453
-rw-r--r--  block/blk.h  7
-rw-r--r--  block/elevator.c  9
-rw-r--r--  block/genhd.c  8
-rw-r--r--  block/ioctl.c  42
-rw-r--r--  block/opal_proto.h  6
-rw-r--r--  block/partition-generic.c  231
-rw-r--r--  block/sed-opal.c  318
-rw-r--r--  block/t10-pi.c  8
-rw-r--r--  crypto/asymmetric_keys/asym_tpm.c  101
-rw-r--r--  drivers/ata/acard-ahci.c  6
-rw-r--r--  drivers/ata/ahci.c  13
-rw-r--r--  drivers/ata/ahci_tegra.c  6
-rw-r--r--  drivers/ata/ata_piix.c  14
-rw-r--r--  drivers/ata/libahci.c  6
-rw-r--r--  drivers/ata/libata-core.c  12
-rw-r--r--  drivers/ata/libata-sff.c  12
-rw-r--r--  drivers/ata/pata_artop.c  4
-rw-r--r--  drivers/ata/pata_macio.c  6
-rw-r--r--  drivers/ata/pata_pxa.c  8
-rw-r--r--  drivers/ata/pdc_adma.c  7
-rw-r--r--  drivers/ata/sata_fsl.c  4
-rw-r--r--  drivers/ata/sata_inic162x.c  4
-rw-r--r--  drivers/ata/sata_mv.c  34
-rw-r--r--  drivers/ata/sata_nv.c  18
-rw-r--r--  drivers/ata/sata_promise.c  6
-rw-r--r--  drivers/ata/sata_qstor.c  8
-rw-r--r--  drivers/ata/sata_rcar.c  6
-rw-r--r--  drivers/ata/sata_sil.c  8
-rw-r--r--  drivers/ata/sata_sil24.c  6
-rw-r--r--  drivers/ata/sata_sx4.c  6
-rw-r--r--  drivers/block/loop.c  39
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c  2
-rw-r--r--  drivers/block/nbd.c  6
-rw-r--r--  drivers/block/null_blk.h  19
-rw-r--r--  drivers/block/null_blk_main.c  125
-rw-r--r--  drivers/block/null_blk_zoned.c  87
-rw-r--r--  drivers/char/tpm/Kconfig  7
-rw-r--r--  drivers/char/tpm/Makefile  4
-rw-r--r--  drivers/char/tpm/tpm-interface.c  64
-rw-r--r--  drivers/char/tpm/tpm-sysfs.c  45
-rw-r--r--  drivers/char/tpm/tpm.h  248
-rw-r--r--  drivers/char/tpm/tpm1-cmd.c  15
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c  311
-rw-r--r--  drivers/char/tpm/tpm_crb.c  123
-rw-r--r--  drivers/char/tpm/tpm_tis.c  2
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c  79
-rw-r--r--  drivers/char/tpm/tpm_tis_spi.c  143
-rw-r--r--  drivers/char/tpm/tpm_tis_spi.h  53
-rw-r--r--  drivers/char/tpm/tpm_tis_spi_cr50.c  322
-rw-r--r--  drivers/char/virtio_console.c  28
-rw-r--r--  drivers/gpio/gpio-bd70528.c  6
-rw-r--r--  drivers/gpio/gpio-max77620.c  6
-rw-r--r--  drivers/gpio/gpiolib-acpi.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  9
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  23
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.c  6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c  61
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c  9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c  9
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c  22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pool.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c  4
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c  50
-rw-r--r--  drivers/input/mouse/synaptics.c  1
-rw-r--r--  drivers/md/Kconfig  54
-rw-r--r--  drivers/md/bcache/Makefile  2
-rw-r--r--  drivers/md/bcache/alloc.c  5
-rw-r--r--  drivers/md/bcache/bcache.h  4
-rw-r--r--  drivers/md/bcache/bset.c  17
-rw-r--r--  drivers/md/bcache/btree.c  19
-rw-r--r--  drivers/md/bcache/closure.c  7
-rw-r--r--  drivers/md/bcache/request.c  12
-rw-r--r--  drivers/md/bcache/super.c  56
-rw-r--r--  drivers/md/bcache/sysfs.c  7
-rw-r--r--  drivers/md/bcache/writeback.c  4
-rw-r--r--  drivers/md/dm-bio-prison-v1.c  27
-rw-r--r--  drivers/md/dm-bio-prison-v2.c  26
-rw-r--r--  drivers/md/dm-cache-target.c  77
-rw-r--r--  drivers/md/dm-clone-metadata.c  29
-rw-r--r--  drivers/md/dm-clone-metadata.h  4
-rw-r--r--  drivers/md/dm-clone-target.c  62
-rw-r--r--  drivers/md/dm-crypt.c  9
-rw-r--r--  drivers/md/dm-dust.c  97
-rw-r--r--  drivers/md/dm-flakey.c  25
-rw-r--r--  drivers/md/dm-integrity.c  28
-rw-r--r--  drivers/md/dm-linear.c  22
-rw-r--r--  drivers/md/dm-raid.c  164
-rw-r--r--  drivers/md/dm-stripe.c  15
-rw-r--r--  drivers/md/dm-table.c  27
-rw-r--r--  drivers/md/dm-thin.c  118
-rw-r--r--  drivers/md/dm-writecache.c  5
-rw-r--r--  drivers/md/dm-zoned-metadata.c  166
-rw-r--r--  drivers/md/dm-zoned-reclaim.c  8
-rw-r--r--  drivers/md/dm-zoned-target.c  54
-rw-r--r--  drivers/md/dm-zoned.h  2
-rw-r--r--  drivers/md/dm.c  135
-rw-r--r--  drivers/md/md-bitmap.c  2
-rw-r--r--  drivers/md/md-linear.c  5
-rw-r--r--  drivers/md/md-multipath.c  5
-rw-r--r--  drivers/md/md.c  57
-rw-r--r--  drivers/md/md.h  4
-rw-r--r--  drivers/md/raid0.c  7
-rw-r--r--  drivers/md/raid1.c  6
-rw-r--r--  drivers/md/raid10.c  7
-rw-r--r--  drivers/md/raid5-ppl.c  2
-rw-r--r--  drivers/md/raid5.c  8
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c  10
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c  79
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c  8
-rw-r--r--  drivers/mtd/chips/cfi_util.c  2
-rw-r--r--  drivers/mtd/devices/mchp23k256.c  20
-rw-r--r--  drivers/mtd/devices/spear_smi.c  42
-rw-r--r--  drivers/mtd/devices/st_spi_fsm.c  1
-rw-r--r--  drivers/mtd/maps/Kconfig  11
-rw-r--r--  drivers/mtd/maps/Makefile  1
-rw-r--r--  drivers/mtd/maps/l440gx.c  2
-rw-r--r--  drivers/mtd/maps/physmap-core.c  5
-rw-r--r--  drivers/mtd/maps/physmap-ixp4xx.c  132
-rw-r--r--  drivers/mtd/maps/physmap-ixp4xx.h  17
-rw-r--r--  drivers/mtd/mtdchar.c  10
-rw-r--r--  drivers/mtd/mtdcore.c  26
-rw-r--r--  drivers/mtd/mtdswap.c  8
-rw-r--r--  drivers/mtd/nand/raw/Kconfig  7
-rw-r--r--  drivers/mtd/nand/raw/Makefile  1
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/brcmnand.c  23
-rw-r--r--  drivers/mtd/nand/raw/cadence-nand-controller.c  3030
-rw-r--r--  drivers/mtd/nand/raw/denali_dt.c  59
-rw-r--r--  drivers/mtd/nand/raw/hisi504_nand.c  4
-rw-r--r--  drivers/mtd/nand/raw/lpc32xx_mlc.c  1
-rw-r--r--  drivers/mtd/nand/raw/marvell_nand.c  4
-rw-r--r--  drivers/mtd/nand/raw/meson_nand.c  4
-rw-r--r--  drivers/mtd/nand/raw/mtk_ecc.c  4
-rw-r--r--  drivers/mtd/nand/raw/mtk_nand.c  1
-rw-r--r--  drivers/mtd/nand/raw/mxic_nand.c  4
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c  8
-rw-r--r--  drivers/mtd/nand/raw/nand_micron.c  4
-rw-r--r--  drivers/mtd/nand/raw/omap2.c  8
-rw-r--r--  drivers/mtd/nand/raw/sh_flctl.c  4
-rw-r--r--  drivers/mtd/nand/raw/stm32_fmc2_nand.c  5
-rw-r--r--  drivers/mtd/nand/raw/sunxi_nand.c  4
-rw-r--r--  drivers/mtd/spi-nor/aspeed-smc.c  23
-rw-r--r--  drivers/mtd/spi-nor/cadence-quadspi.c  58
-rw-r--r--  drivers/mtd/spi-nor/hisi-sfc.c  23
-rw-r--r--  drivers/mtd/spi-nor/intel-spi-pci.c  6
-rw-r--r--  drivers/mtd/spi-nor/intel-spi.c  58
-rw-r--r--  drivers/mtd/spi-nor/mtk-quadspi.c  25
-rw-r--r--  drivers/mtd/spi-nor/nxp-spifi.c  23
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c  1491
-rw-r--r--  drivers/mtd/ubi/debug.c  131
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c  4
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c  15
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c  9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h  2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c  18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c  31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c  15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c  22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c  19
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c  11
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.h  1
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c  7
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c  3
-rw-r--r--  drivers/net/hyperv/hyperv_net.h  3
-rw-r--r--  drivers/net/hyperv/netvsc.c  38
-rw-r--r--  drivers/net/phy/mdio-sun4i.c  3
-rw-r--r--  drivers/net/phy/mdio_bus.c  2
-rw-r--r--  drivers/net/phy/phylink.c  29
-rw-r--r--  drivers/net/usb/r8152.c  30
-rw-r--r--  drivers/nfc/port100.c  2
-rw-r--r--  drivers/nvme/host/Kconfig  10
-rw-r--r--  drivers/nvme/host/Makefile  1
-rw-r--r--  drivers/nvme/host/core.c  42
-rw-r--r--  drivers/nvme/host/fc.c  49
-rw-r--r--  drivers/nvme/host/hwmon.c  259
-rw-r--r--  drivers/nvme/host/multipath.c  13
-rw-r--r--  drivers/nvme/host/nvme.h  33
-rw-r--r--  drivers/nvme/host/pci.c  9
-rw-r--r--  drivers/nvme/host/rdma.c  16
-rw-r--r--  drivers/nvme/host/tcp.c  4
-rw-r--r--  drivers/nvme/target/admin-cmd.c  133
-rw-r--r--  drivers/nvme/target/core.c  20
-rw-r--r--  drivers/nvme/target/discovery.c  70
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c  15
-rw-r--r--  drivers/nvme/target/fc.c  31
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c  43
-rw-r--r--  drivers/nvme/target/io-cmd-file.c  20
-rw-r--r--  drivers/nvme/target/loop.c  7
-rw-r--r--  drivers/nvme/target/nvmet.h  10
-rw-r--r--  drivers/nvme/target/rdma.c  8
-rw-r--r--  drivers/nvme/target/tcp.c  14
-rw-r--r--  drivers/s390/block/dasd_genhd.c  4
-rw-r--r--  drivers/s390/net/qeth_core.h  1
-rw-r--r--  drivers/s390/net/qeth_core_main.c  10
-rw-r--r--  drivers/s390/net/qeth_l2_main.c  21
-rw-r--r--  drivers/s390/net/qeth_l2_sys.c  14
-rw-r--r--  drivers/scsi/sd.c  15
-rw-r--r--  drivers/scsi/sd.h  12
-rw-r--r--  drivers/scsi/sd_zbc.c  257
-rw-r--r--  drivers/virtio/virtio_balloon.c  20
-rw-r--r--  drivers/virtio/virtio_ring.c  4
-rw-r--r--  fs/Kconfig  3
-rw-r--r--  fs/Makefile  1
-rw-r--r--  fs/afs/callback.c  1
-rw-r--r--  fs/afs/rxrpc.c  1
-rw-r--r--  fs/afs/super.c  1
-rw-r--r--  fs/block_dev.c  69
-rw-r--r--  fs/cramfs/inode.c  4
-rw-r--r--  fs/f2fs/segment.c  3
-rw-r--r--  fs/f2fs/super.c  51
-rw-r--r--  fs/fcntl.c  2
-rw-r--r--  fs/file.c  2
-rw-r--r--  fs/io-wq.c  1065
-rw-r--r--  fs/io-wq.h  74
-rw-r--r--  fs/io_uring.c  2197
-rw-r--r--  fs/ocfs2/xattr.c  56
-rw-r--r--  fs/open.c  6
-rw-r--r--  fs/pipe.c  6
-rw-r--r--  include/Kbuild  2
-rw-r--r--  include/keys/trusted_tpm.h (renamed from include/keys/trusted.h)  49
-rw-r--r--  include/linux/blk-cgroup.h  199
-rw-r--r--  include/linux/blk-mq.h  300
-rw-r--r--  include/linux/blk_types.h  28
-rw-r--r--  include/linux/blkdev.h  31
-rw-r--r--  include/linux/device-mapper.h  27
-rw-r--r--  include/linux/fs.h  6
-rw-r--r--  include/linux/genhd.h  5
-rw-r--r--  include/linux/libata.h  13
-rw-r--r--  include/linux/mtd/spi-nor.h  64
-rw-r--r--  include/linux/nvme-fc.h  182
-rw-r--r--  include/linux/nvme.h  60
-rw-r--r--  include/linux/platform_data/intel-spi.h  1
-rw-r--r--  include/linux/sbitmap.h  9
-rw-r--r--  include/linux/sched.h  1
-rw-r--r--  include/linux/sed-opal.h  1
-rw-r--r--  include/linux/skbuff.h  6
-rw-r--r--  include/linux/socket.h  3
-rw-r--r--  include/linux/tpm.h  250
-rw-r--r--  include/net/tls.h  2
-rw-r--r--  include/trace/events/io_uring.h  358
-rw-r--r--  include/trace/events/wbt.h  12
-rw-r--r--  include/uapi/linux/blkzoned.h  17
-rw-r--r--  include/uapi/linux/fcntl.h  9
-rw-r--r--  include/uapi/linux/io_uring.h  24
-rw-r--r--  include/uapi/linux/sed-opal.h  20
-rw-r--r--  init/Kconfig  1
-rw-r--r--  kernel/bpf/offload.c  4
-rw-r--r--  kernel/fork.c  6
-rw-r--r--  kernel/power/qos.c  8
-rw-r--r--  kernel/sched/core.c  16
-rw-r--r--  lib/sbitmap.c  17
-rw-r--r--  mm/ksm.c  14
-rw-r--r--  mm/memory_hotplug.c  16
-rw-r--r--  net/atm/clip.c  6
-rw-r--r--  net/core/net-sysfs.c  25
-rw-r--r--  net/core/rtnetlink.c  23
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c  2
-rw-r--r--  net/ipv4/udp.c  27
-rw-r--r--  net/ipv6/ipv6_sockglue.c  4
-rw-r--r--  net/ipv6/route.c  2
-rw-r--r--  net/sched/act_pedit.c  12
-rw-r--r--  net/sched/act_tunnel_key.c  4
-rw-r--r--  net/sched/sch_taprio.c  28
-rw-r--r--  net/socket.c  66
-rw-r--r--  net/tls/tls_main.c  1
-rw-r--r--  net/tls/tls_sw.c  11
-rw-r--r--  security/keys/Makefile  2
-rw-r--r--  security/keys/trusted-keys/Makefile  8
-rw-r--r--  security/keys/trusted-keys/trusted_tpm1.c (renamed from security/keys/trusted.c)  98
-rw-r--r--  security/keys/trusted-keys/trusted_tpm2.c  314
-rw-r--r--  tools/gpio/Build  1
-rw-r--r--  tools/gpio/Makefile  10
-rw-r--r--  tools/include/uapi/linux/fcntl.h  9
331 files changed, 13315 insertions, 5508 deletions
diff --git a/Documentation/ABI/testing/procfs-diskstats b/Documentation/ABI/testing/procfs-diskstats
index 2c44b4f1b060..70dcaf2481f4 100644
--- a/Documentation/ABI/testing/procfs-diskstats
+++ b/Documentation/ABI/testing/procfs-diskstats
@@ -29,4 +29,9 @@ Description:
17 - sectors discarded
18 - time spent discarding
+ Kernel 5.5+ appends two more fields for flush requests:
+
+ 19 - flush requests completed successfully
+ 20 - time spent flushing
+
For more details refer to Documentation/admin-guide/iostats.rst
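
As a rough user-space illustration (not part of this series), the two new counters are simply the 19th and 20th whitespace-separated fields of each /proc/diskstats line. A minimal sketch, assuming the 5.5+ layout documented above:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/diskstats", "r");
	char line[512];
	unsigned int major, minor;
	char name[64];
	unsigned long long v[17];	/* fields 4..20 */

	if (!f)
		return 1;

	while (fgets(line, sizeof(line), f)) {
		int n = sscanf(line,
			       "%u %u %63s %llu %llu %llu %llu %llu %llu %llu"
			       " %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
			       &major, &minor, name,
			       &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6],
			       &v[7], &v[8], &v[9], &v[10], &v[11], &v[12],
			       &v[13], &v[14], &v[15], &v[16]);

		/* 20 converted items means fields 19 and 20 are present (5.5+) */
		if (n == 20)
			printf("%s: %llu flushes, %llu ms flushing\n",
			       name, v[15], v[16]);
	}
	fclose(f);
	return 0;
}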
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index f8c7c7126bb1..ed8c14f161ee 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -15,6 +15,12 @@ Description:
9 - I/Os currently in progress
10 - time spent doing I/Os (ms)
11 - weighted time spent doing I/Os (ms)
+ 12 - discards completed
+ 13 - discards merged
+ 14 - sectors discarded
+ 15 - time spent discarding (ms)
+ 16 - flush requests completed
+ 17 - time spent flushing (ms)
For more details refer Documentation/admin-guide/iostats.rst
diff --git a/Documentation/admin-guide/device-mapper/dm-integrity.rst b/Documentation/admin-guide/device-mapper/dm-integrity.rst
index a30aa91b5fbe..594095b54b29 100644
--- a/Documentation/admin-guide/device-mapper/dm-integrity.rst
+++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst
@@ -177,6 +177,11 @@ bitmap_flush_interval:number
The bitmap flush interval in milliseconds. The metadata buffers
are synchronized when this interval expires.
+fix_padding
+ Use a smaller padding of the tag area that is more
+ space-efficient. If this option is not present, large padding is
+ used - that is for compatibility with older kernels.
+
The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can
be changed when reloading the target (load an inactive table and swap the
diff --git a/Documentation/admin-guide/device-mapper/dm-raid.rst b/Documentation/admin-guide/device-mapper/dm-raid.rst
index 2fe255b130fb..f6344675e395 100644
--- a/Documentation/admin-guide/device-mapper/dm-raid.rst
+++ b/Documentation/admin-guide/device-mapper/dm-raid.rst
@@ -417,3 +417,5 @@ Version History
deadlock/potential data corruption. Update superblock when
specific devices are requested via rebuild. Fix RAID leg
rebuild errors.
+ 1.15.0 Fix size extensions not being synchronized in case of new MD bitmap
+ pages allocated; also fix those not occurring after previous reductions
diff --git a/Documentation/admin-guide/iostats.rst b/Documentation/admin-guide/iostats.rst
index 5d63b18bd6d1..4f0462af3ca7 100644
--- a/Documentation/admin-guide/iostats.rst
+++ b/Documentation/admin-guide/iostats.rst
@@ -121,6 +121,15 @@ Field 15 -- # of milliseconds spent discarding
This is the total number of milliseconds spent by all discards (as
measured from __make_request() to end_that_request_last()).
+Field 16 -- # of flush requests completed
+ This is the total number of flush requests completed successfully.
+
+ The block layer combines flush requests and executes at most one at a time.
+ This counts flush requests executed by the disk. It is not tracked for partitions.
+
+Field 17 -- # of milliseconds spent flushing
+ This is the total number of milliseconds spent by all flush requests.
+
To avoid introducing performance bottlenecks, no locks are held while
modifying these counters. This implies that minor inaccuracies may be
introduced when changes collide, so (for instance) adding up all the
diff --git a/Documentation/block/stat.rst b/Documentation/block/stat.rst
index 9c07bc22b0bc..77311335c08b 100644
--- a/Documentation/block/stat.rst
+++ b/Documentation/block/stat.rst
@@ -41,6 +41,8 @@ discard I/Os requests number of discard I/Os processed
discard merges requests number of discard I/Os merged with in-queue I/O
discard sectors sectors number of sectors discarded
discard ticks milliseconds total wait time for discard requests
+flush I/Os requests number of flush I/Os processed
+flush ticks milliseconds total wait time for flush requests
=============== ============= =================================================
read I/Os, write I/Os, discard I/0s
@@ -48,6 +50,14 @@ read I/Os, write I/Os, discard I/0s
These values increment when an I/O request completes.
+flush I/Os
+==========
+
+These values increment when a flush I/O request completes.
+
+The block layer combines flush requests and executes at most one at a time.
+This counts flush requests executed by the disk. It is not tracked for partitions.
+
read merges, write merges, discard merges
=========================================
@@ -62,8 +72,8 @@ discarded from this block device. The "sectors" in question are the
standard UNIX 512-byte sectors, not any device- or filesystem-specific
block size. The counters are incremented when the I/O completes.
-read ticks, write ticks, discard ticks
-======================================
+read ticks, write ticks, discard ticks, flush ticks
+===================================================
These values count the number of milliseconds that I/O requests have
waited on this block device. If there are multiple I/O requests waiting,
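
For the per-device stat file itself, the new flush counters land in columns 16 and 17 of the 17-column layout described above. A small user-space sketch (not part of this series; the "sda" device name is only an example):

#include <stdio.h>

int main(void)
{
	unsigned long long s[17];
	FILE *f = fopen("/sys/block/sda/stat", "r");	/* device name is illustrative */
	int i, n = 0;

	if (!f)
		return 1;

	for (i = 0; i < 17; i++) {
		if (fscanf(f, "%llu", &s[i]) != 1)
			break;
		n++;
	}
	fclose(f);

	/* kernels with flush accounting expose 17 columns */
	if (n == 17)
		printf("flush I/Os: %llu, flush ticks: %llu ms\n",
		       s[15], s[16]);
	return 0;
}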
diff --git a/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt b/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
new file mode 100644
index 000000000000..f3893c4d3c6a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
@@ -0,0 +1,53 @@
+* Cadence NAND controller
+
+Required properties:
+ - compatible : "cdns,hp-nfc"
+ - reg : Contains two entries, each of which is a tuple consisting of a
+ physical address and length. The first entry is the address and
+ length of the controller register set. The second entry is the
+ address and length of the Slave DMA data port.
+ - reg-names: should contain "reg" and "sdma"
+ - #address-cells: should be 1. The cell encodes the chip select connection.
+ - #size-cells : should be 0.
+ - interrupts : The interrupt number.
+ - clocks: phandle of the controller core clock (nf_clk).
+
+Optional properties:
+ - dmas: shall reference DMA channel associated to the NAND controller
+ - cdns,board-delay-ps : Estimated Board delay. The value includes the total
+ round trip delay for the signals and is used for deciding on values
+ associated with data read capture. The example formula for SDR mode is
+ the following:
+ board delay = RE#PAD delay + PCB trace to device + PCB trace from device
+ + DQ PAD delay
+
+Child nodes represent the available NAND chips.
+
+Required properties of NAND chips:
+ - reg: shall contain the native Chip Select ids from 0 to max supported by
+ the cadence nand flash controller
+
+See Documentation/devicetree/bindings/mtd/nand.txt for more details on
+generic bindings.
+
+Example:
+
+nand_controller: nand-controller@60000000 {
+ compatible = "cdns,hp-nfc";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x60000000 0x10000>, <0x80000000 0x10000>;
+ reg-names = "reg", "sdma";
+ clocks = <&nf_clk>;
+ cdns,board-delay-ps = <4830>;
+ interrupts = <2 0>;
+ nand@0 {
+ reg = <0>;
+ label = "nand-1";
+ };
+ nand@1 {
+ reg = <1>;
+ label = "nand-2";
+ };
+
+};
diff --git a/Documentation/devicetree/bindings/mtd/intel,ixp4xx-flash.txt b/Documentation/devicetree/bindings/mtd/intel,ixp4xx-flash.txt
new file mode 100644
index 000000000000..4bdcb92ae381
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/intel,ixp4xx-flash.txt
@@ -0,0 +1,22 @@
+Flash device on Intel IXP4xx SoC
+
+This flash is regular CFI compatible (Intel or AMD extended) flash chips with
+specific big-endian or mixed-endian memory access pattern.
+
+Required properties:
+- compatible : must be "intel,ixp4xx-flash", "cfi-flash";
+- reg : memory address for the flash chip
+- bank-width : width in bytes of flash interface, should be <2>
+
+For the rest of the properties, see mtd-physmap.txt.
+
+The device tree may optionally contain sub-nodes describing partitions of the
+address space. See partition.txt for more detail.
+
+Example:
+
+flash@50000000 {
+ compatible = "intel,ixp4xx-flash", "cfi-flash";
+ reg = <0x50000000 0x01000000>;
+ bank-width = <2>;
+};
diff --git a/Documentation/devicetree/bindings/security/tpm/google,cr50.txt b/Documentation/devicetree/bindings/security/tpm/google,cr50.txt
new file mode 100644
index 000000000000..cd69c2efdd37
--- /dev/null
+++ b/Documentation/devicetree/bindings/security/tpm/google,cr50.txt
@@ -0,0 +1,19 @@
+* H1 Secure Microcontroller with Cr50 Firmware on SPI Bus.
+
+H1 Secure Microcontroller running Cr50 firmware provides several
+functions, including TPM-like functionality. It communicates over
+SPI using the FIFO protocol described in the PTP Spec, section 6.
+
+Required properties:
+- compatible: Should be "google,cr50".
+- spi-max-frequency: Maximum SPI frequency.
+
+Example:
+
+&spi0 {
+ tpm@0 {
+ compatible = "google,cr50";
+ reg = <0>;
+ spi-max-frequency = <800000>;
+ };
+};
diff --git a/Documentation/driver-api/libata.rst b/Documentation/driver-api/libata.rst
index 70e180e6b93d..207f0d24de69 100644
--- a/Documentation/driver-api/libata.rst
+++ b/Documentation/driver-api/libata.rst
@@ -250,23 +250,23 @@ High-level taskfile hooks
::
- void (*qc_prep) (struct ata_queued_cmd *qc);
+ enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc);
int (*qc_issue) (struct ata_queued_cmd *qc);
-Higher-level hooks, these two hooks can potentially supercede several of
+Higher-level hooks, these two hooks can potentially supersede several of
the above taskfile/DMA engine hooks. ``->qc_prep`` is called after the
buffers have been DMA-mapped, and is typically used to populate the
-hardware's DMA scatter-gather table. Most drivers use the standard
-:c:func:`ata_qc_prep` helper function, but more advanced drivers roll their
-own.
+hardware's DMA scatter-gather table. Some drivers use the standard
+:c:func:`ata_bmdma_qc_prep` and :c:func:`ata_bmdma_dumb_qc_prep` helper
+functions, but more advanced drivers roll their own.
``->qc_issue`` is used to make a command active, once the hardware and S/G
tables have been prepared. IDE BMDMA drivers use the helper function
-:c:func:`ata_qc_issue_prot` for taskfile protocol-based dispatch. More
+:c:func:`ata_sff_qc_issue` for taskfile protocol-based dispatch. More
advanced drivers implement their own ``->qc_issue``.
-:c:func:`ata_qc_issue_prot` calls ``->tf_load()``, ``->bmdma_setup()``, and
+:c:func:`ata_sff_qc_issue` calls ``->sff_tf_load()``, ``->bmdma_setup()``, and
``->bmdma_start()`` as necessary to initiate a transfer.
Exception and probe handling (EH)
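
As a sketch of how a simple BMDMA driver wires these hooks up after the rename (the ops structure below is hypothetical; only the helper names come from this documentation update):

#include <linux/libata.h>

static struct ata_port_operations my_bmdma_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.qc_prep	= ata_bmdma_qc_prep,	/* fill the S/G table after DMA mapping */
	.qc_issue	= ata_sff_qc_issue,	/* taskfile protocol based dispatch */
};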
diff --git a/MAINTAINERS b/MAINTAINERS
index e4f170d8bc29..598749df53e2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -643,7 +643,7 @@ F: drivers/net/ethernet/alacritech/*
FORCEDETH GIGABIT ETHERNET DRIVER
M: Rain River <rain.1986.08.12@gmail.com>
-M: Zhu Yanjun <yanjun.zhu@oracle.com>
+M: Zhu Yanjun <zyjzyj2000@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/nvidia/*
@@ -3595,6 +3595,13 @@ S: Maintained
F: Documentation/devicetree/bindings/media/cdns,*.txt
F: drivers/media/platform/cadence/cdns-csi2*
+CADENCE NAND DRIVER
+M: Piotr Sroka <piotrs@cadence.com>
+L: linux-mtd@lists.infradead.org
+S: Maintained
+F: drivers/mtd/nand/raw/cadence-nand-controller.c
+F: Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
+
CADET FM/AM RADIO RECEIVER DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -8564,12 +8571,13 @@ F: include/linux/iova.h
IO_URING
M: Jens Axboe <axboe@kernel.dk>
-L: linux-block@vger.kernel.org
-L: linux-fsdevel@vger.kernel.org
+L: io-uring@vger.kernel.org
T: git git://git.kernel.dk/linux-block
T: git git://git.kernel.dk/liburing
S: Maintained
F: fs/io_uring.c
+F: fs/io-wq.c
+F: fs/io-wq.h
F: include/uapi/linux/io_uring.h
IPMI SUBSYSTEM
@@ -10536,15 +10544,13 @@ F: include/linux/vmalloc.h
F: mm/
MEMORY TECHNOLOGY DEVICES (MTD)
-M: David Woodhouse <dwmw2@infradead.org>
-M: Brian Norris <computersforpeace@gmail.com>
-M: Marek Vasut <marek.vasut@gmail.com>
M: Miquel Raynal <miquel.raynal@bootlin.com>
M: Richard Weinberger <richard@nod.at>
M: Vignesh Raghavendra <vigneshr@ti.com>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
+C: irc://irc.oftc.net/mtd
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git mtd/fixes
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git mtd/next
S: Maintained
@@ -11637,6 +11643,7 @@ F: drivers/nvme/target/fcloop.c
NVM EXPRESS TARGET DRIVER
M: Christoph Hellwig <hch@lst.de>
M: Sagi Grimberg <sagi@grimberg.me>
+M: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
L: linux-nvme@lists.infradead.org
T: git://git.infradead.org/nvme.git
W: http://git.infradead.org/nvme.git
@@ -15294,7 +15301,6 @@ F: arch/arm/boot/dts/spear*
F: arch/arm/mach-spear/
SPI NOR SUBSYSTEM
-M: Marek Vasut <marek.vasut@gmail.com>
M: Tudor Ambarus <tudor.ambarus@microchip.com>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
@@ -16591,10 +16597,9 @@ F: drivers/media/pci/tw686x/
UBI FILE SYSTEM (UBIFS)
M: Richard Weinberger <richard@nod.at>
-M: Artem Bityutskiy <dedekind1@gmail.com>
-M: Adrian Hunter <adrian.hunter@intel.com>
L: linux-mtd@lists.infradead.org
-T: git git://git.infradead.org/ubifs-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs.git next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs.git fixes
W: http://www.linux-mtd.infradead.org/doc/ubifs.html
S: Supported
F: Documentation/filesystems/ubifs.txt
@@ -16709,11 +16714,11 @@ S: Maintained
F: drivers/scsi/ufs/ufs-mediatek*
UNSORTED BLOCK IMAGES (UBI)
-M: Artem Bityutskiy <dedekind1@gmail.com>
M: Richard Weinberger <richard@nod.at>
W: http://www.linux-mtd.infradead.org/
L: linux-mtd@lists.infradead.org
-T: git git://git.infradead.org/ubifs-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs.git next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs.git fixes
S: Supported
F: drivers/mtd/ubi/
F: include/linux/mtd/ubi.h
@@ -17215,6 +17220,7 @@ F: virt/lib/
VIRTIO AND VHOST VSOCK DRIVER
M: Stefan Hajnoczi <stefanha@redhat.com>
+M: Stefano Garzarella <sgarzare@redhat.com>
L: kvm@vger.kernel.org
L: virtualization@lists.linux-foundation.org
L: netdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index 9cd289196267..d4d36c61940b 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 5bf963830b17..c764cc8fb3b6 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -59,23 +59,6 @@ alternative_else_nop_endif
#endif
/*
- * These macros are no-ops when UAO is present.
- */
- .macro uaccess_disable_not_uao, tmp1, tmp2
- uaccess_ttbr0_disable \tmp1, \tmp2
-alternative_if ARM64_ALT_PAN_NOT_UAO
- SET_PSTATE_PAN(1)
-alternative_else_nop_endif
- .endm
-
- .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3
- uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
-alternative_if ARM64_ALT_PAN_NOT_UAO
- SET_PSTATE_PAN(0)
-alternative_else_nop_endif
- .endm
-
-/*
* Remove the address tag from a virtual address, if present.
*/
.macro untagged_addr, dst, addr
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 097d6bfac0b7..127712b0b970 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -378,20 +378,34 @@ do { \
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n) \
({ \
- __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \
+ unsigned long __acfu_ret; \
+ uaccess_enable_not_uao(); \
+ __acfu_ret = __arch_copy_from_user((to), \
+ __uaccess_mask_ptr(from), (n)); \
+ uaccess_disable_not_uao(); \
+ __acfu_ret; \
})
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n) \
({ \
- __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \
+ unsigned long __actu_ret; \
+ uaccess_enable_not_uao(); \
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to), \
+ (from), (n)); \
+ uaccess_disable_not_uao(); \
+ __actu_ret; \
})
extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n) \
({ \
- __arch_copy_in_user(__uaccess_mask_ptr(to), \
- __uaccess_mask_ptr(from), (n)); \
+ unsigned long __aciu_ret; \
+ uaccess_enable_not_uao(); \
+ __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to), \
+ __uaccess_mask_ptr(from), (n)); \
+ uaccess_disable_not_uao(); \
+ __aciu_ret; \
})
#define INLINE_COPY_TO_USER
@@ -400,8 +414,11 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi
extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
- if (access_ok(to, n))
+ if (access_ok(to, n)) {
+ uaccess_enable_not_uao();
n = __arch_clear_user(__uaccess_mask_ptr(to), n);
+ uaccess_disable_not_uao();
+ }
return n;
}
#define clear_user __clear_user
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 10415572e82f..aeafc03e961a 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -20,7 +20,6 @@
* Alignment fixed up by hardware.
*/
ENTRY(__arch_clear_user)
- uaccess_enable_not_uao x2, x3, x4
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
b.mi 2f
@@ -40,7 +39,6 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
b.mi 5f
uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5: mov x0, #0
- uaccess_disable_not_uao x2, x3
ret
ENDPROC(__arch_clear_user)
EXPORT_SYMBOL(__arch_clear_user)
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 680e74409ff9..ebb3c06cbb5d 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -54,10 +54,8 @@
end .req x5
ENTRY(__arch_copy_from_user)
- uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
- uaccess_disable_not_uao x3, x4
mov x0, #0 // Nothing to copy
ret
ENDPROC(__arch_copy_from_user)
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 0bedae3f3792..3d8153a1ebce 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -56,10 +56,8 @@
end .req x5
ENTRY(__arch_copy_in_user)
- uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
- uaccess_disable_not_uao x3, x4
mov x0, #0
ret
ENDPROC(__arch_copy_in_user)
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 2d88c736e8f2..357eae2c18eb 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -53,10 +53,8 @@
end .req x5
ENTRY(__arch_copy_to_user)
- uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
- uaccess_disable_not_uao x3, x4
mov x0, #0
ret
ENDPROC(__arch_copy_to_user)
diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c
index cbfcbe6470a5..bfa30b75b2b8 100644
--- a/arch/arm64/lib/uaccess_flushcache.c
+++ b/arch/arm64/lib/uaccess_flushcache.c
@@ -28,7 +28,11 @@ void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
unsigned long __copy_user_flushcache(void *to, const void __user *from,
unsigned long n)
{
- unsigned long rc = __arch_copy_from_user(to, from, n);
+ unsigned long rc;
+
+ uaccess_enable_not_uao();
+ rc = __arch_copy_from_user(to, from, n);
+ uaccess_disable_not_uao();
/* See above */
__clean_dcache_area_pop(to, n - rc);
diff --git a/block/Kconfig b/block/Kconfig
index 41c0917ce622..c23094a14a2b 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -32,6 +32,9 @@ config BLK_RQ_ALLOC_TIME
config BLK_SCSI_REQUEST
bool
+config BLK_CGROUP_RWSTAT
+ bool
+
config BLK_DEV_BSG
bool "Block layer SG support v4"
default y
@@ -86,6 +89,7 @@ config BLK_DEV_ZONED
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
depends on BLK_CGROUP=y
+ select BLK_CGROUP_RWSTAT
---help---
Block layer bio throttling support. It can be used to limit
the IO rate to a device. IO rate policies are per cgroup and
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index b89310a022ad..7df14133adc8 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -31,6 +31,7 @@ config IOSCHED_BFQ
config BFQ_GROUP_IOSCHED
bool "BFQ hierarchical scheduling support"
depends on IOSCHED_BFQ && BLK_CGROUP
+ select BLK_CGROUP_RWSTAT
---help---
Enable hierarchical scheduling in BFQ, using the blkio
diff --git a/block/Makefile b/block/Makefile
index 9ef57ace90d4..205a5f2fef17 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_ioctl.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
+obj-$(CONFIG_BLK_CGROUP_RWSTAT) += blk-cgroup-rwstat.o
obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o
obj-$(CONFIG_BLK_CGROUP_IOCOST) += blk-iocost.o
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 86a607cf19a1..cea0ae12f937 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -347,6 +347,14 @@ void bfqg_and_blkg_put(struct bfq_group *bfqg)
bfqg_put(bfqg);
}
+void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
+{
+ struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);
+
+ blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
+ blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
+}
+
/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
@@ -431,6 +439,8 @@ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
static void bfqg_stats_exit(struct bfqg_stats *stats)
{
+ blkg_rwstat_exit(&stats->bytes);
+ blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
blkg_rwstat_exit(&stats->merged);
blkg_rwstat_exit(&stats->service_time);
@@ -448,6 +458,10 @@ static void bfqg_stats_exit(struct bfqg_stats *stats)
static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
+ if (blkg_rwstat_init(&stats->bytes, gfp) ||
+ blkg_rwstat_init(&stats->ios, gfp))
+ return -ENOMEM;
+
#ifdef CONFIG_BFQ_CGROUP_DEBUG
if (blkg_rwstat_init(&stats->merged, gfp) ||
blkg_rwstat_init(&stats->service_time, gfp) ||
@@ -1057,18 +1071,35 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
return bfq_io_set_device_weight(of, buf, nbytes, off);
}
-#ifdef CONFIG_BFQ_CGROUP_DEBUG
-static int bfqg_print_stat(struct seq_file *sf, void *v)
+static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
- &blkcg_policy_bfq, seq_cft(sf)->private, false);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
+ &blkcg_policy_bfq, seq_cft(sf)->private, true);
return 0;
}
-static int bfqg_print_rwstat(struct seq_file *sf, void *v)
+static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
- &blkcg_policy_bfq, seq_cft(sf)->private, true);
+ struct blkg_rwstat_sample sum;
+
+ blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
+ return __blkg_prfill_rwstat(sf, pd, &sum);
+}
+
+static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
+ seq_cft(sf)->private, true);
+ return 0;
+}
+
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
+static int bfqg_print_stat(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
+ &blkcg_policy_bfq, seq_cft(sf)->private, false);
return 0;
}
@@ -1097,15 +1128,6 @@ static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
return __blkg_prfill_u64(sf, pd, sum);
}
-static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct blkg_rwstat_sample sum;
-
- blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
- return __blkg_prfill_rwstat(sf, pd, &sum);
-}
-
static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
@@ -1114,18 +1136,11 @@ static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
return 0;
}
-static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
- seq_cft(sf)->private, true);
- return 0;
-}
-
static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
int off)
{
- u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
+ struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
+ u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);
return __blkg_prfill_u64(sf, pd, sum >> 9);
}
@@ -1142,8 +1157,8 @@ static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
{
struct blkg_rwstat_sample tmp;
- blkg_rwstat_recursive_sum(pd->blkg, NULL,
- offsetof(struct blkcg_gq, stat_bytes), &tmp);
+ blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
+ offsetof(struct bfq_group, stats.bytes), &tmp);
return __blkg_prfill_u64(sf, pd,
(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
@@ -1226,13 +1241,13 @@ struct cftype bfq_blkcg_legacy_files[] = {
/* statistics, covers only the tasks in the bfqg */
{
.name = "bfq.io_service_bytes",
- .private = (unsigned long)&blkcg_policy_bfq,
- .seq_show = blkg_print_stat_bytes,
+ .private = offsetof(struct bfq_group, stats.bytes),
+ .seq_show = bfqg_print_rwstat,
},
{
.name = "bfq.io_serviced",
- .private = (unsigned long)&blkcg_policy_bfq,
- .seq_show = blkg_print_stat_ios,
+ .private = offsetof(struct bfq_group, stats.ios),
+ .seq_show = bfqg_print_rwstat,
},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
{
@@ -1269,13 +1284,13 @@ struct cftype bfq_blkcg_legacy_files[] = {
/* the same statistics which cover the bfqg and its descendants */
{
.name = "bfq.io_service_bytes_recursive",
- .private = (unsigned long)&blkcg_policy_bfq,
- .seq_show = blkg_print_stat_bytes_recursive,
+ .private = offsetof(struct bfq_group, stats.bytes),
+ .seq_show = bfqg_print_rwstat_recursive,
},
{
.name = "bfq.io_serviced_recursive",
- .private = (unsigned long)&blkcg_policy_bfq,
- .seq_show = blkg_print_stat_ios_recursive,
+ .private = offsetof(struct bfq_group, stats.ios),
+ .seq_show = bfqg_print_rwstat_recursive,
},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
{
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 0c6214497fcc..ad4af4aaf2ce 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5484,6 +5484,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool idle_timer_disabled = false;
unsigned int cmd_flags;
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
+ bfqg_stats_update_legacy_io(q, rq);
+#endif
spin_lock_irq(&bfqd->lock);
if (blk_mq_sched_try_insert_merge(q, rq)) {
spin_unlock_irq(&bfqd->lock);
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 5d1a519640f6..8526f20c53bc 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -10,6 +10,8 @@
#include <linux/hrtimer.h>
#include <linux/blk-cgroup.h>
+#include "blk-cgroup-rwstat.h"
+
#define BFQ_IOPRIO_CLASSES 3
#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
@@ -809,6 +811,9 @@ struct bfq_stat {
};
struct bfqg_stats {
+ /* basic stats */
+ struct blkg_rwstat bytes;
+ struct blkg_rwstat ios;
#ifdef CONFIG_BFQ_CGROUP_DEBUG
/* number of ios merged */
struct blkg_rwstat merged;
@@ -956,6 +961,7 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
/* ---------------- cgroups-support interface ---------------- */
+void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq);
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
unsigned int op);
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
@@ -1062,6 +1068,8 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
char pid_str[MAX_PID_STR_LENGTH]; \
+ if (likely(!blk_trace_note_message_enabled((bfqd)->queue))) \
+ break; \
bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
blk_add_cgroup_trace_msg((bfqd)->queue, \
bfqg_to_blkg(bfqq_group(bfqq))->blkcg, \
@@ -1078,6 +1086,8 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
char pid_str[MAX_PID_STR_LENGTH]; \
+ if (likely(!blk_trace_note_message_enabled((bfqd)->queue))) \
+ break; \
bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str, \
bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
diff --git a/block/blk-cgroup-rwstat.c b/block/blk-cgroup-rwstat.c
new file mode 100644
index 000000000000..85d5790ac49b
--- /dev/null
+++ b/block/blk-cgroup-rwstat.c
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Legacy blkg rwstat helpers enabled by CONFIG_BLK_CGROUP_RWSTAT.
+ * Do not use in new code.
+ */
+#include "blk-cgroup-rwstat.h"
+
+int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
+{
+ int i, ret;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++) {
+ ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
+ if (ret) {
+ while (--i >= 0)
+ percpu_counter_destroy(&rwstat->cpu_cnt[i]);
+ return ret;
+ }
+ atomic64_set(&rwstat->aux_cnt[i], 0);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blkg_rwstat_init);
+
+void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
+{
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ percpu_counter_destroy(&rwstat->cpu_cnt[i]);
+}
+EXPORT_SYMBOL_GPL(blkg_rwstat_exit);
+
+/**
+ * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @rwstat: rwstat to print
+ *
+ * Print @rwstat to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat_sample *rwstat)
+{
+ static const char *rwstr[] = {
+ [BLKG_RWSTAT_READ] = "Read",
+ [BLKG_RWSTAT_WRITE] = "Write",
+ [BLKG_RWSTAT_SYNC] = "Sync",
+ [BLKG_RWSTAT_ASYNC] = "Async",
+ [BLKG_RWSTAT_DISCARD] = "Discard",
+ };
+ const char *dname = blkg_dev_name(pd->blkg);
+ u64 v;
+ int i;
+
+ if (!dname)
+ return 0;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
+ rwstat->cnt[i]);
+
+ v = rwstat->cnt[BLKG_RWSTAT_READ] +
+ rwstat->cnt[BLKG_RWSTAT_WRITE] +
+ rwstat->cnt[BLKG_RWSTAT_DISCARD];
+ seq_printf(sf, "%s Total %llu\n", dname, v);
+ return v;
+}
+EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
+
+/**
+ * blkg_prfill_rwstat - prfill callback for blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_rwstat in @pd
+ *
+ * prfill callback for printing a blkg_rwstat.
+ */
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
+{
+ struct blkg_rwstat_sample rwstat = { };
+
+ blkg_rwstat_read((void *)pd + off, &rwstat);
+ return __blkg_prfill_rwstat(sf, pd, &rwstat);
+}
+EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
+
+/**
+ * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
+ * @blkg: blkg of interest
+ * @pol: blkcg_policy which contains the blkg_rwstat
+ * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
+ * @sum: blkg_rwstat_sample structure containing the results
+ *
+ * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
+ * online descendants and their aux counts. The caller must be holding the
+ * queue lock for online tests.
+ *
+ * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
+ * is at @off bytes into @blkg's blkg_policy_data of the policy.
+ */
+void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
+ int off, struct blkg_rwstat_sample *sum)
+{
+ struct blkcg_gq *pos_blkg;
+ struct cgroup_subsys_state *pos_css;
+ unsigned int i;
+
+ lockdep_assert_held(&blkg->q->queue_lock);
+
+ rcu_read_lock();
+ blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
+ struct blkg_rwstat *rwstat;
+
+ if (!pos_blkg->online)
+ continue;
+
+ if (pol)
+ rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
+ else
+ rwstat = (void *)pos_blkg + off;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
diff --git a/block/blk-cgroup-rwstat.h b/block/blk-cgroup-rwstat.h
new file mode 100644
index 000000000000..ee746919c41f
--- /dev/null
+++ b/block/blk-cgroup-rwstat.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Legacy blkg rwstat helpers enabled by CONFIG_BLK_CGROUP_RWSTAT.
+ * Do not use in new code.
+ */
+#ifndef _BLK_CGROUP_RWSTAT_H
+#define _BLK_CGROUP_RWSTAT_H
+
+#include <linux/blk-cgroup.h>
+
+enum blkg_rwstat_type {
+ BLKG_RWSTAT_READ,
+ BLKG_RWSTAT_WRITE,
+ BLKG_RWSTAT_SYNC,
+ BLKG_RWSTAT_ASYNC,
+ BLKG_RWSTAT_DISCARD,
+
+ BLKG_RWSTAT_NR,
+ BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
+};
+
+/*
+ * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
+ * recursive. Used to carry stats of dead children.
+ */
+struct blkg_rwstat {
+ struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
+ atomic64_t aux_cnt[BLKG_RWSTAT_NR];
+};
+
+struct blkg_rwstat_sample {
+ u64 cnt[BLKG_RWSTAT_NR];
+};
+
+static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
+ unsigned int idx)
+{
+ return atomic64_read(&rwstat->aux_cnt[idx]) +
+ percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
+}
+
+int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp);
+void blkg_rwstat_exit(struct blkg_rwstat *rwstat);
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat_sample *rwstat);
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off);
+void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
+ int off, struct blkg_rwstat_sample *sum);
+
+
+/**
+ * blkg_rwstat_add - add a value to a blkg_rwstat
+ * @rwstat: target blkg_rwstat
+ * @op: REQ_OP and flags
+ * @val: value to add
+ *
+ * Add @val to @rwstat. The counters are chosen according to @op. The
+ * caller is responsible for synchronizing calls to this function.
+ */
+static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
+ unsigned int op, uint64_t val)
+{
+ struct percpu_counter *cnt;
+
+ if (op_is_discard(op))
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
+ else if (op_is_write(op))
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
+ else
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
+
+ percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
+
+ if (op_is_sync(op))
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
+ else
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
+
+ percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
+}
+
+/**
+ * blkg_rwstat_read - read the current values of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Read the current snapshot of @rwstat and return it in the aux counts.
+ */
+static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
+ struct blkg_rwstat_sample *result)
+{
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ result->cnt[i] =
+ percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
+}
+
+/**
+ * blkg_rwstat_total - read the total count of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Return the total count of @rwstat regardless of the IO direction. This
+ * function can be called without synchronization and takes care of u64
+ * atomicity.
+ */
+static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
+{
+ struct blkg_rwstat_sample tmp = { };
+
+ blkg_rwstat_read(rwstat, &tmp);
+ return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
+}
+
+/**
+ * blkg_rwstat_reset - reset a blkg_rwstat
+ * @rwstat: blkg_rwstat to reset
+ */
+static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
+{
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++) {
+ percpu_counter_set(&rwstat->cpu_cnt[i], 0);
+ atomic64_set(&rwstat->aux_cnt[i], 0);
+ }
+}
+
+/**
+ * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
+ * @to: the destination blkg_rwstat
+ * @from: the source
+ *
+ * Add @from's count including the aux one to @to's aux count.
+ */
+static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
+ struct blkg_rwstat *from)
+{
+ u64 sum[BLKG_RWSTAT_NR];
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
+ &to->aux_cnt[i]);
+}
+#endif /* _BLK_CGROUP_RWSTAT_H */
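
A usage sketch of these helpers, mirroring the bfq conversion earlier in this series; the "example_pd" structure and function names are hypothetical and are not part of the patch:

#include <linux/blkdev.h>

#include "blk-cgroup-rwstat.h"

struct example_pd {
	struct blkg_policy_data pd;	/* embedded policy data, as in struct bfq_group */
	struct blkg_rwstat bytes;	/* per-direction byte counts */
	struct blkg_rwstat ios;		/* per-direction I/O counts */
};

static int example_stats_init(struct example_pd *epd, gfp_t gfp)
{
	if (blkg_rwstat_init(&epd->bytes, gfp))
		return -ENOMEM;
	if (blkg_rwstat_init(&epd->ios, gfp)) {
		blkg_rwstat_exit(&epd->bytes);
		return -ENOMEM;
	}
	return 0;
}

static void example_account_io(struct example_pd *epd, struct request *rq)
{
	/* blkg_rwstat_add() picks READ/WRITE/DISCARD and SYNC/ASYNC from the op flags */
	blkg_rwstat_add(&epd->bytes, rq->cmd_flags, blk_rq_bytes(rq));
	blkg_rwstat_add(&epd->ios, rq->cmd_flags, 1);
}

static void example_stats_exit(struct example_pd *epd)
{
	blkg_rwstat_exit(&epd->bytes);
	blkg_rwstat_exit(&epd->ios);
}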
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 1eb8895be4c6..708dea92dac8 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -80,8 +80,7 @@ static void blkg_free(struct blkcg_gq *blkg)
if (blkg->pd[i])
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
- blkg_rwstat_exit(&blkg->stat_ios);
- blkg_rwstat_exit(&blkg->stat_bytes);
+ free_percpu(blkg->iostat_cpu);
percpu_ref_exit(&blkg->refcnt);
kfree(blkg);
}
@@ -146,7 +145,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
gfp_t gfp_mask)
{
struct blkcg_gq *blkg;
- int i;
+ int i, cpu;
/* alloc and init base part */
blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
@@ -156,8 +155,8 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
goto err_free;
- if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
- blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
+ blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
+ if (!blkg->iostat_cpu)
goto err_free;
blkg->q = q;
@@ -167,6 +166,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
blkg->blkcg = blkcg;
+ u64_stats_init(&blkg->iostat.sync);
+ for_each_possible_cpu(cpu)
+ u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
+
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
struct blkg_policy_data *pd;
@@ -393,7 +396,6 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
static void blkg_destroy(struct blkcg_gq *blkg)
{
struct blkcg *blkcg = blkg->blkcg;
- struct blkcg_gq *parent = blkg->parent;
int i;
lockdep_assert_held(&blkg->q->queue_lock);
@@ -410,11 +412,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
pol->pd_offline_fn(blkg->pd[i]);
}
- if (parent) {
- blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
- blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
- }
-
blkg->online = false;
radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
@@ -464,7 +461,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
{
struct blkcg *blkcg = css_to_blkcg(css);
struct blkcg_gq *blkg;
- int i;
+ int i, cpu;
mutex_lock(&blkcg_pol_mutex);
spin_lock_irq(&blkcg->lock);
@@ -475,8 +472,12 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
* anyway. If you get hit by a race, retry.
*/
hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
- blkg_rwstat_reset(&blkg->stat_bytes);
- blkg_rwstat_reset(&blkg->stat_ios);
+ for_each_possible_cpu(cpu) {
+ struct blkg_iostat_set *bis =
+ per_cpu_ptr(blkg->iostat_cpu, cpu);
+ memset(bis, 0, sizeof(*bis));
+ }
+ memset(&blkg->iostat, 0, sizeof(blkg->iostat));
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -560,186 +561,6 @@ u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
-/**
- * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
- * @sf: seq_file to print to
- * @pd: policy private data of interest
- * @rwstat: rwstat to print
- *
- * Print @rwstat to @sf for the device assocaited with @pd.
- */
-u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- const struct blkg_rwstat_sample *rwstat)
-{
- static const char *rwstr[] = {
- [BLKG_RWSTAT_READ] = "Read",
- [BLKG_RWSTAT_WRITE] = "Write",
- [BLKG_RWSTAT_SYNC] = "Sync",
- [BLKG_RWSTAT_ASYNC] = "Async",
- [BLKG_RWSTAT_DISCARD] = "Discard",
- };
- const char *dname = blkg_dev_name(pd->blkg);
- u64 v;
- int i;
-
- if (!dname)
- return 0;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
- rwstat->cnt[i]);
-
- v = rwstat->cnt[BLKG_RWSTAT_READ] +
- rwstat->cnt[BLKG_RWSTAT_WRITE] +
- rwstat->cnt[BLKG_RWSTAT_DISCARD];
- seq_printf(sf, "%s Total %llu\n", dname, v);
- return v;
-}
-EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
-
-/**
- * blkg_prfill_rwstat - prfill callback for blkg_rwstat
- * @sf: seq_file to print to
- * @pd: policy private data of interest
- * @off: offset to the blkg_rwstat in @pd
- *
- * prfill callback for printing a blkg_rwstat.
- */
-u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- int off)
-{
- struct blkg_rwstat_sample rwstat = { };
-
- blkg_rwstat_read((void *)pd + off, &rwstat);
- return __blkg_prfill_rwstat(sf, pd, &rwstat);
-}
-EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
-
-static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct blkg_rwstat_sample rwstat = { };
-
- blkg_rwstat_read((void *)pd->blkg + off, &rwstat);
- return __blkg_prfill_rwstat(sf, pd, &rwstat);
-}
-
-/**
- * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
- * @sf: seq_file to print to
- * @v: unused
- *
- * To be used as cftype->seq_show to print blkg->stat_bytes.
- * cftype->private must be set to the blkcg_policy.
- */
-int blkg_print_stat_bytes(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
- offsetof(struct blkcg_gq, stat_bytes), true);
- return 0;
-}
-EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
-
-/**
- * blkg_print_stat_bytes - seq_show callback for blkg->stat_ios
- * @sf: seq_file to print to
- * @v: unused
- *
- * To be used as cftype->seq_show to print blkg->stat_ios. cftype->private
- * must be set to the blkcg_policy.
- */
-int blkg_print_stat_ios(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
- offsetof(struct blkcg_gq, stat_ios), true);
- return 0;
-}
-EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
-
-static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
- struct blkg_policy_data *pd,
- int off)
-{
- struct blkg_rwstat_sample rwstat;
-
- blkg_rwstat_recursive_sum(pd->blkg, NULL, off, &rwstat);
- return __blkg_prfill_rwstat(sf, pd, &rwstat);
-}
-
-/**
- * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
- * @sf: seq_file to print to
- * @v: unused
- */
-int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- blkg_prfill_rwstat_field_recursive,
- (void *)seq_cft(sf)->private,
- offsetof(struct blkcg_gq, stat_bytes), true);
- return 0;
-}
-EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
-
-/**
- * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
- * @sf: seq_file to print to
- * @v: unused
- */
-int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- blkg_prfill_rwstat_field_recursive,
- (void *)seq_cft(sf)->private,
- offsetof(struct blkcg_gq, stat_ios), true);
- return 0;
-}
-EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
-
-/**
- * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
- * @blkg: blkg of interest
- * @pol: blkcg_policy which contains the blkg_rwstat
- * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
- * @sum: blkg_rwstat_sample structure containing the results
- *
- * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
- * online descendants and their aux counts. The caller must be holding the
- * queue lock for online tests.
- *
- * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
- * is at @off bytes into @blkg's blkg_policy_data of the policy.
- */
-void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
- int off, struct blkg_rwstat_sample *sum)
-{
- struct blkcg_gq *pos_blkg;
- struct cgroup_subsys_state *pos_css;
- unsigned int i;
-
- lockdep_assert_held(&blkg->q->queue_lock);
-
- rcu_read_lock();
- blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
- struct blkg_rwstat *rwstat;
-
- if (!pos_blkg->online)
- continue;
-
- if (pol)
- rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
- else
- rwstat = (void *)pos_blkg + off;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
- }
- rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
-
/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
const struct blkcg_policy *pol,
@@ -923,16 +744,18 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
struct blkcg_gq *blkg;
+ cgroup_rstat_flush(blkcg->css.cgroup);
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+ struct blkg_iostat_set *bis = &blkg->iostat;
const char *dname;
char *buf;
- struct blkg_rwstat_sample rwstat;
u64 rbytes, wbytes, rios, wios, dbytes, dios;
size_t size = seq_get_buf(sf, &buf), off = 0;
int i;
bool has_stats = false;
+ unsigned seq;
spin_lock_irq(&blkg->q->queue_lock);
@@ -951,17 +774,16 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
*/
off += scnprintf(buf+off, size-off, "%s ", dname);
- blkg_rwstat_recursive_sum(blkg, NULL,
- offsetof(struct blkcg_gq, stat_bytes), &rwstat);
- rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
- wbytes = rwstat.cnt[BLKG_RWSTAT_WRITE];
- dbytes = rwstat.cnt[BLKG_RWSTAT_DISCARD];
+ do {
+ seq = u64_stats_fetch_begin(&bis->sync);
- blkg_rwstat_recursive_sum(blkg, NULL,
- offsetof(struct blkcg_gq, stat_ios), &rwstat);
- rios = rwstat.cnt[BLKG_RWSTAT_READ];
- wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
- dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];
+ rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
+ wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
+ dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
+ rios = bis->cur.ios[BLKG_IOSTAT_READ];
+ wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
+ dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
+ } while (u64_stats_fetch_retry(&bis->sync, seq));
if (rbytes || wbytes || rios || wios) {
has_stats = true;
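
The do/while above is the u64_stats reader convention: take a snapshot of the counters, then retry if the sequence count shows a writer was active or ran concurrently. Below is a minimal userspace model of that retry loop using C11 atomics; the struct and function names are made up for the example, and the plain 64-bit loads are a simplification of what u64_stats_sync guarantees.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct iostat_sample {
        _Atomic unsigned int seq;       /* even = stable, odd = writer active */
        uint64_t bytes;
        uint64_t ios;
};

/* Reader: retry until a snapshot is taken under a stable, even sequence. */
static void read_stable(struct iostat_sample *s, uint64_t *bytes, uint64_t *ios)
{
        unsigned int start;

        do {
                start = atomic_load_explicit(&s->seq, memory_order_acquire);
                *bytes = s->bytes;
                *ios = s->ios;
                atomic_thread_fence(memory_order_acquire);
        } while ((start & 1) ||
                 start != atomic_load_explicit(&s->seq, memory_order_relaxed));
}

int main(void)
{
        struct iostat_sample s = { .seq = 0, .bytes = 1 << 20, .ios = 256 };
        uint64_t b, n;

        read_stable(&s, &b, &n);
        printf("%llu bytes over %llu ios\n",
               (unsigned long long)b, (unsigned long long)n);
        return 0;
}
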
@@ -1297,6 +1119,77 @@ static int blkcg_can_attach(struct cgroup_taskset *tset)
return ret;
}
+static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
+{
+ int i;
+
+ for (i = 0; i < BLKG_IOSTAT_NR; i++) {
+ dst->bytes[i] = src->bytes[i];
+ dst->ios[i] = src->ios[i];
+ }
+}
+
+static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
+{
+ int i;
+
+ for (i = 0; i < BLKG_IOSTAT_NR; i++) {
+ dst->bytes[i] += src->bytes[i];
+ dst->ios[i] += src->ios[i];
+ }
+}
+
+static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
+{
+ int i;
+
+ for (i = 0; i < BLKG_IOSTAT_NR; i++) {
+ dst->bytes[i] -= src->bytes[i];
+ dst->ios[i] -= src->ios[i];
+ }
+}
+
+static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
+{
+ struct blkcg *blkcg = css_to_blkcg(css);
+ struct blkcg_gq *blkg;
+
+ rcu_read_lock();
+
+ hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+ struct blkcg_gq *parent = blkg->parent;
+ struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
+ struct blkg_iostat cur, delta;
+ unsigned seq;
+
+ /* fetch the current per-cpu values */
+ do {
+ seq = u64_stats_fetch_begin(&bisc->sync);
+ blkg_iostat_set(&cur, &bisc->cur);
+ } while (u64_stats_fetch_retry(&bisc->sync, seq));
+
+ /* propagate percpu delta to global */
+ u64_stats_update_begin(&blkg->iostat.sync);
+ blkg_iostat_set(&delta, &cur);
+ blkg_iostat_sub(&delta, &bisc->last);
+ blkg_iostat_add(&blkg->iostat.cur, &delta);
+ blkg_iostat_add(&bisc->last, &delta);
+ u64_stats_update_end(&blkg->iostat.sync);
+
+ /* propagate global delta to parent */
+ if (parent) {
+ u64_stats_update_begin(&parent->iostat.sync);
+ blkg_iostat_set(&delta, &blkg->iostat.cur);
+ blkg_iostat_sub(&delta, &blkg->iostat.last);
+ blkg_iostat_add(&parent->iostat.cur, &delta);
+ blkg_iostat_add(&blkg->iostat.last, &delta);
+ u64_stats_update_end(&parent->iostat.sync);
+ }
+ }
+
+ rcu_read_unlock();
+}
+
static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
int i;
@@ -1329,6 +1222,7 @@ struct cgroup_subsys io_cgrp_subsys = {
.css_offline = blkcg_css_offline,
.css_free = blkcg_css_free,
.can_attach = blkcg_can_attach,
+ .css_rstat_flush = blkcg_rstat_flush,
.bind = blkcg_bind,
.dfl_cftypes = blkcg_files,
.legacy_cftypes = blkcg_legacy_files,
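
blkcg_rstat_flush() above uses the usual rstat bookkeeping: work out how much each per-cpu counter advanced since it was last flushed, fold that delta into the blkg's global counters, then propagate the blkg's own delta to its parent. A standalone sketch of that arithmetic, with plain arrays standing in for struct blkg_iostat and the per-cpu data:

#include <stdint.h>
#include <stdio.h>

#define NR_DIRS 3       /* read, write, discard; mirrors BLKG_IOSTAT_NR */

struct io_counters { uint64_t cur[NR_DIRS]; uint64_t last[NR_DIRS]; };

/* Fold into @parent the delta accumulated in @child since its last flush. */
static void flush_delta(struct io_counters *parent, struct io_counters *child)
{
        for (int i = 0; i < NR_DIRS; i++) {
                uint64_t delta = child->cur[i] - child->last[i];

                parent->cur[i] += delta;
                child->last[i] += delta;        /* remember what was propagated */
        }
}

int main(void)
{
        struct io_counters percpu = { .cur = { 4096, 8192, 0 } };
        struct io_counters blkg = { { 0 } }, parent = { { 0 } };

        flush_delta(&blkg, &percpu);    /* per-cpu -> blkg */
        flush_delta(&parent, &blkg);    /* blkg -> parent */
        printf("parent rbytes=%llu wbytes=%llu\n",
               (unsigned long long)parent.cur[0],
               (unsigned long long)parent.cur[1]);
        return 0;
}

Because only the delta since the previous flush is moved, repeated flushes never double-count a byte or an I/O.
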
diff --git a/block/blk-core.c b/block/blk-core.c
index d5e668ec751b..a1e228752083 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -132,6 +132,9 @@ static const char *const blk_op_name[] = {
REQ_OP_NAME(SECURE_ERASE),
REQ_OP_NAME(ZONE_RESET),
REQ_OP_NAME(ZONE_RESET_ALL),
+ REQ_OP_NAME(ZONE_OPEN),
+ REQ_OP_NAME(ZONE_CLOSE),
+ REQ_OP_NAME(ZONE_FINISH),
REQ_OP_NAME(WRITE_SAME),
REQ_OP_NAME(WRITE_ZEROES),
REQ_OP_NAME(SCSI_IN),
@@ -336,14 +339,14 @@ EXPORT_SYMBOL_GPL(blk_set_queue_dying);
*/
void blk_cleanup_queue(struct request_queue *q)
{
+ WARN_ON_ONCE(blk_queue_registered(q));
+
/* mark @q DYING, no new request or merges will be allowed afterwards */
- mutex_lock(&q->sysfs_lock);
blk_set_queue_dying(q);
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
- mutex_unlock(&q->sysfs_lock);
/*
* Drain all requests queued before DYING marking. Set DEAD flag to
@@ -848,11 +851,7 @@ static inline int blk_partition_remap(struct bio *bio)
if (unlikely(bio_check_ro(bio, p)))
goto out;
- /*
- * Zone reset does not include bi_size so bio_sectors() is always 0.
- * Include a test for the reset op code and perform the remap if needed.
- */
- if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) {
+ if (bio_sectors(bio)) {
if (bio_check_eod(bio, part_nr_sects_read(p)))
goto out;
bio->bi_iter.bi_sector += p->start_sect;
@@ -936,6 +935,9 @@ generic_make_request_checks(struct bio *bio)
goto not_supported;
break;
case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
if (!blk_queue_is_zoned(q))
goto not_supported;
break;
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 1db44ca0f4a6..e20a852ae432 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -55,6 +55,8 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
rq->rq_disk = bd_disk;
rq->end_io = done;
+ blk_account_io_start(rq, true);
+
/*
* don't check dying flag for MQ because the request won't
* be reused after dying flag is set
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 1eec9cbe5a0a..1777346baf06 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -136,6 +136,17 @@ static void blk_flush_queue_rq(struct request *rq, bool add_front)
blk_mq_add_to_requeue_list(rq, add_front, true);
}
+static void blk_account_io_flush(struct request *rq)
+{
+ struct hd_struct *part = &rq->rq_disk->part0;
+
+ part_stat_lock();
+ part_stat_inc(part, ios[STAT_FLUSH]);
+ part_stat_add(part, nsecs[STAT_FLUSH],
+ ktime_get_ns() - rq->start_time_ns);
+ part_stat_unlock();
+}
+
/**
* blk_flush_complete_seq - complete flush sequence
* @rq: PREFLUSH/FUA request being sequenced
@@ -185,7 +196,7 @@ static void blk_flush_complete_seq(struct request *rq,
case REQ_FSEQ_DONE:
/*
- * @rq was previously adjusted by blk_flush_issue() for
+ * @rq was previously adjusted by blk_insert_flush() for
* flush sequencing and may already have gone through the
* flush data request completion path. Restore @rq for
* normal completion and end it.
@@ -212,6 +223,8 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
struct blk_mq_hw_ctx *hctx;
+ blk_account_io_flush(flush_rq);
+
/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 48e6725b32ee..d783bdc4559b 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -293,7 +293,7 @@ split:
void __blk_queue_split(struct request_queue *q, struct bio **bio,
unsigned int *nr_segs)
{
- struct bio *split;
+ struct bio *split = NULL;
switch (bio_op(*bio)) {
case REQ_OP_DISCARD:
@@ -309,6 +309,21 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
nr_segs);
break;
default:
+ /*
+ * All drivers must accept single-segment bios that are <=
+ * PAGE_SIZE. This is a quick and dirty check that relies on
+ * the fact that bi_io_vec[0] is always valid if a bio has data.
+ * The check might lead to occasional false negatives when bios
+ * are cloned, but compared to the performance impact of cloned
+ * bios themselves, the loop below doesn't matter anyway.
+ */
+ if (!q->limits.chunk_sectors &&
+ (*bio)->bi_vcnt == 1 &&
+ ((*bio)->bi_io_vec[0].bv_len +
+ (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
+ *nr_segs = 1;
+ break;
+ }
split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
break;
}
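
The new fast path in __blk_queue_split() skips the splitting walk when a single-vector bio cannot cross a page boundary, which reduces to a bounds check on bv_offset + bv_len. A tiny standalone version of that check; the 4 KiB page size and the function name are assumptions of the sketch:

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u /* assumption for this sketch */

/* True if a single-vector bio of @len bytes starting at @offset within a
 * page fits entirely inside that page and so can never need splitting. */
static bool single_segment_fast_path(unsigned int offset, unsigned int len)
{
        return offset + len <= EXAMPLE_PAGE_SIZE;
}

int main(void)
{
        printf("%d\n", single_segment_fast_path(512, 3584));   /* 1: fits */
        printf("%d\n", single_segment_fast_path(512, 4096));   /* 0: crosses */
        return 0;
}
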
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index a0d3ce30fa08..062229395a50 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -74,10 +74,8 @@ static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
if (!entry->show)
return -EIO;
- res = -ENOENT;
mutex_lock(&q->sysfs_lock);
- if (!blk_queue_dying(q))
- res = entry->show(ctx, page);
+ res = entry->show(ctx, page);
mutex_unlock(&q->sysfs_lock);
return res;
}
@@ -97,10 +95,8 @@ static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
if (!entry->store)
return -EIO;
- res = -ENOENT;
mutex_lock(&q->sysfs_lock);
- if (!blk_queue_dying(q))
- res = entry->store(ctx, page, length);
+ res = entry->store(ctx, page, length);
mutex_unlock(&q->sysfs_lock);
return res;
}
@@ -120,10 +116,8 @@ static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
if (!entry->show)
return -EIO;
- res = -ENOENT;
mutex_lock(&q->sysfs_lock);
- if (!blk_queue_dying(q))
- res = entry->show(hctx, page);
+ res = entry->show(hctx, page);
mutex_unlock(&q->sysfs_lock);
return res;
}
@@ -144,10 +138,8 @@ static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
if (!entry->store)
return -EIO;
- res = -ENOENT;
mutex_lock(&q->sysfs_lock);
- if (!blk_queue_dying(q))
- res = entry->store(hctx, page, length);
+ res = entry->store(hctx, page, length);
mutex_unlock(&q->sysfs_lock);
return res;
}
@@ -166,20 +158,25 @@ static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
+ const size_t size = PAGE_SIZE - 1;
unsigned int i, first = 1;
- ssize_t ret = 0;
+ int ret = 0, pos = 0;
for_each_cpu(i, hctx->cpumask) {
if (first)
- ret += sprintf(ret + page, "%u", i);
+ ret = snprintf(pos + page, size - pos, "%u", i);
else
- ret += sprintf(ret + page, ", %u", i);
+ ret = snprintf(pos + page, size - pos, ", %u", i);
+
+ if (ret >= size - pos)
+ break;
first = 0;
+ pos += ret;
}
- ret += sprintf(ret + page, "\n");
- return ret;
+ ret = snprintf(pos + page, size + 1 - pos, "\n");
+ return pos + ret;
}
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
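
The rewritten blk_mq_hw_sysfs_cpus_show() above replaces unbounded sprintf() calls with snprintf() against the space left in the one-page sysfs buffer and stops appending once output would be truncated. A userspace model of the same pattern, with a deliberately tiny buffer so the truncation path is exercised; the buffer size and CPU list are arbitrary:

#include <stdio.h>

int main(void)
{
        char page[32];                          /* stand-in for the page buffer */
        const size_t size = sizeof(page) - 1;   /* reserve room for '\n' */
        int cpus[] = { 0, 1, 2, 4, 8, 16, 32, 64 };
        int pos = 0, ret, first = 1;

        for (unsigned int i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
                if (first)
                        ret = snprintf(page + pos, size - pos, "%u", cpus[i]);
                else
                        ret = snprintf(page + pos, size - pos, ", %u", cpus[i]);

                if (ret >= (int)(size - pos))
                        break;          /* would truncate: stop appending */
                first = 0;
                pos += ret;
        }
        pos += snprintf(page + pos, sizeof(page) - pos, "\n");
        fputs(page, stdout);
        return 0;
}
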
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 008388e82b5c..fbacde454718 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -15,14 +15,6 @@
#include "blk-mq.h"
#include "blk-mq-tag.h"
-bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
-{
- if (!tags)
- return true;
-
- return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
-}
-
/*
* If a previously inactive queue goes active, bump the active user count.
* We need to do this before try to allocate driver tag, then even if fail
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 61deab0b5a5a..15bc74acb57e 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -28,7 +28,6 @@ extern void blk_mq_free_tags(struct blk_mq_tags *tags);
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
struct blk_mq_ctx *ctx, unsigned int tag);
-extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
struct blk_mq_tags **tags,
unsigned int depth, bool can_grow);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ec791156e9cc..323c9cb28066 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -93,7 +93,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
struct mq_inflight {
struct hd_struct *part;
- unsigned int *inflight;
+ unsigned int inflight[2];
};
static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
@@ -102,45 +102,29 @@ static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
{
struct mq_inflight *mi = priv;
- /*
- * index[0] counts the specific partition that was asked for.
- */
if (rq->part == mi->part)
- mi->inflight[0]++;
+ mi->inflight[rq_data_dir(rq)]++;
return true;
}
unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
{
- unsigned inflight[2];
- struct mq_inflight mi = { .part = part, .inflight = inflight, };
+ struct mq_inflight mi = { .part = part };
- inflight[0] = inflight[1] = 0;
blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
- return inflight[0];
-}
-
-static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
- struct request *rq, void *priv,
- bool reserved)
-{
- struct mq_inflight *mi = priv;
-
- if (rq->part == mi->part)
- mi->inflight[rq_data_dir(rq)]++;
-
- return true;
+ return mi.inflight[0] + mi.inflight[1];
}
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2])
{
- struct mq_inflight mi = { .part = part, .inflight = inflight, };
+ struct mq_inflight mi = { .part = part };
- inflight[0] = inflight[1] = 0;
- blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
+ blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
+ inflight[0] = mi.inflight[0];
+ inflight[1] = mi.inflight[1];
}
void blk_freeze_queue_start(struct request_queue *q)
@@ -276,12 +260,6 @@ void blk_mq_wake_waiters(struct request_queue *q)
blk_mq_tag_wakeup_all(hctx->tags, true);
}
-bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
-{
- return blk_mq_has_free_tags(hctx->tags);
-}
-EXPORT_SYMBOL(blk_mq_can_queue);
-
/*
* Only need start/end time stamping if we have iostat or
* blk stats enabled, or using an IO scheduler.
@@ -663,18 +641,6 @@ bool blk_mq_complete_request(struct request *rq)
}
EXPORT_SYMBOL(blk_mq_complete_request);
-int blk_mq_request_started(struct request *rq)
-{
- return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
-}
-EXPORT_SYMBOL_GPL(blk_mq_request_started);
-
-int blk_mq_request_completed(struct request *rq)
-{
- return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
-}
-EXPORT_SYMBOL_GPL(blk_mq_request_completed);
-
void blk_mq_start_request(struct request *rq)
{
struct request_queue *q = rq->q;
@@ -1064,7 +1030,7 @@ bool blk_mq_get_driver_tag(struct request *rq)
bool shared;
if (rq->tag != -1)
- goto done;
+ return true;
if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
data.flags |= BLK_MQ_REQ_RESERVED;
@@ -1079,7 +1045,6 @@ bool blk_mq_get_driver_tag(struct request *rq)
data.hctx->tags->rqs[rq->tag] = rq;
}
-done:
return rq->tag != -1;
}
@@ -1486,7 +1451,7 @@ void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
-bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
int srcu_idx;
bool need_run;
@@ -1504,12 +1469,8 @@ bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
blk_mq_hctx_has_pending(hctx);
hctx_unlock(hctx, srcu_idx);
- if (need_run) {
+ if (need_run)
__blk_mq_delay_run_hw_queue(hctx, async, 0);
- return true;
- }
-
- return false;
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);
@@ -2789,6 +2750,23 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
int i, j, end;
struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
+ if (q->nr_hw_queues < set->nr_hw_queues) {
+ struct blk_mq_hw_ctx **new_hctxs;
+
+ new_hctxs = kcalloc_node(set->nr_hw_queues,
+ sizeof(*new_hctxs), GFP_KERNEL,
+ set->numa_node);
+ if (!new_hctxs)
+ return;
+ if (hctxs)
+ memcpy(new_hctxs, hctxs, q->nr_hw_queues *
+ sizeof(*hctxs));
+ q->queue_hw_ctx = new_hctxs;
+ q->nr_hw_queues = set->nr_hw_queues;
+ kfree(hctxs);
+ hctxs = new_hctxs;
+ }
+
/* protect against switching io scheduler */
mutex_lock(&q->sysfs_lock);
for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2844,19 +2822,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
mutex_unlock(&q->sysfs_lock);
}
-/*
- * Maximum number of hardware queues we support. For single sets, we'll never
- * have more than the CPUs (software queues). For multiple sets, the tag_set
- * user may have set ->nr_hw_queues larger.
- */
-static unsigned int nr_hw_queues(struct blk_mq_tag_set *set)
-{
- if (set->nr_maps == 1)
- return nr_cpu_ids;
-
- return max(set->nr_hw_queues, nr_cpu_ids);
-}
-
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q,
bool elevator_init)
@@ -2876,12 +2841,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
/* init q->mq_kobj and sw queues' kobjects */
blk_mq_sysfs_init(q);
- q->nr_queues = nr_hw_queues(set);
- q->queue_hw_ctx = kcalloc_node(q->nr_queues, sizeof(*(q->queue_hw_ctx)),
- GFP_KERNEL, set->numa_node);
- if (!q->queue_hw_ctx)
- goto err_sys_init;
-
INIT_LIST_HEAD(&q->unused_hctx_list);
spin_lock_init(&q->unused_hctx_lock);
@@ -2929,7 +2888,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
err_hctxs:
kfree(q->queue_hw_ctx);
q->nr_hw_queues = 0;
-err_sys_init:
blk_mq_sysfs_deinit(q);
err_poll:
blk_stat_free_callback(q->poll_cb);
@@ -3030,6 +2988,29 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
}
}
+static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
+ int cur_nr_hw_queues, int new_nr_hw_queues)
+{
+ struct blk_mq_tags **new_tags;
+
+ if (cur_nr_hw_queues >= new_nr_hw_queues)
+ return 0;
+
+ new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
+ GFP_KERNEL, set->numa_node);
+ if (!new_tags)
+ return -ENOMEM;
+
+ if (set->tags)
+ memcpy(new_tags, set->tags, cur_nr_hw_queues *
+ sizeof(*set->tags));
+ kfree(set->tags);
+ set->tags = new_tags;
+ set->nr_hw_queues = new_nr_hw_queues;
+
+ return 0;
+}
+
/*
* Alloc a tag set to be associated with one or more request queues.
* May fail with EINVAL for various error conditions. May adjust the
@@ -3083,9 +3064,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
set->nr_hw_queues = nr_cpu_ids;
- set->tags = kcalloc_node(nr_hw_queues(set), sizeof(struct blk_mq_tags *),
- GFP_KERNEL, set->numa_node);
- if (!set->tags)
+ if (blk_mq_realloc_tag_set_tags(set, 0, set->nr_hw_queues) < 0)
return -ENOMEM;
ret = -ENOMEM;
@@ -3126,7 +3105,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
int i, j;
- for (i = 0; i < nr_hw_queues(set); i++)
+ for (i = 0; i < set->nr_hw_queues; i++)
blk_mq_free_map_and_requests(set, i);
for (j = 0; j < set->nr_maps; j++) {
@@ -3271,10 +3250,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_freeze_queue(q);
/*
- * Sync with blk_mq_queue_tag_busy_iter.
- */
- synchronize_rcu();
- /*
* Switch IO scheduler to 'none', cleaning up the data associated
* with the previous scheduler. We will switch back once we are done
* updating the new sw to hw queue mappings.
@@ -3288,6 +3263,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
blk_mq_sysfs_unregister(q);
}
+ if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
+ 0)
+ goto reregister;
+
prev_nr_hw_queues = set->nr_hw_queues;
set->nr_hw_queues = nr_hw_queues;
blk_mq_update_queue_map(set);
@@ -3304,6 +3283,7 @@ fallback:
blk_mq_map_swqueue(q);
}
+reregister:
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_sysfs_register(q);
blk_mq_debugfs_register_hctxs(q);
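
blk_mq_realloc_hw_ctxs() and the new blk_mq_realloc_tag_set_tags() both grow a pointer array in place: allocate a larger zeroed array, copy the existing entries, publish the new array, free the old one, and never shrink. A standalone sketch of that grow-and-copy pattern; it uses plain calloc() rather than kcalloc_node() and the helper name is invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow a pointer array from *cur_nr to new_nr entries, keeping old contents. */
static int grow_ptr_array(void ***arr, int *cur_nr, int new_nr)
{
        void **new_arr;

        if (*cur_nr >= new_nr)
                return 0;               /* nothing to do; never shrink here */

        new_arr = calloc(new_nr, sizeof(*new_arr));
        if (!new_arr)
                return -1;
        if (*arr)
                memcpy(new_arr, *arr, *cur_nr * sizeof(*new_arr));
        free(*arr);
        *arr = new_arr;
        *cur_nr = new_nr;
        return 0;
}

int main(void)
{
        void **tags = NULL;
        int nr = 0;

        if (grow_ptr_array(&tags, &nr, 4) == 0 &&
            grow_ptr_array(&tags, &nr, 8) == 0)
                printf("array now holds %d slots\n", nr);
        free(tags);
        return 0;
}
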
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 32c62c64e6c2..eaaca8fc1c28 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -128,15 +128,6 @@ extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_release(struct request_queue *q);
-/**
- * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
- * @rq: target request.
- */
-static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
-{
- return READ_ONCE(rq->state);
-}
-
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
{
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 457d9ba3eb20..6e7ec87d49fa 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -42,17 +42,13 @@ static __latent_entropy void blk_done_softirq(struct softirq_action *h)
static void trigger_softirq(void *data)
{
struct request *rq = data;
- unsigned long flags;
struct list_head *list;
- local_irq_save(flags);
list = this_cpu_ptr(&blk_cpu_done);
list_add_tail(&rq->ipi_list, list);
if (list->next == &rq->ipi_list)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
-
- local_irq_restore(flags);
}
/*
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 940f15d600f8..7da302ff88d0 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -53,7 +53,7 @@ void blk_stat_add(struct request *rq, u64 now)
struct request_queue *q = rq->q;
struct blk_stat_callback *cb;
struct blk_rq_stat *stat;
- int bucket;
+ int bucket, cpu;
u64 value;
value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;
@@ -61,6 +61,7 @@ void blk_stat_add(struct request *rq, u64 now)
blk_throtl_stat_add(rq, value);
rcu_read_lock();
+ cpu = get_cpu();
list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
if (!blk_stat_is_active(cb))
continue;
@@ -69,10 +70,10 @@ void blk_stat_add(struct request *rq, u64 now)
if (bucket < 0)
continue;
- stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
+ stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
blk_rq_stat_add(stat, value);
- put_cpu_ptr(cb->cpu_stat);
}
+ put_cpu();
rcu_read_unlock();
}
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 46f5198be017..fca9b158f4a0 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -801,10 +801,6 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dying(q)) {
- mutex_unlock(&q->sysfs_lock);
- return -ENOENT;
- }
res = entry->show(q, page);
mutex_unlock(&q->sysfs_lock);
return res;
@@ -823,10 +819,6 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
q = container_of(kobj, struct request_queue, kobj);
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dying(q)) {
- mutex_unlock(&q->sysfs_lock);
- return -ENOENT;
- }
res = entry->store(q, page, length);
mutex_unlock(&q->sysfs_lock);
return res;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 18f773e52dfb..98233c9c65a8 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -12,6 +12,7 @@
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"
+#include "blk-cgroup-rwstat.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;
@@ -176,6 +177,9 @@ struct throtl_grp {
unsigned int bio_cnt; /* total bios */
unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
unsigned long bio_cnt_reset_time;
+
+ struct blkg_rwstat stat_bytes;
+ struct blkg_rwstat stat_ios;
};
/* We measure latency for request size from <= 4k to >= 1M */
@@ -489,6 +493,12 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
if (!tg)
return NULL;
+ if (blkg_rwstat_init(&tg->stat_bytes, gfp))
+ goto err_free_tg;
+
+ if (blkg_rwstat_init(&tg->stat_ios, gfp))
+ goto err_exit_stat_bytes;
+
throtl_service_queue_init(&tg->service_queue);
for (rw = READ; rw <= WRITE; rw++) {
@@ -513,6 +523,12 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
return &tg->pd;
+
+err_exit_stat_bytes:
+ blkg_rwstat_exit(&tg->stat_bytes);
+err_free_tg:
+ kfree(tg);
+ return NULL;
}
static void throtl_pd_init(struct blkg_policy_data *pd)
@@ -611,6 +627,8 @@ static void throtl_pd_free(struct blkg_policy_data *pd)
struct throtl_grp *tg = pd_to_tg(pd);
del_timer_sync(&tg->service_queue.pending_timer);
+ blkg_rwstat_exit(&tg->stat_bytes);
+ blkg_rwstat_exit(&tg->stat_ios);
kfree(tg);
}
@@ -1464,6 +1482,32 @@ static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
return tg_set_conf(of, buf, nbytes, off, false);
}
+static int tg_print_rwstat(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ blkg_prfill_rwstat, &blkcg_policy_throtl,
+ seq_cft(sf)->private, true);
+ return 0;
+}
+
+static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ struct blkg_rwstat_sample sum;
+
+ blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
+ &sum);
+ return __blkg_prfill_rwstat(sf, pd, &sum);
+}
+
+static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
+ seq_cft(sf)->private, true);
+ return 0;
+}
+
static struct cftype throtl_legacy_files[] = {
{
.name = "throttle.read_bps_device",
@@ -1491,23 +1535,23 @@ static struct cftype throtl_legacy_files[] = {
},
{
.name = "throttle.io_service_bytes",
- .private = (unsigned long)&blkcg_policy_throtl,
- .seq_show = blkg_print_stat_bytes,
+ .private = offsetof(struct throtl_grp, stat_bytes),
+ .seq_show = tg_print_rwstat,
},
{
.name = "throttle.io_service_bytes_recursive",
- .private = (unsigned long)&blkcg_policy_throtl,
- .seq_show = blkg_print_stat_bytes_recursive,
+ .private = offsetof(struct throtl_grp, stat_bytes),
+ .seq_show = tg_print_rwstat_recursive,
},
{
.name = "throttle.io_serviced",
- .private = (unsigned long)&blkcg_policy_throtl,
- .seq_show = blkg_print_stat_ios,
+ .private = offsetof(struct throtl_grp, stat_ios),
+ .seq_show = tg_print_rwstat,
},
{
.name = "throttle.io_serviced_recursive",
- .private = (unsigned long)&blkcg_policy_throtl,
- .seq_show = blkg_print_stat_ios_recursive,
+ .private = offsetof(struct throtl_grp, stat_ios),
+ .seq_show = tg_print_rwstat_recursive,
},
{ } /* terminate */
};
@@ -2127,7 +2171,16 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
WARN_ON_ONCE(!rcu_read_lock_held());
/* see throtl_charge_bio() */
- if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
+ if (bio_flagged(bio, BIO_THROTTLED))
+ goto out;
+
+ if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
+ blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
+ bio->bi_iter.bi_size);
+ blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
+ }
+
+ if (!tg->has_rules[rw])
goto out;
spin_lock_irq(&q->queue_lock);
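
The throttle files above now carry an offsetof() into struct throtl_grp in cftype->private, and the print helpers add that offset to the group pointer at run time to pick the right counter. A minimal standalone model of offset-based field selection; the struct and helper here are invented for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct group_stats {
        uint64_t bytes;
        uint64_t ios;
};

/* Generic printer: @off selects which uint64_t field of the struct to show. */
static void print_stat(const struct group_stats *grp, size_t off)
{
        const uint64_t *field = (const uint64_t *)((const char *)grp + off);

        printf("%llu\n", (unsigned long long)*field);
}

int main(void)
{
        struct group_stats grp = { .bytes = 1048576, .ios = 256 };

        print_stat(&grp, offsetof(struct group_stats, bytes));
        print_stat(&grp, offsetof(struct group_stats, ios));
        return 0;
}
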
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 4bc5f260248a..6fad6f3f6980 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -93,172 +93,85 @@ unsigned int blkdev_nr_zones(struct block_device *bdev)
if (!blk_queue_is_zoned(q))
return 0;
- return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
+ return __blkdev_nr_zones(q, get_capacity(bdev->bd_disk));
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);
-/*
- * Check that a zone report belongs to this partition, and if yes, fix its start
- * sector and write pointer and return true. Return false otherwise.
- */
-static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
-{
- sector_t offset = get_start_sect(bdev);
-
- if (rep->start < offset)
- return false;
-
- rep->start -= offset;
- if (rep->start + rep->len > bdev->bd_part->nr_sects)
- return false;
-
- if (rep->type == BLK_ZONE_TYPE_CONVENTIONAL)
- rep->wp = rep->start + rep->len;
- else
- rep->wp -= offset;
- return true;
-}
-
-static int blk_report_zones(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
-{
- struct request_queue *q = disk->queue;
- unsigned int z = 0, n, nrz = *nr_zones;
- sector_t capacity = get_capacity(disk);
- int ret;
-
- while (z < nrz && sector < capacity) {
- n = nrz - z;
- ret = disk->fops->report_zones(disk, sector, &zones[z], &n);
- if (ret)
- return ret;
- if (!n)
- break;
- sector += blk_queue_zone_sectors(q) * n;
- z += n;
- }
-
- WARN_ON(z > *nr_zones);
- *nr_zones = z;
-
- return 0;
-}
-
/**
* blkdev_report_zones - Get zones information
* @bdev: Target block device
* @sector: Sector from which to report zones
- * @zones: Array of zone structures where to return the zones information
- * @nr_zones: Number of zone structures in the zone array
+ * @nr_zones: Maximum number of zones to report
+ * @cb: Callback function called for each reported zone
+ * @data: Private data for the callback
*
* Description:
- * Get zone information starting from the zone containing @sector.
- * The number of zone information reported may be less than the number
- * requested by @nr_zones. The number of zones actually reported is
- * returned in @nr_zones.
- * The caller must use memalloc_noXX_save/restore() calls to control
- * memory allocations done within this function (zone array and command
- * buffer allocation by the device driver).
+ * Get zone information starting from the zone containing @sector for at most
+ * @nr_zones, and call @cb for each zone reported by the device.
+ * To report all zones in a device starting from @sector, the BLK_ALL_ZONES
+ * constant can be passed to @nr_zones.
+ * Returns the number of zones reported by the device, or a negative errno
+ * value in case of failure.
+ *
+ * Note: The caller must use memalloc_noXX_save/restore() calls to control
+ * memory allocations done within this function.
*/
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
- struct request_queue *q = bdev_get_queue(bdev);
- unsigned int i, nrz;
- int ret;
-
- if (!blk_queue_is_zoned(q))
- return -EOPNOTSUPP;
+ struct gendisk *disk = bdev->bd_disk;
+ sector_t capacity = get_capacity(disk);
- /*
- * A block device that advertized itself as zoned must have a
- * report_zones method. If it does not have one defined, the device
- * driver has a bug. So warn about that.
- */
- if (WARN_ON_ONCE(!bdev->bd_disk->fops->report_zones))
+ if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
+ WARN_ON_ONCE(!disk->fops->report_zones))
return -EOPNOTSUPP;
- if (!*nr_zones || sector >= bdev->bd_part->nr_sects) {
- *nr_zones = 0;
+ if (!nr_zones || sector >= capacity)
return 0;
- }
-
- nrz = min(*nr_zones,
- __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
- ret = blk_report_zones(bdev->bd_disk, get_start_sect(bdev) + sector,
- zones, &nrz);
- if (ret)
- return ret;
-
- for (i = 0; i < nrz; i++) {
- if (!blkdev_report_zone(bdev, zones))
- break;
- zones++;
- }
- *nr_zones = i;
-
- return 0;
+ return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
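
The reworked blkdev_report_zones() no longer fills a caller-supplied array; it hands each zone to a callback along with an opaque data pointer, the same convention used by blkdev_copy_zone_to_user() and blk_revalidate_zone_cb() later in this patch. A standalone model of that callback-driven iteration, with a simplified zone structure and a fixed zone size rather than real device queries:

#include <stdio.h>

struct zone { unsigned long long start; unsigned long long len; };

typedef int (*zone_cb)(struct zone *zone, unsigned int idx, void *data);

/* Walk @nr fixed-size zones starting at @sector and hand each one to @cb;
 * return the number of zones reported, or the callback's negative error. */
static int report_zones(unsigned long long sector, unsigned int nr,
                        unsigned long long zone_len, zone_cb cb, void *data)
{
        unsigned int i;
        int ret;

        for (i = 0; i < nr; i++) {
                struct zone z = {
                        .start = (sector / zone_len + i) * zone_len,
                        .len = zone_len,
                };

                ret = cb(&z, i, data);
                if (ret)
                        return ret;
        }
        return i;
}

static int count_cb(struct zone *zone, unsigned int idx, void *data)
{
        unsigned long long *total = data;

        *total += zone->len;
        return 0;
}

int main(void)
{
        unsigned long long sectors = 0;
        int nr = report_zones(0, 4, 524288, count_cb, &sectors);

        printf("%d zones, %llu sectors\n", nr, sectors);
        return 0;
}
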
-/*
- * Special case of zone reset operation to reset all zones in one command,
- * useful for applications like mkfs.
- */
-static int __blkdev_reset_all_zones(struct block_device *bdev, gfp_t gfp_mask)
-{
- struct bio *bio = bio_alloc(gfp_mask, 0);
- int ret;
-
- /* across the zones operations, don't need any sectors */
- bio_set_dev(bio, bdev);
- bio_set_op_attrs(bio, REQ_OP_ZONE_RESET_ALL, 0);
-
- ret = submit_bio_wait(bio);
- bio_put(bio);
-
- return ret;
-}
-
static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
+ sector_t sector,
sector_t nr_sectors)
{
if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
return false;
- if (nr_sectors != part_nr_sects_read(bdev->bd_part))
- return false;
/*
- * REQ_OP_ZONE_RESET_ALL can be executed only if the block device is
- * the entire disk, that is, if the blocks device start offset is 0 and
- * its capacity is the same as the entire disk.
+ * REQ_OP_ZONE_RESET_ALL can be executed only if the specified zone
+ * range covers the entire disk.
*/
- return get_start_sect(bdev) == 0 &&
- part_nr_sects_read(bdev->bd_part) == get_capacity(bdev->bd_disk);
+ return !sector && nr_sectors == get_capacity(bdev->bd_disk);
}
/**
- * blkdev_reset_zones - Reset zones write pointer
+ * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
* @bdev: Target block device
- * @sector: Start sector of the first zone to reset
- * @nr_sectors: Number of sectors, at least the length of one zone
+ * @op: Operation to be performed on the zones
+ * @sector: Start sector of the first zone to operate on
+ * @nr_sectors: Number of sectors, should be at least the length of one zone and
+ * must be zone size aligned.
* @gfp_mask: Memory allocation flags (for bio_alloc)
*
* Description:
- * Reset the write pointer of the zones contained in the range
+ * Perform the specified operation on the range of zones specified by
* @sector..@sector+@nr_sectors. Specifying the entire disk sector range
* is valid, but the specified range should not contain conventional zones.
+ * The operation to execute on each zone can be a zone reset, open, close
+ * or finish request.
*/
-int blkdev_reset_zones(struct block_device *bdev,
- sector_t sector, sector_t nr_sectors,
- gfp_t gfp_mask)
+int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
+ sector_t sector, sector_t nr_sectors,
+ gfp_t gfp_mask)
{
struct request_queue *q = bdev_get_queue(bdev);
- sector_t zone_sectors;
+ sector_t zone_sectors = blk_queue_zone_sectors(q);
+ sector_t capacity = get_capacity(bdev->bd_disk);
sector_t end_sector = sector + nr_sectors;
struct bio *bio = NULL;
- struct blk_plug plug;
int ret;
if (!blk_queue_is_zoned(q))
@@ -267,45 +180,62 @@ int blkdev_reset_zones(struct block_device *bdev,
if (bdev_read_only(bdev))
return -EPERM;
- if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
+ if (!op_is_zone_mgmt(op))
+ return -EOPNOTSUPP;
+
+ if (!nr_sectors || end_sector > capacity)
/* Out of range */
return -EINVAL;
- if (blkdev_allow_reset_all_zones(bdev, nr_sectors))
- return __blkdev_reset_all_zones(bdev, gfp_mask);
-
/* Check alignment (handle eventual smaller last zone) */
- zone_sectors = blk_queue_zone_sectors(q);
if (sector & (zone_sectors - 1))
return -EINVAL;
- if ((nr_sectors & (zone_sectors - 1)) &&
- end_sector != bdev->bd_part->nr_sects)
+ if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
return -EINVAL;
- blk_start_plug(&plug);
while (sector < end_sector) {
-
bio = blk_next_bio(bio, 0, gfp_mask);
- bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
- bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
+ /*
+ * Special case for the zone reset operation that resets all
+ * zones; this is useful for applications like mkfs.
+ */
+ if (op == REQ_OP_ZONE_RESET &&
+ blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
+ bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
+ break;
+ }
+
+ bio->bi_opf = op;
+ bio->bi_iter.bi_sector = sector;
sector += zone_sectors;
/* This may take a while, so be nice to others */
cond_resched();
-
}
ret = submit_bio_wait(bio);
bio_put(bio);
- blk_finish_plug(&plug);
-
return ret;
}
-EXPORT_SYMBOL_GPL(blkdev_reset_zones);
+EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
+
+struct zone_report_args {
+ struct blk_zone __user *zones;
+};
+
+static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
+ void *data)
+{
+ struct zone_report_args *args = data;
+
+ if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
+ return -EFAULT;
+ return 0;
+}
/*
* BLKREPORTZONE ioctl processing.
@@ -315,9 +245,9 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
+ struct zone_report_args args;
struct request_queue *q;
struct blk_zone_report rep;
- struct blk_zone *zones;
int ret;
if (!argp)
@@ -339,44 +269,29 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
if (!rep.nr_zones)
return -EINVAL;
- rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);
-
- zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
- GFP_KERNEL | __GFP_ZERO);
- if (!zones)
- return -ENOMEM;
-
- ret = blkdev_report_zones(bdev, rep.sector, zones, &rep.nr_zones);
- if (ret)
- goto out;
-
- if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) {
- ret = -EFAULT;
- goto out;
- }
-
- if (rep.nr_zones) {
- if (copy_to_user(argp + sizeof(struct blk_zone_report), zones,
- sizeof(struct blk_zone) * rep.nr_zones))
- ret = -EFAULT;
- }
-
- out:
- kvfree(zones);
+ args.zones = argp + sizeof(struct blk_zone_report);
+ ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
+ blkdev_copy_zone_to_user, &args);
+ if (ret < 0)
+ return ret;
- return ret;
+ rep.nr_zones = ret;
+ if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
+ return -EFAULT;
+ return 0;
}
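
Seen from userspace, BLKREPORTZONE is unchanged: the argument is a struct blk_zone_report immediately followed in memory by an array of struct blk_zone, which is exactly why the kernel side above can point args.zones just past the header. A hedged example of issuing the ioctl; it assumes <linux/blkzoned.h> is available, takes the device path on the command line, and trims most error handling. Any zoned block device works as a target, for example a null_blk instance loaded with zoned=1.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/blkzoned.h>

int main(int argc, char **argv)
{
        unsigned int nr_zones = 16;
        struct blk_zone_report *rep;
        int fd;

        if (argc != 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;

        /* Header followed in memory by the zone array, as the ioctl expects. */
        rep = calloc(1, sizeof(*rep) + nr_zones * sizeof(struct blk_zone));
        if (!rep)
                return 1;
        rep->sector = 0;
        rep->nr_zones = nr_zones;

        if (ioctl(fd, BLKREPORTZONE, rep) == 0) {
                for (unsigned int i = 0; i < rep->nr_zones; i++)
                        printf("zone %u: start %llu len %llu wp %llu\n", i,
                               (unsigned long long)rep->zones[i].start,
                               (unsigned long long)rep->zones[i].len,
                               (unsigned long long)rep->zones[i].wp);
        }
        free(rep);
        close(fd);
        return 0;
}
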
/*
- * BLKRESETZONE ioctl processing.
+ * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
* Called from blkdev_ioctl.
*/
-int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
+int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct request_queue *q;
struct blk_zone_range zrange;
+ enum req_opf op;
if (!argp)
return -EINVAL;
@@ -397,8 +312,25 @@ int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
return -EFAULT;
- return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
- GFP_KERNEL);
+ switch (cmd) {
+ case BLKRESETZONE:
+ op = REQ_OP_ZONE_RESET;
+ break;
+ case BLKOPENZONE:
+ op = REQ_OP_ZONE_OPEN;
+ break;
+ case BLKCLOSEZONE:
+ op = REQ_OP_ZONE_CLOSE;
+ break;
+ case BLKFINISHZONE:
+ op = REQ_OP_ZONE_FINISH;
+ break;
+ default:
+ return -ENOTTY;
+ }
+
+ return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
+ GFP_KERNEL);
}
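
The new BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctls reuse the struct blk_zone_range argument that BLKRESETZONE already took, so driving them from userspace needs nothing new. A sketch that explicitly opens one zone; it assumes a 5.5-era <linux/blkzoned.h> that defines BLKOPENZONE, a device opened for writing, and a zone size of 256 MiB (524288 sectors), which is only an example value:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/blkzoned.h>

/* Explicitly open the zone starting at @sector and spanning @nr_sectors. */
static int open_zone(int fd, unsigned long long sector,
                     unsigned long long nr_sectors)
{
        struct blk_zone_range range = {
                .sector = sector,
                .nr_sectors = nr_sectors,
        };

        return ioctl(fd, BLKOPENZONE, &range);
}

int main(int argc, char **argv)
{
        int fd, ret;

        if (argc != 2)
                return 1;
        fd = open(argv[1], O_WRONLY);
        if (fd < 0)
                return 1;
        ret = open_zone(fd, 0, 524288);     /* first zone, assumed zone size */
        if (ret)
                perror("BLKOPENZONE");
        close(fd);
        return 0;
}
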
static inline unsigned long *blk_alloc_zone_bitmap(int node,
@@ -408,37 +340,99 @@ static inline unsigned long *blk_alloc_zone_bitmap(int node,
GFP_NOIO, node);
}
+void blk_queue_free_zone_bitmaps(struct request_queue *q)
+{
+ kfree(q->seq_zones_bitmap);
+ q->seq_zones_bitmap = NULL;
+ kfree(q->seq_zones_wlock);
+ q->seq_zones_wlock = NULL;
+}
+
+struct blk_revalidate_zone_args {
+ struct gendisk *disk;
+ unsigned long *seq_zones_bitmap;
+ unsigned long *seq_zones_wlock;
+ sector_t sector;
+};
+
/*
- * Allocate an array of struct blk_zone to get nr_zones zone information.
- * The allocated array may be smaller than nr_zones.
+ * Helper function to check the validity of zones of a zoned block device.
*/
-static struct blk_zone *blk_alloc_zones(unsigned int *nr_zones)
+static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
{
- struct blk_zone *zones;
- size_t nrz = min(*nr_zones, BLK_ZONED_REPORT_MAX_ZONES);
+ struct blk_revalidate_zone_args *args = data;
+ struct gendisk *disk = args->disk;
+ struct request_queue *q = disk->queue;
+ sector_t zone_sectors = blk_queue_zone_sectors(q);
+ sector_t capacity = get_capacity(disk);
/*
- * GFP_KERNEL here is meaningless as the caller task context has
- * the PF_MEMALLOC_NOIO flag set in blk_revalidate_disk_zones()
- * with memalloc_noio_save().
+ * All zones must have the same size, with the exception of an eventual
+ * smaller last zone.
*/
- zones = kvcalloc(nrz, sizeof(struct blk_zone), GFP_KERNEL);
- if (!zones) {
- *nr_zones = 0;
- return NULL;
+ if (zone->start + zone_sectors < capacity &&
+ zone->len != zone_sectors) {
+ pr_warn("%s: Invalid zoned device with non constant zone size\n",
+ disk->disk_name);
+ return -ENODEV;
}
- *nr_zones = nrz;
+ if (zone->start + zone->len >= capacity &&
+ zone->len > zone_sectors) {
+ pr_warn("%s: Invalid zoned device with larger last zone size\n",
+ disk->disk_name);
+ return -ENODEV;
+ }
+
+ /* Check for holes in the zone report */
+ if (zone->start != args->sector) {
+ pr_warn("%s: Zone gap at sectors %llu..%llu\n",
+ disk->disk_name, args->sector, zone->start);
+ return -ENODEV;
+ }
- return zones;
+ /* Check zone type */
+ switch (zone->type) {
+ case BLK_ZONE_TYPE_CONVENTIONAL:
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
+ break;
+ default:
+ pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
+ disk->disk_name, (int)zone->type, zone->start);
+ return -ENODEV;
+ }
+
+ if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
+ set_bit(idx, args->seq_zones_bitmap);
+
+ args->sector += zone->len;
+ return 0;
}
-void blk_queue_free_zone_bitmaps(struct request_queue *q)
+static int blk_update_zone_info(struct gendisk *disk, unsigned int nr_zones,
+ struct blk_revalidate_zone_args *args)
{
- kfree(q->seq_zones_bitmap);
- q->seq_zones_bitmap = NULL;
- kfree(q->seq_zones_wlock);
- q->seq_zones_wlock = NULL;
+ /*
+ * Ensure that all memory allocations in this context are done as
+ * if GFP_NOIO was specified.
+ */
+ unsigned int noio_flag = memalloc_noio_save();
+ struct request_queue *q = disk->queue;
+ int ret;
+
+ args->seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
+ if (!args->seq_zones_wlock)
+ return -ENOMEM;
+ args->seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
+ if (!args->seq_zones_bitmap)
+ return -ENOMEM;
+
+ ret = disk->fops->report_zones(disk, 0, nr_zones,
+ blk_revalidate_zone_cb, args);
+ memalloc_noio_restore(noio_flag);
+ return ret;
}
/**
@@ -454,13 +448,12 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
- unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
- unsigned int i, rep_nr_zones = 0, z = 0, nrz;
- struct blk_zone *zones = NULL;
- unsigned int noio_flag;
- sector_t sector = 0;
+ struct blk_revalidate_zone_args args = { .disk = disk };
int ret = 0;
+ if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
+ return -EIO;
+
/*
* BIO based queues do not use a scheduler so only q->nr_zones
* needs to be updated so that the sysfs exposed value is correct.
@@ -470,78 +463,28 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
return 0;
}
- /*
- * Ensure that all memory allocations in this context are done as
- * if GFP_NOIO was specified.
- */
- noio_flag = memalloc_noio_save();
-
- if (!blk_queue_is_zoned(q) || !nr_zones) {
- nr_zones = 0;
- goto update;
- }
-
- /* Allocate bitmaps */
- ret = -ENOMEM;
- seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
- if (!seq_zones_wlock)
- goto out;
- seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
- if (!seq_zones_bitmap)
- goto out;
-
- /* Get zone information and initialize seq_zones_bitmap */
- rep_nr_zones = nr_zones;
- zones = blk_alloc_zones(&rep_nr_zones);
- if (!zones)
- goto out;
-
- while (z < nr_zones) {
- nrz = min(nr_zones - z, rep_nr_zones);
- ret = blk_report_zones(disk, sector, zones, &nrz);
- if (ret)
- goto out;
- if (!nrz)
- break;
- for (i = 0; i < nrz; i++) {
- if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
- set_bit(z, seq_zones_bitmap);
- z++;
- }
- sector += nrz * blk_queue_zone_sectors(q);
- }
-
- if (WARN_ON(z != nr_zones)) {
- ret = -EIO;
- goto out;
- }
+ if (nr_zones)
+ ret = blk_update_zone_info(disk, nr_zones, &args);
-update:
/*
* Install the new bitmaps, making sure the queue is stopped and
* all I/Os are completed (i.e. a scheduler is not referencing the
* bitmaps).
*/
blk_mq_freeze_queue(q);
- q->nr_zones = nr_zones;
- swap(q->seq_zones_wlock, seq_zones_wlock);
- swap(q->seq_zones_bitmap, seq_zones_bitmap);
- blk_mq_unfreeze_queue(q);
-
-out:
- memalloc_noio_restore(noio_flag);
-
- kvfree(zones);
- kfree(seq_zones_wlock);
- kfree(seq_zones_bitmap);
-
- if (ret) {
+ if (ret >= 0) {
+ q->nr_zones = nr_zones;
+ swap(q->seq_zones_wlock, args.seq_zones_wlock);
+ swap(q->seq_zones_bitmap, args.seq_zones_bitmap);
+ ret = 0;
+ } else {
pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
- blk_mq_freeze_queue(q);
blk_queue_free_zone_bitmaps(q);
- blk_mq_unfreeze_queue(q);
}
+ blk_mq_unfreeze_queue(q);
+ kfree(args.seq_zones_wlock);
+ kfree(args.seq_zones_bitmap);
return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
diff --git a/block/blk.h b/block/blk.h
index 47fba9362e60..2bea40180b6f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -242,14 +242,11 @@ int blk_dev_init(void);
* Contribute to IO statistics IFF:
*
* a) it's attached to a gendisk, and
- * b) the queue had IO stats enabled when this request was started, and
- * c) it's a file system request
+ * b) the queue had IO stats enabled when this request was started
*/
static inline bool blk_do_io_stat(struct request *rq)
{
- return rq->rq_disk &&
- (rq->rq_flags & RQF_IO_STAT) &&
- !blk_rq_is_passthrough(rq);
+ return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
}
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
diff --git a/block/elevator.c b/block/elevator.c
index 076ba7308e65..4eab3d70e880 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -832,3 +832,12 @@ struct request *elv_rb_latter_request(struct request_queue *q,
return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);
+
+static int __init elevator_setup(char *str)
+{
+ pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
+ "Please use sysfs to set IO scheduler for individual devices.\n");
+ return 1;
+}
+
+__setup("elevator=", elevator_setup);
diff --git a/block/genhd.c b/block/genhd.c
index 26b31fcae217..ff6268970ddc 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1385,7 +1385,9 @@ static int diskstats_show(struct seq_file *seqf, void *v)
"%lu %lu %lu %u "
"%lu %lu %lu %u "
"%u %u %u "
- "%lu %lu %lu %u\n",
+ "%lu %lu %lu %u "
+ "%lu %u"
+ "\n",
MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
disk_name(gp, hd->partno, buf),
part_stat_read(hd, ios[STAT_READ]),
@@ -1402,7 +1404,9 @@ static int diskstats_show(struct seq_file *seqf, void *v)
part_stat_read(hd, ios[STAT_DISCARD]),
part_stat_read(hd, merges[STAT_DISCARD]),
part_stat_read(hd, sectors[STAT_DISCARD]),
- (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD)
+ (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD),
+ part_stat_read(hd, ios[STAT_FLUSH]),
+ (unsigned int)part_stat_read_msecs(hd, STAT_FLUSH)
);
}
disk_part_iter_exit(&piter);
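
With the flush accounting added above, each /proc/diskstats line grows from 18 to 20 fields; the two trailing values are completed flush requests and the milliseconds spent flushing. A small reader that prints only those two fields; the field positions follow the format string above, and the 20-field total is an assumption about this kernel's output:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/diskstats", "r");
        char line[512];

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                unsigned int major, minor;
                char name[64];
                unsigned long long stat[17];
                int n = sscanf(line,
                               "%u %u %63s %llu %llu %llu %llu %llu %llu %llu"
                               " %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
                               &major, &minor, name,
                               &stat[0], &stat[1], &stat[2], &stat[3],
                               &stat[4], &stat[5], &stat[6], &stat[7],
                               &stat[8], &stat[9], &stat[10],
                               &stat[11], &stat[12], &stat[13], &stat[14],
                               &stat[15], &stat[16]);

                /* 20 fields parsed => this kernel exposes the flush counters */
                if (n == 20)
                        printf("%s: %llu flushes, %llu ms flushing\n",
                               name, stat[15], stat[16]);
        }
        fclose(f);
        return 0;
}
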
diff --git a/block/ioctl.c b/block/ioctl.c
index 15a0eb80ada9..7ac8a66c9787 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -155,48 +155,21 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
}
}
-/*
- * This is an exported API for the block driver, and will not
- * acquire bd_mutex. This API should be used in case that
- * caller has held bd_mutex already.
- */
-int __blkdev_reread_part(struct block_device *bdev)
+static int blkdev_reread_part(struct block_device *bdev)
{
- struct gendisk *disk = bdev->bd_disk;
+ int ret;
- if (!disk_part_scan_enabled(disk) || bdev != bdev->bd_contains)
+ if (!disk_part_scan_enabled(bdev->bd_disk) || bdev != bdev->bd_contains)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- lockdep_assert_held(&bdev->bd_mutex);
-
- return rescan_partitions(disk, bdev);
-}
-EXPORT_SYMBOL(__blkdev_reread_part);
-
-/*
- * This is an exported API for the block driver, and will
- * try to acquire bd_mutex. If bd_mutex has been held already
- * in current context, please call __blkdev_reread_part().
- *
- * Make sure the held locks in current context aren't required
- * in open()/close() handler and I/O path for avoiding ABBA deadlock:
- * - bd_mutex is held before calling block driver's open/close
- * handler
- * - reading partition table may submit I/O to the block device
- */
-int blkdev_reread_part(struct block_device *bdev)
-{
- int res;
-
mutex_lock(&bdev->bd_mutex);
- res = __blkdev_reread_part(bdev);
+ ret = bdev_disk_changed(bdev, false);
mutex_unlock(&bdev->bd_mutex);
- return res;
+ return ret;
}
-EXPORT_SYMBOL(blkdev_reread_part);
static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
unsigned long arg, unsigned long flags)
@@ -532,7 +505,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKREPORTZONE:
return blkdev_report_zones_ioctl(bdev, mode, cmd, arg);
case BLKRESETZONE:
- return blkdev_reset_zones_ioctl(bdev, mode, cmd, arg);
+ case BLKOPENZONE:
+ case BLKCLOSEZONE:
+ case BLKFINISHZONE:
+ return blkdev_zone_mgmt_ioctl(bdev, mode, cmd, arg);
case BLKGETZONESZ:
return put_uint(arg, bdev_zone_sectors(bdev));
case BLKGETNRZONES:
diff --git a/block/opal_proto.h b/block/opal_proto.h
index 5532412d567c..325cbba2465f 100644
--- a/block/opal_proto.h
+++ b/block/opal_proto.h
@@ -76,7 +76,6 @@ enum opal_response_token {
* Derived from: TCG_Storage_Architecture_Core_Spec_v2.01_r1.00
* Section: 6.3 Assigned UIDs
*/
-#define OPAL_UID_LENGTH 8
#define OPAL_METHOD_LENGTH 8
#define OPAL_MSID_KEYLEN 15
#define OPAL_UID_LENGTH_HALF 4
@@ -108,6 +107,7 @@ enum opal_uid {
OPAL_C_PIN_TABLE,
OPAL_LOCKING_INFO_TABLE,
OPAL_ENTERPRISE_LOCKING_INFO_TABLE,
+ OPAL_DATASTORE,
/* C_PIN_TABLE object ID's */
OPAL_C_PIN_MSID,
OPAL_C_PIN_SID,
@@ -205,6 +205,10 @@ enum opal_lockingstate {
OPAL_LOCKING_LOCKED = 0x03,
};
+enum opal_parameter {
+ OPAL_SUM_SET_LIST = 0x060000,
+};
+
/* Packets derived from:
* TCG_Storage_Architecture_Core_Spec_v2.01_r1.00
* Secion: 3.2.3 ComPackets, Packets & Subpackets
diff --git a/block/partition-generic.c b/block/partition-generic.c
index aee643ce13d1..1d20c9cf213f 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -127,7 +127,8 @@ ssize_t part_stat_show(struct device *dev,
"%8lu %8lu %8llu %8u "
"%8lu %8lu %8llu %8u "
"%8u %8u %8u "
- "%8lu %8lu %8llu %8u"
+ "%8lu %8lu %8llu %8u "
+ "%8lu %8u"
"\n",
part_stat_read(p, ios[STAT_READ]),
part_stat_read(p, merges[STAT_READ]),
@@ -143,7 +144,9 @@ ssize_t part_stat_show(struct device *dev,
part_stat_read(p, ios[STAT_DISCARD]),
part_stat_read(p, merges[STAT_DISCARD]),
(unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]),
- (unsigned int)part_stat_read_msecs(p, STAT_DISCARD));
+ (unsigned int)part_stat_read_msecs(p, STAT_DISCARD),
+ part_stat_read(p, ios[STAT_FLUSH]),
+ (unsigned int)part_stat_read_msecs(p, STAT_FLUSH));
}
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
@@ -439,12 +442,14 @@ static bool disk_unlock_native_capacity(struct gendisk *disk)
}
}
-static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
+int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev)
{
struct disk_part_iter piter;
struct hd_struct *part;
int res;
+ if (!disk_part_scan_enabled(disk))
+ return 0;
if (bdev->bd_part_count || bdev->bd_super)
return -EBUSY;
res = invalidate_partition(disk, 0);
@@ -459,204 +464,124 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
return 0;
}
-static bool part_zone_aligned(struct gendisk *disk,
- struct block_device *bdev,
- sector_t from, sector_t size)
+static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
+ struct parsed_partitions *state, int p)
{
- unsigned int zone_sectors = bdev_zone_sectors(bdev);
+ sector_t size = state->parts[p].size;
+ sector_t from = state->parts[p].from;
+ struct hd_struct *part;
- /*
- * If this function is called, then the disk is a zoned block device
- * (host-aware or host-managed). This can be detected even if the
- * zoned block device support is disabled (CONFIG_BLK_DEV_ZONED not
- * set). In this case, however, only host-aware devices will be seen
- * as a block device is not created for host-managed devices. Without
- * zoned block device support, host-aware drives can still be used as
- * regular block devices (no zone operation) and their zone size will
- * be reported as 0. Allow this case.
- */
- if (!zone_sectors)
+ if (!size)
return true;
- /*
- * Check partition start and size alignement. If the drive has a
- * smaller last runt zone, ignore it and allow the partition to
- * use it. Check the zone size too: it should be a power of 2 number
- * of sectors.
- */
- if (WARN_ON_ONCE(!is_power_of_2(zone_sectors))) {
- u32 rem;
-
- div_u64_rem(from, zone_sectors, &rem);
- if (rem)
+ if (from >= get_capacity(disk)) {
+ printk(KERN_WARNING
+ "%s: p%d start %llu is beyond EOD, ",
+ disk->disk_name, p, (unsigned long long) from);
+ if (disk_unlock_native_capacity(disk))
return false;
- if ((from + size) < get_capacity(disk)) {
- div_u64_rem(size, zone_sectors, &rem);
- if (rem)
- return false;
- }
+ return true;
+ }
- } else {
+ if (from + size > get_capacity(disk)) {
+ printk(KERN_WARNING
+ "%s: p%d size %llu extends beyond EOD, ",
+ disk->disk_name, p, (unsigned long long) size);
- if (from & (zone_sectors - 1))
- return false;
- if ((from + size) < get_capacity(disk) &&
- (size & (zone_sectors - 1)))
+ if (disk_unlock_native_capacity(disk))
return false;
+ /*
+ * We cannot ignore partitions of broken tables created by, for
+ * example, camera firmware, but we limit them to the end of the
+ * disk to avoid creating invalid block devices.
+ */
+ size = get_capacity(disk) - from;
+ }
+
+ part = add_partition(disk, p, from, size, state->parts[p].flags,
+ &state->parts[p].info);
+ if (IS_ERR(part)) {
+ printk(KERN_ERR " %s: p%d could not be added: %ld\n",
+ disk->disk_name, p, -PTR_ERR(part));
+ return true;
}
+#ifdef CONFIG_BLK_DEV_MD
+ if (state->parts[p].flags & ADDPART_FLAG_RAID)
+ md_autodetect_dev(part_to_dev(part)->devt);
+#endif
return true;
}
-int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+int blk_add_partitions(struct gendisk *disk, struct block_device *bdev)
{
- struct parsed_partitions *state = NULL;
- struct hd_struct *part;
- int p, highest, res;
-rescan:
- if (state && !IS_ERR(state)) {
- free_partitions(state);
- state = NULL;
- }
+ struct parsed_partitions *state;
+ int ret = -EAGAIN, p, highest;
- res = drop_partitions(disk, bdev);
- if (res)
- return res;
+ if (!disk_part_scan_enabled(disk))
+ return 0;
- if (disk->fops->revalidate_disk)
- disk->fops->revalidate_disk(disk);
- check_disk_size_change(disk, bdev, true);
- bdev->bd_invalidated = 0;
- if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
+ state = check_partition(disk, bdev);
+ if (!state)
return 0;
if (IS_ERR(state)) {
/*
- * I/O error reading the partition table. If any
- * partition code tried to read beyond EOD, retry
- * after unlocking native capacity.
+ * I/O error reading the partition table. If we tried to read
+ * beyond EOD, retry after unlocking the native capacity.
*/
if (PTR_ERR(state) == -ENOSPC) {
printk(KERN_WARNING "%s: partition table beyond EOD, ",
disk->disk_name);
if (disk_unlock_native_capacity(disk))
- goto rescan;
+ return -EAGAIN;
}
return -EIO;
}
+
/*
- * If any partition code tried to read beyond EOD, try
- * unlocking native capacity even if partition table is
- * successfully read as we could be missing some partitions.
+ * Partitions are not supported on zoned block devices.
+ */
+ if (bdev_is_zoned(bdev)) {
+ pr_warn("%s: ignoring partition table on zoned block device\n",
+ disk->disk_name);
+ ret = 0;
+ goto out_free_state;
+ }
+
+ /*
+ * If we read beyond EOD, try unlocking native capacity even if the
+ * partition table was successfully read as we could be missing some
+ * partitions.
*/
if (state->access_beyond_eod) {
printk(KERN_WARNING
"%s: partition table partially beyond EOD, ",
disk->disk_name);
if (disk_unlock_native_capacity(disk))
- goto rescan;
+ goto out_free_state;
}
/* tell userspace that the media / partition table may have changed */
kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
- /* Detect the highest partition number and preallocate
- * disk->part_tbl. This is an optimization and not strictly
- * necessary.
+ /*
+ * Detect the highest partition number and preallocate disk->part_tbl.
+ * This is an optimization and not strictly necessary.
*/
for (p = 1, highest = 0; p < state->limit; p++)
if (state->parts[p].size)
highest = p;
-
disk_expand_part_tbl(disk, highest);
- /* add partitions */
- for (p = 1; p < state->limit; p++) {
- sector_t size, from;
-
- size = state->parts[p].size;
- if (!size)
- continue;
-
- from = state->parts[p].from;
- if (from >= get_capacity(disk)) {
- printk(KERN_WARNING
- "%s: p%d start %llu is beyond EOD, ",
- disk->disk_name, p, (unsigned long long) from);
- if (disk_unlock_native_capacity(disk))
- goto rescan;
- continue;
- }
-
- if (from + size > get_capacity(disk)) {
- printk(KERN_WARNING
- "%s: p%d size %llu extends beyond EOD, ",
- disk->disk_name, p, (unsigned long long) size);
-
- if (disk_unlock_native_capacity(disk)) {
- /* free state and restart */
- goto rescan;
- } else {
- /*
- * we can not ignore partitions of broken tables
- * created by for example camera firmware, but
- * we limit them to the end of the disk to avoid
- * creating invalid block devices
- */
- size = get_capacity(disk) - from;
- }
- }
-
- /*
- * On a zoned block device, partitions should be aligned on the
- * device zone size (i.e. zone boundary crossing not allowed).
- * Otherwise, resetting the write pointer of the last zone of
- * one partition may impact the following partition.
- */
- if (bdev_is_zoned(bdev) &&
- !part_zone_aligned(disk, bdev, from, size)) {
- printk(KERN_WARNING
- "%s: p%d start %llu+%llu is not zone aligned\n",
- disk->disk_name, p, (unsigned long long) from,
- (unsigned long long) size);
- continue;
- }
+ for (p = 1; p < state->limit; p++)
+ if (!blk_add_partition(disk, bdev, state, p))
+ goto out_free_state;
- part = add_partition(disk, p, from, size,
- state->parts[p].flags,
- &state->parts[p].info);
- if (IS_ERR(part)) {
- printk(KERN_ERR " %s: p%d could not be added: %ld\n",
- disk->disk_name, p, -PTR_ERR(part));
- continue;
- }
-#ifdef CONFIG_BLK_DEV_MD
- if (state->parts[p].flags & ADDPART_FLAG_RAID)
- md_autodetect_dev(part_to_dev(part)->devt);
-#endif
- }
+ ret = 0;
+out_free_state:
free_partitions(state);
- return 0;
-}
-
-int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
-{
- int res;
-
- if (!bdev->bd_invalidated)
- return 0;
-
- res = drop_partitions(disk, bdev);
- if (res)
- return res;
-
- set_capacity(disk, 0);
- check_disk_size_change(disk, bdev, false);
- bdev->bd_invalidated = 0;
- /* tell userspace that the media / partition table may have changed */
- kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
-
- return 0;
+ return ret;
}
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
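The blk_add_partitions() rework above replaces the internal rescan loop with an -EAGAIN return once native capacity has been unlocked, leaving the retry to the caller, and it clamps oversized partitions to the end of the disk instead of dropping them. A minimal standalone sketch of that retry contract follows; fake_disk and fake_scan_partitions are hypothetical stand-ins, not kernel interfaces.

/*
 * Minimal sketch (not kernel code) of the retry contract suggested by
 * blk_add_partitions() returning -EAGAIN: the caller re-runs the scan
 * once after the native capacity has been unlocked.  All names here
 * (fake_disk, fake_scan_partitions) are hypothetical.
 */
#include <stdio.h>
#include <errno.h>

struct fake_disk {
	unsigned long long capacity;
	unsigned long long native_capacity;
	int unlocked;
};

/* Pretend scan: fails with -EAGAIN once if the disk is still clipped. */
static int fake_scan_partitions(struct fake_disk *d)
{
	if (d->capacity < d->native_capacity && !d->unlocked) {
		d->unlocked = 1;
		d->capacity = d->native_capacity;	/* "unlock" */
		return -EAGAIN;
	}
	printf("scanned %llu sectors\n", d->capacity);
	return 0;
}

int main(void)
{
	struct fake_disk d = { .capacity = 1000, .native_capacity = 2000 };
	int ret = fake_scan_partitions(&d);

	if (ret == -EAGAIN)		/* one retry, mirroring the caller */
		ret = fake_scan_partitions(&d);
	return ret ? 1 : 0;
}
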
diff --git a/block/sed-opal.c b/block/sed-opal.c
index b4c761973ac1..880cc57a5f6b 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -149,6 +149,8 @@ static const u8 opaluid[][OPAL_UID_LENGTH] = {
{ 0x00, 0x00, 0x08, 0x01, 0x00, 0x00, 0x00, 0x01 },
[OPAL_ENTERPRISE_LOCKING_INFO_TABLE] =
{ 0x00, 0x00, 0x08, 0x01, 0x00, 0x00, 0x00, 0x00 },
+ [OPAL_DATASTORE] =
+ { 0x00, 0x00, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00 },
/* C_PIN_TABLE object ID's */
[OPAL_C_PIN_MSID] =
@@ -1139,11 +1141,11 @@ static int generic_get_column(struct opal_dev *dev, const u8 *table,
*
* the result is provided in dev->resp->tok[4]
*/
-static int generic_get_table_info(struct opal_dev *dev, enum opal_uid table,
+static int generic_get_table_info(struct opal_dev *dev, const u8 *table_uid,
u64 column)
{
u8 uid[OPAL_UID_LENGTH];
- const unsigned int half = OPAL_UID_LENGTH/2;
+ const unsigned int half = OPAL_UID_LENGTH_HALF;
/* sed-opal UIDs can be split in two halves:
* first: actual table index
@@ -1152,7 +1154,7 @@ static int generic_get_table_info(struct opal_dev *dev, enum opal_uid table,
* first part of the target table as relative index into that table
*/
memcpy(uid, opaluid[OPAL_TABLE_TABLE], half);
- memcpy(uid+half, opaluid[table], half);
+ memcpy(uid + half, table_uid, half);
return generic_get_column(dev, uid, column);
}
@@ -1221,6 +1223,75 @@ static int get_active_key(struct opal_dev *dev, void *data)
return get_active_key_cont(dev);
}
+static int generic_table_write_data(struct opal_dev *dev, const u64 data,
+ u64 offset, u64 size, const u8 *uid)
+{
+ const u8 __user *src = (u8 __user *)(uintptr_t)data;
+ u8 *dst;
+ u64 len;
+ size_t off = 0;
+ int err;
+
+ /* do we fit in the available space? */
+ err = generic_get_table_info(dev, uid, OPAL_TABLE_ROWS);
+ if (err) {
+ pr_debug("Couldn't get the table size\n");
+ return err;
+ }
+
+ len = response_get_u64(&dev->parsed, 4);
+ if (size > len || offset > len - size) {
+ pr_debug("Does not fit in the table (%llu vs. %llu)\n",
+ offset + size, len);
+ return -ENOSPC;
+ }
+
+ /* do the actual transmission(s) */
+ while (off < size) {
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_WHERE);
+ add_token_u64(&err, dev, offset + off);
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_VALUES);
+
+ /*
+ * The bytestring header is either 1 or 2 bytes, so assume 2.
+ * There also needs to be enough space to accommodate the
+ * trailing OPAL_ENDNAME (1 byte) and tokens added by
+ * cmd_finalize.
+ */
+ len = min(remaining_size(dev) - (2+1+CMD_FINALIZE_BYTES_NEEDED),
+ (size_t)(size - off));
+ pr_debug("Write bytes %zu+%llu/%llu\n", off, len, size);
+
+ dst = add_bytestring_header(&err, dev, len);
+ if (!dst)
+ break;
+
+ if (copy_from_user(dst, src + off, len)) {
+ err = -EFAULT;
+ break;
+ }
+
+ dev->pos += len;
+
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+ if (err)
+ break;
+
+ err = finalize_and_send(dev, parse_and_check_status);
+ if (err)
+ break;
+
+ off += len;
+ }
+
+ return err;
+}
+
static int generic_lr_enable_disable(struct opal_dev *dev,
u8 *uid, bool rle, bool wle,
bool rl, bool wl)
@@ -1583,68 +1654,9 @@ static int set_mbr_enable_disable(struct opal_dev *dev, void *data)
static int write_shadow_mbr(struct opal_dev *dev, void *data)
{
struct opal_shadow_mbr *shadow = data;
- const u8 __user *src;
- u8 *dst;
- size_t off = 0;
- u64 len;
- int err = 0;
-
- /* do we fit in the available shadow mbr space? */
- err = generic_get_table_info(dev, OPAL_MBR, OPAL_TABLE_ROWS);
- if (err) {
- pr_debug("MBR: could not get shadow size\n");
- return err;
- }
-
- len = response_get_u64(&dev->parsed, 4);
- if (shadow->size > len || shadow->offset > len - shadow->size) {
- pr_debug("MBR: does not fit in shadow (%llu vs. %llu)\n",
- shadow->offset + shadow->size, len);
- return -ENOSPC;
- }
-
- /* do the actual transmission(s) */
- src = (u8 __user *)(uintptr_t)shadow->data;
- while (off < shadow->size) {
- err = cmd_start(dev, opaluid[OPAL_MBR], opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, OPAL_WHERE);
- add_token_u64(&err, dev, shadow->offset + off);
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, OPAL_VALUES);
-
- /*
- * The bytestring header is either 1 or 2 bytes, so assume 2.
- * There also needs to be enough space to accommodate the
- * trailing OPAL_ENDNAME (1 byte) and tokens added by
- * cmd_finalize.
- */
- len = min(remaining_size(dev) - (2+1+CMD_FINALIZE_BYTES_NEEDED),
- (size_t)(shadow->size - off));
- pr_debug("MBR: write bytes %zu+%llu/%llu\n",
- off, len, shadow->size);
-
- dst = add_bytestring_header(&err, dev, len);
- if (!dst)
- break;
- if (copy_from_user(dst, src + off, len))
- err = -EFAULT;
- dev->pos += len;
-
- add_token_u8(&err, dev, OPAL_ENDNAME);
- if (err)
- break;
-
- err = finalize_and_send(dev, parse_and_check_status);
- if (err)
- break;
-
- off += len;
- }
- return err;
+ return generic_table_write_data(dev, shadow->data, shadow->offset,
+ shadow->size, opaluid[OPAL_MBR]);
}
static int generic_pw_cmd(u8 *key, size_t key_len, u8 *cpin_uid,
@@ -1874,7 +1886,6 @@ static int activate_lsp(struct opal_dev *dev, void *data)
{
struct opal_lr_act *opal_act = data;
u8 user_lr[OPAL_UID_LENGTH];
- u8 uint_3 = 0x83;
int err, i;
err = cmd_start(dev, opaluid[OPAL_LOCKINGSP_UID],
@@ -1887,10 +1898,7 @@ static int activate_lsp(struct opal_dev *dev, void *data)
return err;
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, uint_3);
- add_token_u8(&err, dev, 6);
- add_token_u8(&err, dev, 0);
- add_token_u8(&err, dev, 0);
+ add_token_u64(&err, dev, OPAL_SUM_SET_LIST);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_bytestring(&err, dev, user_lr, OPAL_UID_LENGTH);
@@ -1957,6 +1965,113 @@ static int get_msid_cpin_pin(struct opal_dev *dev, void *data)
return 0;
}
+static int write_table_data(struct opal_dev *dev, void *data)
+{
+ struct opal_read_write_table *write_tbl = data;
+
+ return generic_table_write_data(dev, write_tbl->data, write_tbl->offset,
+ write_tbl->size, write_tbl->table_uid);
+}
+
+static int read_table_data_cont(struct opal_dev *dev)
+{
+ int err;
+ const char *data_read;
+
+ err = parse_and_check_status(dev);
+ if (err)
+ return err;
+
+ dev->prev_d_len = response_get_string(&dev->parsed, 1, &data_read);
+ dev->prev_data = (void *)data_read;
+ if (!dev->prev_data) {
+ pr_debug("%s: Couldn't read data from the table.\n", __func__);
+ return OPAL_INVAL_PARAM;
+ }
+
+ return 0;
+}
+
+/*
+ * IO_BUFFER_LENGTH = 2048
+ * sizeof(header) = 56
+ * No. of Token Bytes in the Response = 11
+ * MAX size of data that can be carried in response buffer
+ * at a time is : 2048 - (56 + 11) = 1981 = 0x7BD.
+ */
+#define OPAL_MAX_READ_TABLE (0x7BD)
+
+static int read_table_data(struct opal_dev *dev, void *data)
+{
+ struct opal_read_write_table *read_tbl = data;
+ int err;
+ size_t off = 0, max_read_size = OPAL_MAX_READ_TABLE;
+ u64 table_len, len;
+ u64 offset = read_tbl->offset, read_size = read_tbl->size - 1;
+ u8 __user *dst;
+
+ err = generic_get_table_info(dev, read_tbl->table_uid, OPAL_TABLE_ROWS);
+ if (err) {
+ pr_debug("Couldn't get the table size\n");
+ return err;
+ }
+
+ table_len = response_get_u64(&dev->parsed, 4);
+
+ /* Check if the user is trying to read from the table limits */
+ if (read_size > table_len || offset > table_len - read_size) {
+ pr_debug("Read size exceeds the Table size limits (%llu vs. %llu)\n",
+ offset + read_size, table_len);
+ return -EINVAL;
+ }
+
+ while (off < read_size) {
+ err = cmd_start(dev, read_tbl->table_uid, opalmethod[OPAL_GET]);
+
+ add_token_u8(&err, dev, OPAL_STARTLIST);
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_STARTROW);
+ add_token_u64(&err, dev, offset + off); /* start row value */
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_ENDROW);
+
+ len = min(max_read_size, (size_t)(read_size - off));
+ add_token_u64(&err, dev, offset + off + len); /* end row value
+ */
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+ add_token_u8(&err, dev, OPAL_ENDLIST);
+
+ if (err) {
+ pr_debug("Error building read table data command.\n");
+ break;
+ }
+
+ err = finalize_and_send(dev, read_table_data_cont);
+ if (err)
+ break;
+
+ /* len+1: This includes the NULL terminator at the end*/
+ if (dev->prev_d_len > len + 1) {
+ err = -EOVERFLOW;
+ break;
+ }
+
+ dst = (u8 __user *)(uintptr_t)read_tbl->data;
+ if (copy_to_user(dst + off, dev->prev_data, dev->prev_d_len)) {
+ pr_debug("Error copying data to userspace\n");
+ err = -EFAULT;
+ break;
+ }
+ dev->prev_data = NULL;
+
+ off += len;
+ }
+
+ return err;
+}
+
static int end_opal_session(struct opal_dev *dev, void *data)
{
int err = 0;
@@ -2443,6 +2558,68 @@ bool opal_unlock_from_suspend(struct opal_dev *dev)
}
EXPORT_SYMBOL(opal_unlock_from_suspend);
+static int opal_read_table(struct opal_dev *dev,
+ struct opal_read_write_table *rw_tbl)
+{
+ const struct opal_step read_table_steps[] = {
+ { start_admin1LSP_opal_session, &rw_tbl->key },
+ { read_table_data, rw_tbl },
+ { end_opal_session, }
+ };
+ int ret = 0;
+
+ if (!rw_tbl->size)
+ return ret;
+
+ return execute_steps(dev, read_table_steps,
+ ARRAY_SIZE(read_table_steps));
+}
+
+static int opal_write_table(struct opal_dev *dev,
+ struct opal_read_write_table *rw_tbl)
+{
+ const struct opal_step write_table_steps[] = {
+ { start_admin1LSP_opal_session, &rw_tbl->key },
+ { write_table_data, rw_tbl },
+ { end_opal_session, }
+ };
+ int ret = 0;
+
+ if (!rw_tbl->size)
+ return ret;
+
+ return execute_steps(dev, write_table_steps,
+ ARRAY_SIZE(write_table_steps));
+}
+
+static int opal_generic_read_write_table(struct opal_dev *dev,
+ struct opal_read_write_table *rw_tbl)
+{
+ int ret, bit_set;
+
+ mutex_lock(&dev->dev_lock);
+ setup_opal_dev(dev);
+
+ bit_set = fls64(rw_tbl->flags) - 1;
+ switch (bit_set) {
+ case OPAL_READ_TABLE:
+ ret = opal_read_table(dev, rw_tbl);
+ break;
+ case OPAL_WRITE_TABLE:
+ ret = opal_write_table(dev, rw_tbl);
+ break;
+ default:
+ pr_debug("Invalid bit set in the flag (%016llx).\n",
+ rw_tbl->flags);
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&dev->dev_lock);
+
+ return ret;
+}
+
int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *arg)
{
void *p;
@@ -2505,6 +2682,9 @@ int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *arg)
case IOC_OPAL_PSID_REVERT_TPR:
ret = opal_reverttper(dev, p, true);
break;
+ case IOC_OPAL_GENERIC_TABLE_RW:
+ ret = opal_generic_read_write_table(dev, p);
+ break;
default:
break;
}
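generic_table_write_data() above splits the user buffer into per-command chunks sized by the space left in the command buffer, after reserving the 2-byte bytestring header, the trailing OPAL_ENDNAME token and the bytes cmd_finalize() appends. The standalone sketch below reproduces only that chunking arithmetic; the buffer size, the amount already used and the finalize reservation are made-up example values, not the driver's real numbers.

/*
 * Standalone sketch of the chunking arithmetic in generic_table_write_data():
 * each command reserves space for a 2-byte bytestring header, a 1-byte
 * ENDNAME token and the finalize trailer, and the payload is split into
 * chunks that fit in what remains.  Constants below are illustrative.
 */
#include <stdio.h>
#include <stddef.h>

#define CMD_BUF_SIZE		2048	/* hypothetical IO buffer size */
#define CMD_USED_BYTES		 256	/* hypothetical: tokens already queued */
#define FINALIZE_BYTES_NEEDED	  28	/* hypothetical trailer reservation */

static size_t remaining_space(void)
{
	return CMD_BUF_SIZE - CMD_USED_BYTES;
}

int main(void)
{
	size_t size = 10000;	/* bytes the caller wants to write */
	size_t off = 0;

	while (off < size) {
		size_t room = remaining_space() - (2 + 1 + FINALIZE_BYTES_NEEDED);
		size_t len = room < size - off ? room : size - off;

		printf("chunk at offset %zu, %zu bytes\n", off, len);
		off += len;
	}
	return 0;
}
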
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 9803c7e0376e..f4907d941f03 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -235,16 +235,12 @@ static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}
-/**
- * Type 3 does not have a reference tag so no remapping is required.
- */
+/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}
-/**
- * Type 3 does not have a reference tag so no remapping is required.
- */
+/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}
diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c
index 76d2ce3a1b5b..d16d893bd195 100644
--- a/crypto/asymmetric_keys/asym_tpm.c
+++ b/crypto/asymmetric_keys/asym_tpm.c
@@ -13,7 +13,7 @@
#include <crypto/sha.h>
#include <asm/unaligned.h>
#include <keys/asymmetric-subtype.h>
-#include <keys/trusted.h>
+#include <keys/trusted_tpm.h>
#include <crypto/asym_tpm_subtype.h>
#include <crypto/public_key.h>
@@ -21,10 +21,6 @@
#define TPM_ORD_LOADKEY2 65
#define TPM_ORD_UNBIND 30
#define TPM_ORD_SIGN 60
-#define TPM_LOADKEY2_SIZE 59
-#define TPM_FLUSHSPECIFIC_SIZE 18
-#define TPM_UNBIND_SIZE 63
-#define TPM_SIGN_SIZE 63
#define TPM_RT_KEY 0x00000001
@@ -68,16 +64,13 @@ static int tpm_loadkey2(struct tpm_buf *tb,
return ret;
/* build the request buffer */
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
- store32(tb, TPM_LOADKEY2_SIZE + keybloblen);
- store32(tb, TPM_ORD_LOADKEY2);
- store32(tb, keyhandle);
- storebytes(tb, keyblob, keybloblen);
- store32(tb, authhandle);
- storebytes(tb, nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, authdata, SHA1_DIGEST_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_LOADKEY2);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append(tb, keyblob, keybloblen);
+ tpm_buf_append_u32(tb, authhandle);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0) {
@@ -101,12 +94,9 @@ static int tpm_loadkey2(struct tpm_buf *tb,
*/
static int tpm_flushspecific(struct tpm_buf *tb, uint32_t handle)
{
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_COMMAND);
- store32(tb, TPM_FLUSHSPECIFIC_SIZE);
- store32(tb, TPM_ORD_FLUSHSPECIFIC);
- store32(tb, handle);
- store32(tb, TPM_RT_KEY);
+ tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_FLUSHSPECIFIC);
+ tpm_buf_append_u32(tb, handle);
+ tpm_buf_append_u32(tb, TPM_RT_KEY);
return trusted_tpm_send(tb->data, MAX_BUF_SIZE);
}
@@ -155,17 +145,14 @@ static int tpm_unbind(struct tpm_buf *tb,
return ret;
/* build the request buffer */
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
- store32(tb, TPM_UNBIND_SIZE + bloblen);
- store32(tb, TPM_ORD_UNBIND);
- store32(tb, keyhandle);
- store32(tb, bloblen);
- storebytes(tb, blob, bloblen);
- store32(tb, authhandle);
- storebytes(tb, nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, authdata, SHA1_DIGEST_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_UNBIND);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append_u32(tb, bloblen);
+ tpm_buf_append(tb, blob, bloblen);
+ tpm_buf_append_u32(tb, authhandle);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0) {
@@ -241,17 +228,14 @@ static int tpm_sign(struct tpm_buf *tb,
return ret;
/* build the request buffer */
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
- store32(tb, TPM_SIGN_SIZE + bloblen);
- store32(tb, TPM_ORD_SIGN);
- store32(tb, keyhandle);
- store32(tb, bloblen);
- storebytes(tb, blob, bloblen);
- store32(tb, authhandle);
- storebytes(tb, nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, authdata, SHA1_DIGEST_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_SIGN);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append_u32(tb, bloblen);
+ tpm_buf_append(tb, blob, bloblen);
+ tpm_buf_append_u32(tb, authhandle);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0) {
@@ -519,7 +503,7 @@ static int tpm_key_decrypt(struct tpm_key *tk,
struct kernel_pkey_params *params,
const void *in, void *out)
{
- struct tpm_buf *tb;
+ struct tpm_buf tb;
uint32_t keyhandle;
uint8_t srkauth[SHA1_DIGEST_SIZE];
uint8_t keyauth[SHA1_DIGEST_SIZE];
@@ -533,14 +517,14 @@ static int tpm_key_decrypt(struct tpm_key *tk,
if (strcmp(params->encoding, "pkcs1"))
return -ENOPKG;
- tb = kzalloc(sizeof(*tb), GFP_KERNEL);
- if (!tb)
- return -ENOMEM;
+ r = tpm_buf_init(&tb, 0, 0);
+ if (r)
+ return r;
/* TODO: Handle a non-all zero SRK authorization */
memset(srkauth, 0, sizeof(srkauth));
- r = tpm_loadkey2(tb, SRKHANDLE, srkauth,
+ r = tpm_loadkey2(&tb, SRKHANDLE, srkauth,
tk->blob, tk->blob_len, &keyhandle);
if (r < 0) {
pr_devel("loadkey2 failed (%d)\n", r);
@@ -550,16 +534,16 @@ static int tpm_key_decrypt(struct tpm_key *tk,
/* TODO: Handle a non-all zero key authorization */
memset(keyauth, 0, sizeof(keyauth));
- r = tpm_unbind(tb, keyhandle, keyauth,
+ r = tpm_unbind(&tb, keyhandle, keyauth,
in, params->in_len, out, params->out_len);
if (r < 0)
pr_devel("tpm_unbind failed (%d)\n", r);
- if (tpm_flushspecific(tb, keyhandle) < 0)
+ if (tpm_flushspecific(&tb, keyhandle) < 0)
pr_devel("flushspecific failed (%d)\n", r);
error:
- kzfree(tb);
+ tpm_buf_destroy(&tb);
pr_devel("<==%s() = %d\n", __func__, r);
return r;
}
@@ -643,7 +627,7 @@ static int tpm_key_sign(struct tpm_key *tk,
struct kernel_pkey_params *params,
const void *in, void *out)
{
- struct tpm_buf *tb;
+ struct tpm_buf tb;
uint32_t keyhandle;
uint8_t srkauth[SHA1_DIGEST_SIZE];
uint8_t keyauth[SHA1_DIGEST_SIZE];
@@ -681,15 +665,14 @@ static int tpm_key_sign(struct tpm_key *tk,
goto error_free_asn1_wrapped;
}
- r = -ENOMEM;
- tb = kzalloc(sizeof(*tb), GFP_KERNEL);
- if (!tb)
+ r = tpm_buf_init(&tb, 0, 0);
+ if (r)
goto error_free_asn1_wrapped;
/* TODO: Handle a non-all zero SRK authorization */
memset(srkauth, 0, sizeof(srkauth));
- r = tpm_loadkey2(tb, SRKHANDLE, srkauth,
+ r = tpm_loadkey2(&tb, SRKHANDLE, srkauth,
tk->blob, tk->blob_len, &keyhandle);
if (r < 0) {
pr_devel("loadkey2 failed (%d)\n", r);
@@ -699,15 +682,15 @@ static int tpm_key_sign(struct tpm_key *tk,
/* TODO: Handle a non-all zero key authorization */
memset(keyauth, 0, sizeof(keyauth));
- r = tpm_sign(tb, keyhandle, keyauth, in, in_len, out, params->out_len);
+ r = tpm_sign(&tb, keyhandle, keyauth, in, in_len, out, params->out_len);
if (r < 0)
pr_devel("tpm_sign failed (%d)\n", r);
- if (tpm_flushspecific(tb, keyhandle) < 0)
+ if (tpm_flushspecific(&tb, keyhandle) < 0)
pr_devel("flushspecific failed (%d)\n", r);
error_free_tb:
- kzfree(tb);
+ tpm_buf_destroy(&tb);
error_free_asn1_wrapped:
kfree(asn1_wrapped);
pr_devel("<==%s() = %d\n", __func__, r);
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 753985c01517..46dc54d18f0b 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -56,7 +56,7 @@ struct acard_sg {
__le32 size; /* bit 31 (EOT) max==0x10000 (64k) */
};
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int acard_ahci_port_start(struct ata_port *ap);
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -210,7 +210,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
return si;
}
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
@@ -248,6 +248,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+ return AC_ERR_OK;
}
static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 05c2b32dcc4d..ec6c64fce74a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -56,6 +56,7 @@ enum board_ids {
board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
+ board_ahci_al,
board_ahci_avn,
board_ahci_mcp65,
board_ahci_mcp77,
@@ -167,6 +168,13 @@ static const struct ata_port_info ahci_port_info[] = {
.port_ops = &ahci_ops,
},
/* by chipsets */
+ [board_ahci_al] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_MSI),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_avn] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
@@ -415,6 +423,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
+ /* Amazon's Annapurna Labs support */
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031),
+ .class = PCI_CLASS_STORAGE_SATA_AHCI,
+ .class_mask = 0xffffff,
+ board_ahci_al },
/* AMD */
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index e3163dae5e85..cb55ebc1725b 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -483,7 +483,6 @@ static int tegra_ahci_probe(struct platform_device *pdev)
struct tegra_ahci_priv *tegra;
struct resource *res;
int ret;
- unsigned int i;
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
@@ -543,8 +542,9 @@ static int tegra_ahci_probe(struct platform_device *pdev)
if (!tegra->supplies)
return -ENOMEM;
- for (i = 0; i < tegra->soc->num_supplies; i++)
- tegra->supplies[i].supply = tegra->soc->supply_names[i];
+ regulator_bulk_set_supply_names(tegra->supplies,
+ tegra->soc->supply_names,
+ tegra->soc->num_supplies);
ret = devm_regulator_bulk_get(&pdev->dev,
tegra->soc->num_supplies,
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index e4da725381d3..3ca7720e7d8f 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -841,6 +841,12 @@ static int piix_broken_suspend(void)
},
},
{
+ .ident = "TECRA M3",
+ .matches = {
+ DMI_MATCH(DMI_OEM_STRING, "Tecra M3,"),
+ },
+ },
+ {
.ident = "TECRA M4",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
@@ -955,18 +961,10 @@ static int piix_broken_suspend(void)
{ } /* terminate list */
};
- static const char *oemstrs[] = {
- "Tecra M3,",
- };
- int i;
if (dmi_check_system(sysids))
return 1;
- for (i = 0; i < ARRAY_SIZE(oemstrs); i++)
- if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL))
- return 1;
-
/* TECRA M4 sometimes forgets its identify and reports bogus
* DMI information. As the bogus information is a bit
* generic, match as many entries as possible. This manual
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index bff369d9a1a7..ea5bf5f4cbed 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -57,7 +57,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
-static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
@@ -1624,7 +1624,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
return sata_pmp_qc_defer_cmd_switch(qc);
}
-static void ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
@@ -1660,6 +1660,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+ return AC_ERR_OK;
}
static void ahci_fbs_dec_intr(struct ata_port *ap)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 28c492be0a57..e9017c570bc5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4980,7 +4980,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
return ATA_DEFER_LINK;
}
-void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
+enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
+{
+ return AC_ERR_OK;
+}
/**
* ata_sg_init - Associate command with scatter-gather table.
@@ -5443,7 +5446,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
return;
}
- ap->ops->qc_prep(qc);
+ qc->err_mask |= ap->ops->qc_prep(qc);
+ if (unlikely(qc->err_mask))
+ goto err;
trace_ata_qc_issue(qc);
qc->err_mask |= ap->ops->qc_issue(qc);
if (unlikely(qc->err_mask))
@@ -6708,6 +6713,9 @@ void ata_host_detach(struct ata_host *host)
{
int i;
+ /* Ensure ata_port probe has completed */
+ async_synchronize_full();
+
for (i = 0; i < host->n_ports; i++)
ata_port_detach(host->ports[i]);
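The libata change above turns ->qc_prep() from a void hook into one that returns an enum ata_completion_errors, which ata_qc_issue() folds into qc->err_mask before deciding whether to issue the command. The toy model below shows that control flow with hypothetical types (toy_qc, toy_qc_prep); it is a sketch of the contract, not libata code.

/*
 * Toy model (hypothetical types, not libata) of the new qc_prep contract:
 * the prep hook returns a status instead of void, the core ORs it into the
 * command's error mask and skips issuing when prep failed.
 */
#include <stdio.h>

enum toy_err { TOY_ERR_OK = 0, TOY_ERR_INVALID = 1 << 0 };

struct toy_qc {
	unsigned int err_mask;
	int unsupported;
};

static enum toy_err toy_qc_prep(struct toy_qc *qc)
{
	if (qc->unsupported)
		return TOY_ERR_INVALID;	/* was a BUG_ON() before the change */
	return TOY_ERR_OK;
}

static void toy_qc_issue(struct toy_qc *qc)
{
	qc->err_mask |= toy_qc_prep(qc);
	if (qc->err_mask) {
		printf("prep failed, completing with error 0x%x\n", qc->err_mask);
		return;
	}
	printf("issuing command\n");
}

int main(void)
{
	struct toy_qc good = { 0 }, bad = { .unsupported = 1 };

	toy_qc_issue(&good);
	toy_qc_issue(&bad);
	return 0;
}
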
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 4ed682da52ae..038db94216a9 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2679,12 +2679,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
ata_bmdma_fill_sg(qc);
+
+ return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
@@ -2697,12 +2699,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
ata_bmdma_fill_sg_dumb(qc);
+
+ return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 3aa006c5ed0c..6bd2228bb6ff 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -100,7 +100,7 @@ static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev,
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
- const u16 timing[2][5] = {
+ static const u16 timing[2][5] = {
{ 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
{ 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
@@ -154,7 +154,7 @@ static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
- const u8 timing[2][5] = {
+ static const u8 timing[2][5] = {
{ 0x00, 0x0A, 0x08, 0x33, 0x31 },
{ 0x70, 0x7A, 0x78, 0x43, 0x41 }
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 57f2ec71cfc3..1bfd0154dad5 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -510,7 +510,7 @@ static int pata_macio_cable_detect(struct ata_port *ap)
return ATA_CBL_PATA40;
}
-static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
{
unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
struct ata_port *ap = qc->ap;
@@ -523,7 +523,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
__func__, qc, qc->flags, write, qc->dev->devno);
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
table = (struct dbdma_cmd *) priv->dma_table_cpu;
@@ -568,6 +568,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
table->command = cpu_to_le16(DBDMA_STOP);
dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
+
+ return AC_ERR_OK;
}
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 4afcb8e63e21..41430f79663c 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -44,25 +44,27 @@ static void pxa_ata_dma_irq(void *d)
/*
* Prepare taskfile for submission.
*/
-static void pxa_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
{
struct pata_pxa_data *pd = qc->ap->private_data;
struct dma_async_tx_descriptor *tx;
enum dma_transfer_direction dir;
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
DMA_PREP_INTERRUPT);
if (!tx) {
ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
- return;
+ return AC_ERR_OK;
}
tx->callback = pxa_ata_dma_irq;
tx->callback_param = pd;
pd->dma_cookie = dmaengine_submit(tx);
+
+ return AC_ERR_OK;
}
/*
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index cb490531b62e..5db55e1e2a61 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -116,7 +116,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
static int adma_port_start(struct ata_port *ap);
static void adma_port_stop(struct ata_port *ap);
-static void adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void adma_freeze(struct ata_port *ap);
@@ -295,7 +295,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
return i;
}
-static void adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
{
struct adma_port_priv *pp = qc->ap->private_data;
u8 *buf = pp->pkt;
@@ -306,7 +306,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
adma_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
- return;
+ return AC_ERR_OK;
buf[i++] = 0; /* Response flags */
buf[i++] = 0; /* reserved */
@@ -371,6 +371,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
printk("%s\n", obuf);
}
#endif
+ return AC_ERR_OK;
}
static inline void adma_packet_start(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 8e9cb198fcd1..9239615d8a04 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -502,7 +502,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
return num_prde;
}
-static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sata_fsl_port_priv *pp = ap->private_data;
@@ -548,6 +548,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
desc_info, ttl_dwords, num_prde);
+
+ return AC_ERR_OK;
}
static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 7f99e23bff88..a6b76cc12a66 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -478,7 +478,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
prd[-1].flags |= PRD_END;
}
-static void inic_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
{
struct inic_port_priv *pp = qc->ap->private_data;
struct inic_pkt *pkt = pp->pkt;
@@ -538,6 +538,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc)
inic_fill_sg(prd, qc);
pp->cpb_tbl[0] = pp->pkt_dma;
+
+ return AC_ERR_OK;
}
static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index ad385a113391..277f11909fc1 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -592,8 +592,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
-static void mv_qc_prep(struct ata_queued_cmd *qc);
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
@@ -2031,7 +2031,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
* LOCKING:
* Inherited from caller.
*/
-static void mv_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
@@ -2043,15 +2043,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
switch (tf->protocol) {
case ATA_PROT_DMA:
if (tf->command == ATA_CMD_DSM)
- return;
+ return AC_ERR_OK;
/* fall-thru */
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
mv_rw_multi_errata_sata24(qc);
- return;
+ return AC_ERR_OK;
default:
- return;
+ return AC_ERR_OK;
}
/* Fill in command request block
@@ -2098,12 +2098,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
* non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
* of which are defined/used by Linux. If we get here, this
* driver needs work.
- *
- * FIXME: modify libata to give qc_prep a return value and
- * return error here.
*/
- BUG_ON(tf->command);
- break;
+ ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
+ tf->command);
+ return AC_ERR_INVALID;
}
mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
@@ -2116,8 +2114,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
mv_fill_sg(qc);
+
+ return AC_ERR_OK;
}
/**
@@ -2132,7 +2132,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
* LOCKING:
* Inherited from caller.
*/
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
@@ -2143,9 +2143,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ))
- return;
+ return AC_ERR_OK;
if (tf->command == ATA_CMD_DSM)
- return; /* use bmdma for this */
+ return AC_ERR_OK; /* use bmdma for this */
/* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2186,8 +2186,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
);
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
mv_fill_sg(qc);
+
+ return AC_ERR_OK;
}
/**
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 56946012d113..65ec8dff1c51 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -297,7 +297,7 @@ static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
@@ -319,7 +319,7 @@ static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
@@ -1344,7 +1344,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
return 1;
}
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
@@ -1356,7 +1356,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
(qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
ata_bmdma_qc_prep(qc);
- return;
+ return AC_ERR_OK;
}
cpb->resp_flags = NV_CPB_RESP_DONE;
@@ -1388,6 +1388,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
cpb->ctl_flags = ctl_flags;
wmb();
cpb->resp_flags = 0;
+
+ return AC_ERR_OK;
}
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
@@ -1950,17 +1952,19 @@ static int nv_swncq_port_start(struct ata_port *ap)
return 0;
}
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
if (qc->tf.protocol != ATA_PROT_NCQ) {
ata_bmdma_qc_prep(qc);
- return;
+ return AC_ERR_OK;
}
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
nv_swncq_fill_sg(qc);
+
+ return AC_ERR_OK;
}
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 5fd464765ddc..c451d7d1c817 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -139,7 +139,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int pdc_common_port_start(struct ata_port *ap);
static int pdc_sata_port_start(struct ata_port *ap);
-static void pdc_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
@@ -633,7 +633,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
-static void pdc_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
{
struct pdc_port_priv *pp = qc->ap->private_data;
unsigned int i;
@@ -665,6 +665,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
default:
break;
}
+
+ return AC_ERR_OK;
}
static int pdc_is_sataii_tx4(unsigned long flags)
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index c53c5a47204d..ef00ab644afb 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -100,7 +100,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
-static void qs_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
static void qs_freeze(struct ata_port *ap);
@@ -260,7 +260,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
return si;
}
-static void qs_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
{
struct qs_port_priv *pp = qc->ap->private_data;
u8 dflags = QS_DF_PORD, *buf = pp->pkt;
@@ -272,7 +272,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
qs_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
- return;
+ return AC_ERR_OK;
nelem = qs_fill_sg(qc);
@@ -295,6 +295,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
/* frame information structure (FIS) */
ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
+
+ return AC_ERR_OK;
}
static inline void qs_packet_start(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 3495e1733a8e..980aacdbcf3b 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -550,12 +550,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
}
-static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
sata_rcar_bmdma_fill_sg(qc);
+
+ return AC_ERR_OK;
}
static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index e6fbae2f645a..75321f1ceba5 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -103,7 +103,7 @@ static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
-static void sil_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
static void sil_bmdma_setup(struct ata_queued_cmd *qc);
static void sil_bmdma_start(struct ata_queued_cmd *qc);
static void sil_bmdma_stop(struct ata_queued_cmd *qc);
@@ -317,12 +317,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
-static void sil_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
sil_fill_sg(qc);
+
+ return AC_ERR_OK;
}
static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 7bef82de53ca..560070d4f1d0 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -326,7 +326,7 @@ static void sil24_dev_config(struct ata_device *dev);
static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
static int sil24_qc_defer(struct ata_queued_cmd *qc);
-static void sil24_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
static void sil24_pmp_attach(struct ata_port *ap);
@@ -830,7 +830,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc)
return ata_std_qc_defer(qc);
}
-static void sil24_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sil24_port_priv *pp = ap->private_data;
@@ -874,6 +874,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
if (qc->flags & ATA_QCFLAG_DMAMAP)
sil24_fill_sg(qc, sge);
+
+ return AC_ERR_OK;
}
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 2277ba0c9c7f..2c7b30c5ea3d 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -202,7 +202,7 @@ static void pdc_error_handler(struct ata_port *ap);
static void pdc_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static int pdc_port_start(struct ata_port *ap);
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static unsigned int pdc20621_dimm_init(struct ata_host *host);
@@ -530,7 +530,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
}
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
{
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
@@ -542,6 +542,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
default:
break;
}
+
+ return AC_ERR_OK;
}
static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f6f77eaa7217..739b372a5112 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -417,18 +417,20 @@ out_free_page:
return ret;
}
-static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
+static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
+ int mode)
{
/*
- * We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do not support discard if
- * encryption is enabled, because it may give an attacker
- * useful information.
+ * We use fallocate to manipulate the space mappings used by the image
+ * a.k.a. discard/zerorange. However we do not support this if
+ * encryption is enabled, because it may give an attacker useful
+ * information.
*/
struct file *file = lo->lo_backing_file;
- int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
int ret;
+ mode |= FALLOC_FL_KEEP_SIZE;
+
if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
ret = -EOPNOTSUPP;
goto out;
@@ -596,9 +598,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
switch (req_op(rq)) {
case REQ_OP_FLUSH:
return lo_req_flush(lo, rq);
- case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
- return lo_discard(lo, rq, pos);
+ /*
+ * If the caller doesn't want deallocation, call zeroout to
+ * write zeroes the range. Otherwise, punch them out.
+ */
+ return lo_fallocate(lo, rq, pos,
+ (rq->cmd_flags & REQ_NOUNMAP) ?
+ FALLOC_FL_ZERO_RANGE :
+ FALLOC_FL_PUNCH_HOLE);
+ case REQ_OP_DISCARD:
+ return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
if (lo->transfer)
return lo_write_transfer(lo, rq, pos);
@@ -630,7 +640,9 @@ static void loop_reread_partitions(struct loop_device *lo,
{
int rc;
- rc = blkdev_reread_part(bdev);
+ mutex_lock(&bdev->bd_mutex);
+ rc = bdev_disk_changed(bdev, false);
+ mutex_unlock(&bdev->bd_mutex);
if (rc)
pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
__func__, lo->lo_number, lo->lo_file_name, rc);
@@ -1154,10 +1166,11 @@ out_unlock:
* must be at least one and it can only become zero when the
* current holder is released.
*/
- if (release)
- err = __blkdev_reread_part(bdev);
- else
- err = blkdev_reread_part(bdev);
+ if (!release)
+ mutex_lock(&bdev->bd_mutex);
+ err = bdev_disk_changed(bdev, false);
+ if (!release)
+ mutex_unlock(&bdev->bd_mutex);
if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
__func__, lo_number, err);
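The loop driver change above maps REQ_OP_DISCARD to FALLOC_FL_PUNCH_HOLE and REQ_OP_WRITE_ZEROES to either FALLOC_FL_ZERO_RANGE (when REQ_NOUNMAP is set) or FALLOC_FL_PUNCH_HOLE, always ORed with FALLOC_FL_KEEP_SIZE. The userspace snippet below applies the same two fallocate() modes to an ordinary file; "scratch.img" is only an example path, and FALLOC_FL_ZERO_RANGE may return EOPNOTSUPP on filesystems that lack it.

/*
 * Userspace illustration of the fallocate() modes the loop driver now uses
 * on its backing file: discard -> punch hole, write-zeroes with REQ_NOUNMAP
 * -> zero range, both combined with FALLOC_FL_KEEP_SIZE.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#ifndef FALLOC_FL_ZERO_RANGE
#define FALLOC_FL_ZERO_RANGE 0x10	/* value from <linux/falloc.h> */
#endif

int main(void)
{
	int fd = open("scratch.img", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, 1 << 20) < 0)
		return 1;

	/* REQ_OP_DISCARD: deallocate the blocks, keep the file size. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 64 * 1024) < 0)
		perror("punch hole");

	/* REQ_OP_WRITE_ZEROES with REQ_NOUNMAP: zero without deallocating. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
		      64 * 1024, 64 * 1024) < 0)
		perror("zero range");

	close(fd);
	return 0;
}
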
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 964f78cfffa0..f6bafa9a68b9 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -129,7 +129,7 @@ struct mtip_compat_ide_task_request_s {
/*
* This function check_for_surprise_removal is called
* while card is removed from the system and it will
- * read the vendor id from the configration space
+ * read the vendor id from the configuration space
*
* @pdev Pointer to the pci_dev structure.
*
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a94ee45440b3..57532465fb83 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -993,6 +993,7 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
if (sock->ops->shutdown == sock_no_shutdown) {
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
*err = -EINVAL;
+ sockfd_put(sock);
return NULL;
}
@@ -1031,14 +1032,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
sockfd_put(sock);
return -ENOMEM;
}
+
+ config->socks = socks;
+
nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
if (!nsock) {
sockfd_put(sock);
return -ENOMEM;
}
- config->socks = socks;
-
nsock->fallback_index = -1;
nsock->dead = false;
mutex_init(&nsock->tx_lock);
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index a235c45e22a7..bc837862b767 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -91,11 +91,13 @@ struct nullb {
#ifdef CONFIG_BLK_DEV_ZONED
int null_zone_init(struct nullb_device *dev);
void null_zone_exit(struct nullb_device *dev);
-int null_zone_report(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones);
+int null_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
enum req_opf op, sector_t sector,
sector_t nr_sectors);
+size_t null_zone_valid_read_len(struct nullb *nullb,
+ sector_t sector, unsigned int len);
#else
static inline int null_zone_init(struct nullb_device *dev)
{
@@ -103,17 +105,18 @@ static inline int null_zone_init(struct nullb_device *dev)
return -EINVAL;
}
static inline void null_zone_exit(struct nullb_device *dev) {}
-static inline int null_zone_report(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones,
- unsigned int *nr_zones)
-{
- return -EOPNOTSUPP;
-}
static inline blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
enum req_opf op, sector_t sector,
sector_t nr_sectors)
{
return BLK_STS_NOTSUPP;
}
+static inline size_t null_zone_valid_read_len(struct nullb *nullb,
+ sector_t sector,
+ unsigned int len)
+{
+ return len;
+}
+#define null_report_zones NULL
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __NULL_BLK_H */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 0e7da5015ccd..795fda576824 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -227,7 +227,7 @@ static ssize_t nullb_device_uint_attr_store(unsigned int *val,
int result;
result = kstrtouint(page, 0, &tmp);
- if (result)
+ if (result < 0)
return result;
*val = tmp;
@@ -241,7 +241,7 @@ static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
unsigned long tmp;
result = kstrtoul(page, 0, &tmp);
- if (result)
+ if (result < 0)
return result;
*val = tmp;
@@ -255,7 +255,7 @@ static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
int result;
result = kstrtobool(page, &tmp);
- if (result)
+ if (result < 0)
return result;
*val = tmp;
@@ -263,7 +263,7 @@ static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
}
/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
-#define NULLB_DEVICE_ATTR(NAME, TYPE) \
+#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
static ssize_t \
nullb_device_##NAME##_show(struct config_item *item, char *page) \
{ \
@@ -274,31 +274,57 @@ static ssize_t \
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
size_t count) \
{ \
- if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
- return -EBUSY; \
- return nullb_device_##TYPE##_attr_store( \
- &to_nullb_device(item)->NAME, page, count); \
+ int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY; \
+ struct nullb_device *dev = to_nullb_device(item); \
+ TYPE new_value; \
+ int ret; \
+ \
+ ret = nullb_device_##TYPE##_attr_store(&new_value, page, count); \
+ if (ret < 0) \
+ return ret; \
+ if (apply_fn) \
+ ret = apply_fn(dev, new_value); \
+ else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
+ ret = -EBUSY; \
+ if (ret < 0) \
+ return ret; \
+ dev->NAME = new_value; \
+ return count; \
} \
CONFIGFS_ATTR(nullb_device_, NAME);
-NULLB_DEVICE_ATTR(size, ulong);
-NULLB_DEVICE_ATTR(completion_nsec, ulong);
-NULLB_DEVICE_ATTR(submit_queues, uint);
-NULLB_DEVICE_ATTR(home_node, uint);
-NULLB_DEVICE_ATTR(queue_mode, uint);
-NULLB_DEVICE_ATTR(blocksize, uint);
-NULLB_DEVICE_ATTR(irqmode, uint);
-NULLB_DEVICE_ATTR(hw_queue_depth, uint);
-NULLB_DEVICE_ATTR(index, uint);
-NULLB_DEVICE_ATTR(blocking, bool);
-NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
-NULLB_DEVICE_ATTR(memory_backed, bool);
-NULLB_DEVICE_ATTR(discard, bool);
-NULLB_DEVICE_ATTR(mbps, uint);
-NULLB_DEVICE_ATTR(cache_size, ulong);
-NULLB_DEVICE_ATTR(zoned, bool);
-NULLB_DEVICE_ATTR(zone_size, ulong);
-NULLB_DEVICE_ATTR(zone_nr_conv, uint);
+static int nullb_apply_submit_queues(struct nullb_device *dev,
+ unsigned int submit_queues)
+{
+ struct nullb *nullb = dev->nullb;
+ struct blk_mq_tag_set *set;
+
+ if (!nullb)
+ return 0;
+
+ set = nullb->tag_set;
+ blk_mq_update_nr_hw_queues(set, submit_queues);
+ return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
+}
+
+NULLB_DEVICE_ATTR(size, ulong, NULL);
+NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
+NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
+NULLB_DEVICE_ATTR(home_node, uint, NULL);
+NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
+NULLB_DEVICE_ATTR(blocksize, uint, NULL);
+NULLB_DEVICE_ATTR(irqmode, uint, NULL);
+NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
+NULLB_DEVICE_ATTR(index, uint, NULL);
+NULLB_DEVICE_ATTR(blocking, bool, NULL);
+NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
+NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
+NULLB_DEVICE_ATTR(discard, bool, NULL);
+NULLB_DEVICE_ATTR(mbps, uint, NULL);
+NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
+NULLB_DEVICE_ATTR(zoned, bool, NULL);
+NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
+NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -467,7 +493,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
- return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
+ return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_nr_conv\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -996,6 +1022,16 @@ next:
return 0;
}
+static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
+ unsigned int len, unsigned int off)
+{
+ void *dst;
+
+ dst = kmap_atomic(page);
+ memset(dst + off, 0xFF, len);
+ kunmap_atomic(dst);
+}
+
static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
size_t temp;
@@ -1036,10 +1072,24 @@ static int null_transfer(struct nullb *nullb, struct page *page,
unsigned int len, unsigned int off, bool is_write, sector_t sector,
bool is_fua)
{
+ struct nullb_device *dev = nullb->dev;
+ unsigned int valid_len = len;
int err = 0;
if (!is_write) {
- err = copy_from_nullb(nullb, page, off, sector, len);
+ if (dev->zoned)
+ valid_len = null_zone_valid_read_len(nullb,
+ sector, len);
+
+ if (valid_len) {
+ err = copy_from_nullb(nullb, page, off,
+ sector, valid_len);
+ off += valid_len;
+ len -= valid_len;
+ }
+
+ if (len)
+ nullb_fill_pattern(nullb, page, len, off);
flush_dcache_page(page);
} else {
flush_dcache_page(page);
@@ -1418,20 +1468,9 @@ static void null_config_discard(struct nullb *nullb)
blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}
-static int null_open(struct block_device *bdev, fmode_t mode)
-{
- return 0;
-}
-
-static void null_release(struct gendisk *disk, fmode_t mode)
-{
-}
-
-static const struct block_device_operations null_fops = {
- .owner = THIS_MODULE,
- .open = null_open,
- .release = null_release,
- .report_zones = null_zone_report,
+static const struct block_device_operations null_ops = {
+ .owner = THIS_MODULE,
+ .report_zones = null_report_zones,
};
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
@@ -1532,7 +1571,7 @@ static int null_gendisk_register(struct nullb *nullb)
disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->major = null_major;
disk->first_minor = nullb->index;
- disk->fops = &null_fops;
+ disk->fops = &null_ops;
disk->private_data = nullb;
disk->queue = nullb->q;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
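The NULLB_DEVICE_ATTR() macro now takes a third argument: an optional apply callback that is invoked when the attribute is written while the device is live, as nullb_apply_submit_queues() does for submit_queues. Passing NULL keeps the old behaviour of rejecting writes with -EBUSY once the device has been configured. A hedged sketch of how a further attribute could opt into live updates; the callback below is hypothetical and not part of the patch:

/*
 * Hypothetical apply callback, shown only to illustrate the macro's new
 * third argument; it is not part of the patch.  To use it, the existing
 * NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL) line would name it
 * instead of NULL.
 */
static int nullb_apply_hw_queue_depth(struct nullb_device *dev,
				      unsigned int depth)
{
	/* Device not powered on yet: the new value is simply stored. */
	if (!dev->nullb)
		return 0;

	/*
	 * A real callback would reconfigure the live device here and return
	 * a negative errno on failure, as nullb_apply_submit_queues() does
	 * for submit_queues.
	 */
	return 0;
}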
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 3d7fdea872f8..d4d88b581822 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -66,22 +66,53 @@ void null_zone_exit(struct nullb_device *dev)
kvfree(dev->zones);
}
-int null_zone_report(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+int null_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct nullb *nullb = disk->private_data;
struct nullb_device *dev = nullb->dev;
- unsigned int zno, nrz = 0;
-
- zno = null_zone_no(dev, sector);
- if (zno < dev->nr_zones) {
- nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);
- memcpy(zones, &dev->zones[zno], nrz * sizeof(struct blk_zone));
+ unsigned int first_zone, i;
+ struct blk_zone zone;
+ int error;
+
+ first_zone = null_zone_no(dev, sector);
+ if (first_zone >= dev->nr_zones)
+ return 0;
+
+ nr_zones = min(nr_zones, dev->nr_zones - first_zone);
+ for (i = 0; i < nr_zones; i++) {
+ /*
+ * Stacked DM target drivers will remap the zone information by
+ * modifying the zone information passed to the report callback.
+ * So use a local copy to avoid corruption of the device zone
+ * array.
+ */
+ memcpy(&zone, &dev->zones[first_zone + i],
+ sizeof(struct blk_zone));
+ error = cb(&zone, i, data);
+ if (error)
+ return error;
}
- *nr_zones = nrz;
+ return nr_zones;
+}
- return 0;
+size_t null_zone_valid_read_len(struct nullb *nullb,
+ sector_t sector, unsigned int len)
+{
+ struct nullb_device *dev = nullb->dev;
+ struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
+ unsigned int nr_sectors = len >> SECTOR_SHIFT;
+
+ /* Read must be below the write pointer position */
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
+ sector + nr_sectors <= zone->wp)
+ return len;
+
+ if (sector > zone->wp)
+ return 0;
+
+ return (zone->wp - sector) << SECTOR_SHIFT;
}
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
@@ -118,14 +149,14 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
return BLK_STS_OK;
}
-static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
+static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
+ sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
- unsigned int zno = null_zone_no(dev, sector);
- struct blk_zone *zone = &dev->zones[zno];
+ struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
size_t i;
- switch (req_op(cmd->rq)) {
+ switch (op) {
case REQ_OP_ZONE_RESET_ALL:
for (i = 0; i < dev->nr_zones; i++) {
if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
@@ -141,6 +172,29 @@ static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
break;
+ case REQ_OP_ZONE_OPEN:
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+ if (zone->cond == BLK_ZONE_COND_FULL)
+ return BLK_STS_IOERR;
+
+ zone->cond = BLK_ZONE_COND_EXP_OPEN;
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+ if (zone->cond == BLK_ZONE_COND_FULL)
+ return BLK_STS_IOERR;
+
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ break;
+ case REQ_OP_ZONE_FINISH:
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zone->len;
+ break;
default:
return BLK_STS_NOTSUPP;
}
@@ -155,7 +209,10 @@ blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
return null_zone_write(cmd, sector, nr_sectors);
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
- return null_zone_reset(cmd, sector);
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
+ return null_zone_mgmt(cmd, op, sector);
default:
return BLK_STS_OK;
}
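With this change null_report_zones() follows the callback-based report_zones API: instead of filling a caller-supplied array and returning the count through a pointer, it invokes the report_zones_cb once per zone (on a stack copy, so stacked DM drivers may remap the zone freely) and returns the number of zones reported. A hedged sketch of how a block-layer consumer drives the new interface through blkdev_report_zones(); the counting callback is purely illustrative, and BLK_ALL_ZONES is assumed to come from the same block-layer rework:

#include <linux/blkdev.h>

/* Illustrative callback: count sequential zones whose write pointer moved. */
static int count_dirty_zones_cb(struct blk_zone *zone, unsigned int idx,
				void *data)
{
	unsigned int *dirty = data;

	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL &&
	    zone->wp != zone->start)
		(*dirty)++;

	return 0;	/* a non-zero return stops the iteration with an error */
}

/* In a caller that holds a struct block_device *bdev: */
	unsigned int dirty = 0;
	int ret;

	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				  count_dirty_zones_cb, &dirty);
	/* ret is the number of zones visited, or a negative errno. */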
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 9c37047f4b56..aacdeed93320 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -67,6 +67,13 @@ config TCG_TIS_SPI
within Linux. To compile this driver as a module, choose M here;
the module will be called tpm_tis_spi.
+config TCG_TIS_SPI_CR50
+ bool "Cr50 SPI Interface"
+ depends on TCG_TIS_SPI
+ help
+ If you have an H1 secure module running Cr50 firmware on the SPI bus,
+ say Yes and it will be accessible from within Linux.
+
config TCG_TIS_I2C_ATMEL
tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
depends on I2C
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index c354cdff9c62..5a0d99d4fec0 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -21,7 +21,9 @@ tpm-$(CONFIG_EFI) += eventlog/efi.o
tpm-$(CONFIG_OF) += eventlog/of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
-obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
+obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi_mod.o
+tpm_tis_spi_mod-y := tpm_tis_spi.o
+tpm_tis_spi_mod-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index d7a3888ad80f..a438b1206fcb 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/tpm_eventlog.h>
@@ -394,7 +395,11 @@ int tpm_pm_suspend(struct device *dev)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
- return 0;
+ goto suspended;
+
+ if ((chip->flags & TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED) &&
+ !pm_suspend_via_firmware())
+ goto suspended;
if (!tpm_chip_start(chip)) {
if (chip->flags & TPM_CHIP_FLAG_TPM2)
@@ -405,6 +410,7 @@ int tpm_pm_suspend(struct device *dev)
tpm_chip_stop(chip);
}
+suspended:
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);
@@ -453,62 +459,6 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
}
EXPORT_SYMBOL_GPL(tpm_get_random);
-/**
- * tpm_seal_trusted() - seal a trusted key payload
- * @chip: a &struct tpm_chip instance, %NULL for the default chip
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
- *
- * Note: only TPM 2.0 chip are supported. TPM 1.x implementation is located in
- * the keyring subsystem.
- *
- * Return: same as with tpm_transmit_cmd()
- */
-int tpm_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- int rc;
-
- chip = tpm_find_get_ops(chip);
- if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
- return -ENODEV;
-
- rc = tpm2_seal_trusted(chip, payload, options);
-
- tpm_put_ops(chip);
- return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_seal_trusted);
-
-/**
- * tpm_unseal_trusted() - unseal a trusted key
- * @chip: a &struct tpm_chip instance, %NULL for the default chip
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
- *
- * Note: only TPM 2.0 chip are supported. TPM 1.x implementation is located in
- * the keyring subsystem.
- *
- * Return: same as with tpm_transmit_cmd()
- */
-int tpm_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- int rc;
-
- chip = tpm_find_get_ops(chip);
- if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
- return -ENODEV;
-
- rc = tpm2_unseal_trusted(chip, payload, options);
-
- tpm_put_ops(chip);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_unseal_trusted);
-
static int __init tpm_init(void)
{
int rc;
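tpm_pm_suspend() now also skips the shutdown/savestate sequence when the chip carries the new TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED flag and the suspend does not go through firmware (pm_suspend_via_firmware() returns false), i.e. the platform keeps the TPM powered across a suspend-to-idle style transition. A driver opts in at probe time; a minimal sketch of the opt-in, mirroring what the Cr50 SPI driver does later in this series (the "spi" pointer stands in for the probing driver's own device):

/*
 * Resulting suspend decision (see tpm_pm_suspend() above):
 *
 *   TPM_CHIP_FLAG_ALWAYS_POWERED set              -> skip shutdown/savestate
 *   TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED set and
 *   !pm_suspend_via_firmware()                    -> skip shutdown/savestate
 *   otherwise                                     -> TPM2 shutdown(SU_STATE)
 *                                                    or TPM1 SaveState
 */

/* Driver-side opt-in, after the chip has been registered: */
	struct tpm_chip *chip = dev_get_drvdata(&spi->dev);

	chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;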
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index edfa89160010..3b53b3e5ec3e 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -217,6 +217,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ struct tpm1_version *version;
ssize_t rc = 0;
char *str = buf;
cap_t cap;
@@ -232,31 +233,31 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
- /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
- rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
+ /* TPM 1.2 */
+ if (!tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version",
- sizeof(cap.tpm_version_1_2));
- if (!rc) {
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version_1_2.Major,
- cap.tpm_version_1_2.Minor,
- cap.tpm_version_1_2.revMajor,
- cap.tpm_version_1_2.revMinor);
- } else {
- /* Otherwise just use TPM_STRUCT_VER */
- if (tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
- "attempting to determine the 1.1 version",
- sizeof(cap.tpm_version)))
- goto out_ops;
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version.Major,
- cap.tpm_version.Minor,
- cap.tpm_version.revMajor,
- cap.tpm_version.revMinor);
+ sizeof(cap.version2))) {
+ version = &cap.version2.version;
+ goto out_print;
}
+
+ /* TPM 1.1 */
+ if (tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version",
+ sizeof(cap.version1))) {
+ goto out_ops;
+ }
+
+ version = &cap.version1;
+
+out_print:
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ version->major, version->minor,
+ version->rev_major, version->rev_minor);
+
rc = str - buf;
+
out_ops:
tpm_put_ops(chip);
return rc;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index a7fea3e0ca86..b9e1547be6b5 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -25,7 +25,6 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/tpm.h>
-#include <linux/highmem.h>
#include <linux/tpm_eventlog.h>
#ifdef CONFIG_X86
@@ -58,123 +57,6 @@ enum tpm_addr {
#define TPM_ERR_DISABLED 0x7
#define TPM_ERR_INVALID_POSTINIT 38
-#define TPM_HEADER_SIZE 10
-
-enum tpm2_const {
- TPM2_PLATFORM_PCR = 24,
- TPM2_PCR_SELECT_MIN = ((TPM2_PLATFORM_PCR + 7) / 8),
-};
-
-enum tpm2_timeouts {
- TPM2_TIMEOUT_A = 750,
- TPM2_TIMEOUT_B = 2000,
- TPM2_TIMEOUT_C = 200,
- TPM2_TIMEOUT_D = 30,
- TPM2_DURATION_SHORT = 20,
- TPM2_DURATION_MEDIUM = 750,
- TPM2_DURATION_LONG = 2000,
- TPM2_DURATION_LONG_LONG = 300000,
- TPM2_DURATION_DEFAULT = 120000,
-};
-
-enum tpm2_structures {
- TPM2_ST_NO_SESSIONS = 0x8001,
- TPM2_ST_SESSIONS = 0x8002,
-};
-
-/* Indicates from what layer of the software stack the error comes from */
-#define TSS2_RC_LAYER_SHIFT 16
-#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
-
-enum tpm2_return_codes {
- TPM2_RC_SUCCESS = 0x0000,
- TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
- TPM2_RC_HANDLE = 0x008B,
- TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
- TPM2_RC_FAILURE = 0x0101,
- TPM2_RC_DISABLED = 0x0120,
- TPM2_RC_COMMAND_CODE = 0x0143,
- TPM2_RC_TESTING = 0x090A, /* RC_WARN */
- TPM2_RC_REFERENCE_H0 = 0x0910,
- TPM2_RC_RETRY = 0x0922,
-};
-
-enum tpm2_command_codes {
- TPM2_CC_FIRST = 0x011F,
- TPM2_CC_HIERARCHY_CONTROL = 0x0121,
- TPM2_CC_HIERARCHY_CHANGE_AUTH = 0x0129,
- TPM2_CC_CREATE_PRIMARY = 0x0131,
- TPM2_CC_SEQUENCE_COMPLETE = 0x013E,
- TPM2_CC_SELF_TEST = 0x0143,
- TPM2_CC_STARTUP = 0x0144,
- TPM2_CC_SHUTDOWN = 0x0145,
- TPM2_CC_NV_READ = 0x014E,
- TPM2_CC_CREATE = 0x0153,
- TPM2_CC_LOAD = 0x0157,
- TPM2_CC_SEQUENCE_UPDATE = 0x015C,
- TPM2_CC_UNSEAL = 0x015E,
- TPM2_CC_CONTEXT_LOAD = 0x0161,
- TPM2_CC_CONTEXT_SAVE = 0x0162,
- TPM2_CC_FLUSH_CONTEXT = 0x0165,
- TPM2_CC_VERIFY_SIGNATURE = 0x0177,
- TPM2_CC_GET_CAPABILITY = 0x017A,
- TPM2_CC_GET_RANDOM = 0x017B,
- TPM2_CC_PCR_READ = 0x017E,
- TPM2_CC_PCR_EXTEND = 0x0182,
- TPM2_CC_EVENT_SEQUENCE_COMPLETE = 0x0185,
- TPM2_CC_HASH_SEQUENCE_START = 0x0186,
- TPM2_CC_CREATE_LOADED = 0x0191,
- TPM2_CC_LAST = 0x0193, /* Spec 1.36 */
-};
-
-enum tpm2_permanent_handles {
- TPM2_RS_PW = 0x40000009,
-};
-
-enum tpm2_capabilities {
- TPM2_CAP_HANDLES = 1,
- TPM2_CAP_COMMANDS = 2,
- TPM2_CAP_PCRS = 5,
- TPM2_CAP_TPM_PROPERTIES = 6,
-};
-
-enum tpm2_properties {
- TPM_PT_TOTAL_COMMANDS = 0x0129,
-};
-
-enum tpm2_startup_types {
- TPM2_SU_CLEAR = 0x0000,
- TPM2_SU_STATE = 0x0001,
-};
-
-enum tpm2_cc_attrs {
- TPM2_CC_ATTR_CHANDLES = 25,
- TPM2_CC_ATTR_RHANDLE = 28,
-};
-
-#define TPM_VID_INTEL 0x8086
-#define TPM_VID_WINBOND 0x1050
-#define TPM_VID_STM 0x104A
-
-enum tpm_chip_flags {
- TPM_CHIP_FLAG_TPM2 = BIT(1),
- TPM_CHIP_FLAG_IRQ = BIT(2),
- TPM_CHIP_FLAG_VIRTUAL = BIT(3),
- TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
- TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
-};
-
-#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
-
-struct tpm_header {
- __be16 tag;
- __be32 length;
- union {
- __be32 ordinal;
- __be32 return_code;
- };
-} __packed;
-
#define TPM_TAG_RQU_COMMAND 193
struct stclear_flags_t {
@@ -186,19 +68,16 @@ struct stclear_flags_t {
u8 bGlobalLock;
} __packed;
-struct tpm_version_t {
- u8 Major;
- u8 Minor;
- u8 revMajor;
- u8 revMinor;
+struct tpm1_version {
+ u8 major;
+ u8 minor;
+ u8 rev_major;
+ u8 rev_minor;
} __packed;
-struct tpm_version_1_2_t {
- __be16 tag;
- u8 Major;
- u8 Minor;
- u8 revMajor;
- u8 revMinor;
+struct tpm1_version2 {
+ __be16 tag;
+ struct tpm1_version version;
} __packed;
struct timeout_t {
@@ -243,8 +122,8 @@ typedef union {
struct stclear_flags_t stclear_flags;
__u8 owned;
__be32 num_pcrs;
- struct tpm_version_t tpm_version;
- struct tpm_version_1_2_t tpm_version_1_2;
+ struct tpm1_version version1;
+ struct tpm1_version2 version2;
__be32 manufacturer_id;
struct timeout_t timeout;
struct duration_t duration;
@@ -274,102 +153,6 @@ enum tpm_sub_capabilities {
* compiler warnings about stack frame size. */
#define TPM_MAX_RNG_DATA 128
-/* A string buffer type for constructing TPM commands. This is based on the
- * ideas of string buffer code in security/keys/trusted.h but is heap based
- * in order to keep the stack usage minimal.
- */
-
-enum tpm_buf_flags {
- TPM_BUF_OVERFLOW = BIT(0),
-};
-
-struct tpm_buf {
- struct page *data_page;
- unsigned int flags;
- u8 *data;
-};
-
-static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- head->tag = cpu_to_be16(tag);
- head->length = cpu_to_be32(sizeof(*head));
- head->ordinal = cpu_to_be32(ordinal);
-}
-
-static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
-{
- buf->data_page = alloc_page(GFP_HIGHUSER);
- if (!buf->data_page)
- return -ENOMEM;
-
- buf->flags = 0;
- buf->data = kmap(buf->data_page);
- tpm_buf_reset(buf, tag, ordinal);
- return 0;
-}
-
-static inline void tpm_buf_destroy(struct tpm_buf *buf)
-{
- kunmap(buf->data_page);
- __free_page(buf->data_page);
-}
-
-static inline u32 tpm_buf_length(struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- return be32_to_cpu(head->length);
-}
-
-static inline u16 tpm_buf_tag(struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- return be16_to_cpu(head->tag);
-}
-
-static inline void tpm_buf_append(struct tpm_buf *buf,
- const unsigned char *new_data,
- unsigned int new_len)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
- u32 len = tpm_buf_length(buf);
-
- /* Return silently if overflow has already happened. */
- if (buf->flags & TPM_BUF_OVERFLOW)
- return;
-
- if ((len + new_len) > PAGE_SIZE) {
- WARN(1, "tpm_buf: overflow\n");
- buf->flags |= TPM_BUF_OVERFLOW;
- return;
- }
-
- memcpy(&buf->data[len], new_data, new_len);
- head->length = cpu_to_be32(len + new_len);
-}
-
-static inline void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value)
-{
- tpm_buf_append(buf, &value, 1);
-}
-
-static inline void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value)
-{
- __be16 value2 = cpu_to_be16(value);
-
- tpm_buf_append(buf, (u8 *) &value2, 2);
-}
-
-static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
-{
- __be32 value2 = cpu_to_be32(value);
-
- tpm_buf_append(buf, (u8 *) &value2, 4);
-}
-
extern struct class *tpm_class;
extern struct class *tpmrm_class;
extern dev_t tpm_devt;
@@ -429,11 +212,6 @@ static inline void tpm_add_ppi(struct tpm_chip *chip)
}
#endif
-static inline u32 tpm2_rc_value(u32 rc)
-{
- return (rc & BIT(7)) ? rc & 0xff : rc;
-}
-
int tpm2_get_timeouts(struct tpm_chip *chip);
int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digest, u16 *digest_size_ptr);
@@ -441,12 +219,6 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digests);
int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max);
void tpm2_flush_context(struct tpm_chip *chip, u32 handle);
-int tpm2_seal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options);
-int tpm2_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options);
ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
u32 *value, const char *desc);
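The tpm_buf helpers, the TPM2 constants and struct tpm_header are removed from this driver-private header because they move to the public <linux/tpm.h> elsewhere in this series, so the trusted-keys code (whose seal/unseal entry points are dropped from tpm-interface.c above and tpm2-cmd.c below) can build TPM commands itself. The helper API is unchanged; for reference, a hedged sketch of building and sending a command with it, modelled on tpm2_get_random() (the function name is illustrative, and the third argument of tpm_transmit_cmd() is the minimum expected response body length):

/* Sketch only: issue TPM2_CC_GET_RANDOM for 8 bytes with the tpm_buf API. */
static int example_get_random(struct tpm_chip *chip)
{
	struct tpm_buf buf;
	int rc;

	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
	if (rc)
		return rc;

	tpm_buf_append_u16(&buf, 8);		/* bytesRequested */

	/* Minimum response body: the u16 size field of the returned TPM2B. */
	rc = tpm_transmit_cmd(chip, &buf, 2, "attempting get random");
	/* On success the random bytes follow the u16 count in the body. */

	tpm_buf_destroy(&buf);
	return rc;
}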
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index 149e953ca369..ca7158fa6e6c 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -343,6 +343,7 @@ int tpm1_get_timeouts(struct tpm_chip *chip)
{
cap_t cap;
unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4];
+ unsigned long durations[3];
ssize_t rc;
rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, NULL,
@@ -427,6 +428,20 @@ int tpm1_get_timeouts(struct tpm_chip *chip)
usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long));
chip->duration[TPM_LONG_LONG] = 0; /* not used under 1.2 */
+ /*
+ * Provide the ability for vendor overrides of duration values in case
+ * of misreporting.
+ */
+ if (chip->ops->update_durations)
+ chip->ops->update_durations(chip, durations);
+
+ if (chip->duration_adjusted) {
+ dev_info(&chip->dev, HW_ERR "Adjusting reported durations.");
+ chip->duration[TPM_SHORT] = durations[0];
+ chip->duration[TPM_MEDIUM] = durations[1];
+ chip->duration[TPM_LONG] = durations[2];
+ }
+
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
* fix up the resulting too-small TPM_SHORT value to make things work.
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index ba9acae83bff..fdb457704aa7 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -13,20 +13,6 @@
#include "tpm.h"
#include <crypto/hash_info.h>
-#include <keys/trusted-type.h>
-
-enum tpm2_object_attributes {
- TPM2_OA_USER_WITH_AUTH = BIT(6),
-};
-
-enum tpm2_session_attributes {
- TPM2_SA_CONTINUE_SESSION = BIT(0),
-};
-
-struct tpm2_hash {
- unsigned int crypto_id;
- unsigned int tpm_id;
-};
static struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SHA1, TPM_ALG_SHA1},
@@ -377,299 +363,6 @@ void tpm2_flush_context(struct tpm_chip *chip, u32 handle)
tpm_buf_destroy(&buf);
}
-/**
- * tpm_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
- *
- * @buf: an allocated tpm_buf instance
- * @session_handle: session handle
- * @nonce: the session nonce, may be NULL if not used
- * @nonce_len: the session nonce length, may be 0 if not used
- * @attributes: the session attributes
- * @hmac: the session HMAC or password, may be NULL if not used
- * @hmac_len: the session HMAC or password length, maybe 0 if not used
- */
-static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
- const u8 *nonce, u16 nonce_len,
- u8 attributes,
- const u8 *hmac, u16 hmac_len)
-{
- tpm_buf_append_u32(buf, 9 + nonce_len + hmac_len);
- tpm_buf_append_u32(buf, session_handle);
- tpm_buf_append_u16(buf, nonce_len);
-
- if (nonce && nonce_len)
- tpm_buf_append(buf, nonce, nonce_len);
-
- tpm_buf_append_u8(buf, attributes);
- tpm_buf_append_u16(buf, hmac_len);
-
- if (hmac && hmac_len)
- tpm_buf_append(buf, hmac, hmac_len);
-}
-
-/**
- * tpm2_seal_trusted() - seal the payload of a trusted key
- *
- * @chip: TPM chip to use
- * @payload: the key data in clear and encrypted form
- * @options: authentication values and other options
- *
- * Return: < 0 on error and 0 on success.
- */
-int tpm2_seal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- unsigned int blob_len;
- struct tpm_buf buf;
- u32 hash;
- int i;
- int rc;
-
- for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) {
- if (options->hash == tpm2_hash_map[i].crypto_id) {
- hash = tpm2_hash_map[i].tpm_id;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(tpm2_hash_map))
- return -EINVAL;
-
- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
- if (rc)
- return rc;
-
- tpm_buf_append_u32(&buf, options->keyhandle);
- tpm2_buf_append_auth(&buf, TPM2_RS_PW,
- NULL /* nonce */, 0,
- 0 /* session_attributes */,
- options->keyauth /* hmac */,
- TPM_DIGEST_SIZE);
-
- /* sensitive */
- tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
-
- tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
- tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
- tpm_buf_append_u16(&buf, payload->key_len + 1);
- tpm_buf_append(&buf, payload->key, payload->key_len);
- tpm_buf_append_u8(&buf, payload->migratable);
-
- /* public */
- tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
- tpm_buf_append_u16(&buf, TPM_ALG_KEYEDHASH);
- tpm_buf_append_u16(&buf, hash);
-
- /* policy */
- if (options->policydigest_len) {
- tpm_buf_append_u32(&buf, 0);
- tpm_buf_append_u16(&buf, options->policydigest_len);
- tpm_buf_append(&buf, options->policydigest,
- options->policydigest_len);
- } else {
- tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
- tpm_buf_append_u16(&buf, 0);
- }
-
- /* public parameters */
- tpm_buf_append_u16(&buf, TPM_ALG_NULL);
- tpm_buf_append_u16(&buf, 0);
-
- /* outside info */
- tpm_buf_append_u16(&buf, 0);
-
- /* creation PCR */
- tpm_buf_append_u32(&buf, 0);
-
- if (buf.flags & TPM_BUF_OVERFLOW) {
- rc = -E2BIG;
- goto out;
- }
-
- rc = tpm_transmit_cmd(chip, &buf, 4, "sealing data");
- if (rc)
- goto out;
-
- blob_len = be32_to_cpup((__be32 *) &buf.data[TPM_HEADER_SIZE]);
- if (blob_len > MAX_BLOB_SIZE) {
- rc = -E2BIG;
- goto out;
- }
- if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 4 + blob_len) {
- rc = -EFAULT;
- goto out;
- }
-
- memcpy(payload->blob, &buf.data[TPM_HEADER_SIZE + 4], blob_len);
- payload->blob_len = blob_len;
-
-out:
- tpm_buf_destroy(&buf);
-
- if (rc > 0) {
- if (tpm2_rc_value(rc) == TPM2_RC_HASH)
- rc = -EINVAL;
- else
- rc = -EPERM;
- }
-
- return rc;
-}
-
-/**
- * tpm2_load_cmd() - execute a TPM2_Load command
- *
- * @chip: TPM chip to use
- * @payload: the key data in clear and encrypted form
- * @options: authentication values and other options
- * @blob_handle: returned blob handle
- *
- * Return: 0 on success.
- * -E2BIG on wrong payload size.
- * -EPERM on tpm error status.
- * < 0 error from tpm_transmit_cmd.
- */
-static int tpm2_load_cmd(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options,
- u32 *blob_handle)
-{
- struct tpm_buf buf;
- unsigned int private_len;
- unsigned int public_len;
- unsigned int blob_len;
- int rc;
-
- private_len = be16_to_cpup((__be16 *) &payload->blob[0]);
- if (private_len > (payload->blob_len - 2))
- return -E2BIG;
-
- public_len = be16_to_cpup((__be16 *) &payload->blob[2 + private_len]);
- blob_len = private_len + public_len + 4;
- if (blob_len > payload->blob_len)
- return -E2BIG;
-
- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_LOAD);
- if (rc)
- return rc;
-
- tpm_buf_append_u32(&buf, options->keyhandle);
- tpm2_buf_append_auth(&buf, TPM2_RS_PW,
- NULL /* nonce */, 0,
- 0 /* session_attributes */,
- options->keyauth /* hmac */,
- TPM_DIGEST_SIZE);
-
- tpm_buf_append(&buf, payload->blob, blob_len);
-
- if (buf.flags & TPM_BUF_OVERFLOW) {
- rc = -E2BIG;
- goto out;
- }
-
- rc = tpm_transmit_cmd(chip, &buf, 4, "loading blob");
- if (!rc)
- *blob_handle = be32_to_cpup(
- (__be32 *) &buf.data[TPM_HEADER_SIZE]);
-
-out:
- tpm_buf_destroy(&buf);
-
- if (rc > 0)
- rc = -EPERM;
-
- return rc;
-}
-
-/**
- * tpm2_unseal_cmd() - execute a TPM2_Unload command
- *
- * @chip: TPM chip to use
- * @payload: the key data in clear and encrypted form
- * @options: authentication values and other options
- * @blob_handle: blob handle
- *
- * Return: 0 on success
- * -EPERM on tpm error status
- * < 0 error from tpm_transmit_cmd
- */
-static int tpm2_unseal_cmd(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options,
- u32 blob_handle)
-{
- struct tpm_buf buf;
- u16 data_len;
- u8 *data;
- int rc;
-
- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
- if (rc)
- return rc;
-
- tpm_buf_append_u32(&buf, blob_handle);
- tpm2_buf_append_auth(&buf,
- options->policyhandle ?
- options->policyhandle : TPM2_RS_PW,
- NULL /* nonce */, 0,
- TPM2_SA_CONTINUE_SESSION,
- options->blobauth /* hmac */,
- TPM_DIGEST_SIZE);
-
- rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
- if (rc > 0)
- rc = -EPERM;
-
- if (!rc) {
- data_len = be16_to_cpup(
- (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
- if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
- rc = -EFAULT;
- goto out;
- }
-
- if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 6 + data_len) {
- rc = -EFAULT;
- goto out;
- }
- data = &buf.data[TPM_HEADER_SIZE + 6];
-
- memcpy(payload->key, data, data_len - 1);
- payload->key_len = data_len - 1;
- payload->migratable = data[data_len - 1];
- }
-
-out:
- tpm_buf_destroy(&buf);
- return rc;
-}
-
-/**
- * tpm2_unseal_trusted() - unseal the payload of a trusted key
- *
- * @chip: TPM chip to use
- * @payload: the key data in clear and encrypted form
- * @options: authentication values and other options
- *
- * Return: Same as with tpm_transmit_cmd.
- */
-int tpm2_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- u32 blob_handle;
- int rc;
-
- rc = tpm2_load_cmd(chip, payload, options, &blob_handle);
- if (rc)
- return rc;
-
- rc = tpm2_unseal_cmd(chip, payload, options, blob_handle);
- tpm2_flush_context(chip, blob_handle);
- return rc;
-}
-
struct tpm2_get_cap_out {
u8 more_data;
__be32 subcap_id;
@@ -939,6 +632,10 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
chip->cc_attrs_tbl = devm_kcalloc(&chip->dev, 4, nr_commands,
GFP_KERNEL);
+ if (!chip->cc_attrs_tbl) {
+ rc = -ENOMEM;
+ goto out;
+ }
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
if (rc)
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index e59f1f91d7f3..a9dcf31eadd2 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -22,6 +22,7 @@
#include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2"
+#define TPM_CRB_MAX_RESOURCES 3
static const guid_t crb_acpi_start_guid =
GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
@@ -91,7 +92,6 @@ enum crb_status {
struct crb_priv {
u32 sm;
const char *hid;
- void __iomem *iobase;
struct crb_regs_head __iomem *regs_h;
struct crb_regs_tail __iomem *regs_t;
u8 __iomem *cmd;
@@ -434,21 +434,27 @@ static const struct tpm_class_ops tpm_crb = {
static int crb_check_resource(struct acpi_resource *ares, void *data)
{
- struct resource *io_res = data;
+ struct resource *iores_array = data;
struct resource_win win;
struct resource *res = &(win.res);
+ int i;
if (acpi_dev_resource_memory(ares, res) ||
acpi_dev_resource_address_space(ares, &win)) {
- *io_res = *res;
- io_res->name = NULL;
+ for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) {
+ if (resource_type(iores_array + i) != IORESOURCE_MEM) {
+ iores_array[i] = *res;
+ iores_array[i].name = NULL;
+ break;
+ }
+ }
}
return 1;
}
-static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
- struct resource *io_res, u64 start, u32 size)
+static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
+ void __iomem **iobase_ptr, u64 start, u32 size)
{
struct resource new_res = {
.start = start,
@@ -460,10 +466,16 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
if (start != new_res.start)
return (void __iomem *) ERR_PTR(-EINVAL);
- if (!resource_contains(io_res, &new_res))
+ if (!iores)
return devm_ioremap_resource(dev, &new_res);
- return priv->iobase + (new_res.start - io_res->start);
+ if (!*iobase_ptr) {
+ *iobase_ptr = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(*iobase_ptr))
+ return *iobase_ptr;
+ }
+
+ return *iobase_ptr + (new_res.start - iores->start);
}
/*
@@ -490,9 +502,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
struct acpi_table_tpm2 *buf)
{
- struct list_head resources;
- struct resource io_res;
+ struct list_head acpi_resource_list;
+ struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} };
+ void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL};
struct device *dev = &device->dev;
+ struct resource *iores;
+ void __iomem **iobase_ptr;
+ int i;
u32 pa_high, pa_low;
u64 cmd_pa;
u32 cmd_size;
@@ -501,21 +517,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
u32 rsp_size;
int ret;
- INIT_LIST_HEAD(&resources);
- ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
- &io_res);
+ INIT_LIST_HEAD(&acpi_resource_list);
+ ret = acpi_dev_get_resources(device, &acpi_resource_list,
+ crb_check_resource, iores_array);
if (ret < 0)
return ret;
- acpi_dev_free_resource_list(&resources);
+ acpi_dev_free_resource_list(&acpi_resource_list);
- if (resource_type(&io_res) != IORESOURCE_MEM) {
+ if (resource_type(iores_array) != IORESOURCE_MEM) {
dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
return -EINVAL;
+ } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
+ IORESOURCE_MEM) {
+ dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
+ memset(iores_array + TPM_CRB_MAX_RESOURCES,
+ 0, sizeof(*iores_array));
+ iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
}
- priv->iobase = devm_ioremap_resource(dev, &io_res);
- if (IS_ERR(priv->iobase))
- return PTR_ERR(priv->iobase);
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+ if (buf->control_address >= iores_array[i].start &&
+ buf->control_address + sizeof(struct crb_regs_tail) - 1 <=
+ iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address,
+ sizeof(struct crb_regs_tail));
+
+ if (IS_ERR(priv->regs_t))
+ return PTR_ERR(priv->regs_t);
/* The ACPI IO region starts at the head area and continues to include
* the control area, as one nice sane region except for some older
@@ -523,9 +559,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
*/
if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
(priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
- if (buf->control_address == io_res.start +
+ if (iores &&
+ buf->control_address == iores->start +
sizeof(*priv->regs_h))
- priv->regs_h = priv->iobase;
+ priv->regs_h = *iobase_ptr;
else
dev_warn(dev, FW_BUG "Bad ACPI memory layout");
}
@@ -534,13 +571,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
if (ret)
return ret;
- priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
- sizeof(struct crb_regs_tail));
- if (IS_ERR(priv->regs_t)) {
- ret = PTR_ERR(priv->regs_t);
- goto out_relinquish_locality;
- }
-
/*
* PTT HW bug w/a: wake up the device to access
* possibly not retained registers.
@@ -552,13 +582,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
cmd_pa = ((u64)pa_high << 32) | pa_low;
- cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa,
- ioread32(&priv->regs_t->ctrl_cmd_size));
+ cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size);
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; iores_array[i].end; ++i) {
+ if (cmd_pa >= iores_array[i].start &&
+ cmd_pa <= iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ if (iores)
+ cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);
dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
pa_high, pa_low, cmd_size);
- priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
+ priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size);
if (IS_ERR(priv->cmd)) {
ret = PTR_ERR(priv->cmd);
goto out;
@@ -566,11 +609,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
rsp_pa = le64_to_cpu(__rsp_pa);
- rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa,
- ioread32(&priv->regs_t->ctrl_rsp_size));
+ rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+ if (rsp_pa >= iores_array[i].start &&
+ rsp_pa <= iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ if (iores)
+ rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);
if (cmd_pa != rsp_pa) {
- priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
+ priv->rsp = crb_map_res(dev, iores, iobase_ptr,
+ rsp_pa, rsp_size);
ret = PTR_ERR_OR_ZERO(priv->rsp);
goto out;
}
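crb_map_io() now collects up to TPM_CRB_MAX_RESOURCES memory resources instead of a single one, maps each lazily on first use through crb_map_res(), and resolves the control, command and response areas against whichever resource contains their address. The same containment search is repeated three times above; conceptually it is the helper below, a readability sketch rather than code from the patch:

/* Sketch: find the ACPI memory resource that contains [start, start + size). */
static struct resource *crb_find_resource(struct resource *iores_array,
					  u64 start, u32 size)
{
	int i;

	for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
		if (start >= iores_array[i].start &&
		    start + size - 1 <= iores_array[i].end)
			return iores_array + i;
	}

	/* No match: crb_map_res() falls back to mapping a fresh region. */
	return NULL;
}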
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index e4fdde93ed4c..e7df342a317d 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -286,7 +286,7 @@ static int tpm_tis_plat_probe(struct platform_device *pdev)
}
tpm_info.res = *res;
- tpm_info.irq = platform_get_irq(pdev, 0);
+ tpm_info.irq = platform_get_irq_optional(pdev, 0);
if (tpm_info.irq <= 0) {
if (pdev != force_pdev)
tpm_info.irq = -1;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 270f43acbb77..8af2cee1a762 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -506,6 +506,84 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
return rc;
}
+struct tis_vendor_durations_override {
+ u32 did_vid;
+ struct tpm1_version version;
+ unsigned long durations[3];
+};
+
+static const struct tis_vendor_durations_override vendor_dur_overrides[] = {
+ /* STMicroelectronics 0x104a */
+ { 0x0000104a,
+ { 1, 2, 8, 28 },
+ { (2 * 60 * HZ), (2 * 60 * HZ), (2 * 60 * HZ) } },
+};
+
+static void tpm_tis_update_durations(struct tpm_chip *chip,
+ unsigned long *duration_cap)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ struct tpm1_version *version;
+ u32 did_vid;
+ int i, rc;
+ cap_t cap;
+
+ chip->duration_adjusted = false;
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
+ if (rc < 0) {
+ dev_warn(&chip->dev, "%s: failed to read did_vid. %d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Try to get a TPM version 1.2 or 1.1 TPM_CAP_VERSION_INFO */
+ rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
+ "attempting to determine the 1.2 version",
+ sizeof(cap.version2));
+ if (!rc) {
+ version = &cap.version2.version;
+ } else {
+ rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version",
+ sizeof(cap.version1));
+
+ if (rc)
+ goto out;
+
+ version = &cap.version1;
+ }
+
+ for (i = 0; i != ARRAY_SIZE(vendor_dur_overrides); i++) {
+ if (vendor_dur_overrides[i].did_vid != did_vid)
+ continue;
+
+ if ((version->major ==
+ vendor_dur_overrides[i].version.major) &&
+ (version->minor ==
+ vendor_dur_overrides[i].version.minor) &&
+ (version->rev_major ==
+ vendor_dur_overrides[i].version.rev_major) &&
+ (version->rev_minor ==
+ vendor_dur_overrides[i].version.rev_minor)) {
+
+ memcpy(duration_cap,
+ vendor_dur_overrides[i].durations,
+ sizeof(vendor_dur_overrides[i].durations));
+
+ chip->duration_adjusted = true;
+ goto out;
+ }
+ }
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+}
+
struct tis_vendor_timeout_override {
u32 did_vid;
unsigned long timeout_us[4];
@@ -842,6 +920,7 @@ static const struct tpm_class_ops tpm_tis = {
.send = tpm_tis_send,
.cancel = tpm_tis_ready,
.update_timeouts = tpm_tis_update_timeouts,
+ .update_durations = tpm_tis_update_durations,
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = tpm_tis_req_canceled,
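tpm_tis_update_durations() implements the new ->update_durations hook: it matches the chip's DID/VID and exact TPM 1.x version against a small override table and, on a hit, replaces the short/medium/long durations and sets chip->duration_adjusted so tpm1_get_timeouts() picks the values up. Adding a quirk for another part is just another table entry; a purely hypothetical example (the second vendor ID, version and values are made up):

static const struct tis_vendor_durations_override vendor_dur_overrides[] = {
	/* STMicroelectronics 0x104a (the entry added by this patch) */
	{ 0x0000104a,
	  { 1, 2, 8, 28 },
	  { (2 * 60 * HZ), (2 * 60 * HZ), (2 * 60 * HZ) } },
	/* Hypothetical further quirk -- not part of the patch: */
	{ 0x00001234,
	  { 1, 2, 3, 4 },
	  { (30 * HZ), (60 * HZ), (2 * 60 * HZ) } },
};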
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 19513e622053..d1754fd6c573 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -20,42 +20,64 @@
* Dorn and Kyleen Hall and Jarko Sakkinnen.
*/
+#include <linux/acpi.h>
+#include <linux/completion.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-#include <linux/acpi.h>
-#include <linux/freezer.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/tpm.h>
+
#include "tpm.h"
#include "tpm_tis_core.h"
+#include "tpm_tis_spi.h"
#define MAX_SPI_FRAMESIZE 64
-struct tpm_tis_spi_phy {
- struct tpm_tis_data priv;
- struct spi_device *spi_device;
- u8 *iobuf;
-};
-
-static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
+/*
+ * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
+ * keep trying to read from the device until MISO goes high indicating the
+ * wait state has ended.
+ *
+ * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
+ */
+static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *spi_xfer)
{
- return container_of(data, struct tpm_tis_spi_phy, priv);
+ struct spi_message m;
+ int ret, i;
+
+ if ((phy->iobuf[3] & 0x01) == 0) {
+ // handle SPI wait states
+ phy->iobuf[0] = 0;
+
+ for (i = 0; i < TPM_RETRY; i++) {
+ spi_xfer->len = 1;
+ spi_message_init(&m);
+ spi_message_add_tail(spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+ if (phy->iobuf[0] & 0x01)
+ break;
+ }
+
+ if (i == TPM_RETRY)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
-static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *in, const u8 *out)
+int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
- int i;
struct spi_message m;
struct spi_transfer spi_xfer;
u8 transfer_len;
@@ -82,26 +104,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
- if ((phy->iobuf[3] & 0x01) == 0) {
- // handle SPI wait states
- phy->iobuf[0] = 0;
-
- for (i = 0; i < TPM_RETRY; i++) {
- spi_xfer.len = 1;
- spi_message_init(&m);
- spi_message_add_tail(&spi_xfer, &m);
- ret = spi_sync_locked(phy->spi_device, &m);
- if (ret < 0)
- goto exit;
- if (phy->iobuf[0] & 0x01)
- break;
- }
-
- if (i == TPM_RETRY) {
- ret = -ETIMEDOUT;
- goto exit;
- }
- }
+ ret = phy->flow_control(phy, &spi_xfer);
+ if (ret < 0)
+ goto exit;
spi_xfer.cs_change = 0;
spi_xfer.len = transfer_len;
@@ -117,6 +122,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
spi_message_init(&m);
spi_message_add_tail(&spi_xfer, &m);
+ reinit_completion(&phy->ready);
ret = spi_sync_locked(phy->spi_device, &m);
if (ret < 0)
goto exit;
@@ -146,7 +152,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}
-static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
+int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
__le16 result_le;
int rc;
@@ -159,7 +165,7 @@ static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
return rc;
}
-static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
+int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
__le32 result_le;
int rc;
@@ -172,7 +178,7 @@ static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
return rc;
}
-static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
+int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
__le32 value_le;
int rc;
@@ -184,6 +190,18 @@ static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
return rc;
}
+int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
+ int irq, const struct tpm_tis_phy_ops *phy_ops)
+{
+ phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ if (!phy->iobuf)
+ return -ENOMEM;
+
+ phy->spi_device = spi;
+
+ return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
+}
+
static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
.read_bytes = tpm_tis_spi_read_bytes,
.write_bytes = tpm_tis_spi_write_bytes,
@@ -202,11 +220,7 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
if (!phy)
return -ENOMEM;
- phy->spi_device = dev;
-
- phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
- if (!phy->iobuf)
- return -ENOMEM;
+ phy->flow_control = tpm_tis_spi_flow_control;
/* If the SPI device has an IRQ then use that */
if (dev->irq > 0)
@@ -214,11 +228,27 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
else
irq = -1;
- return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
- NULL);
+ init_completion(&phy->ready);
+ return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
+}
+
+typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);
+
+static int tpm_tis_spi_driver_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
+ tpm_tis_spi_probe_func probe_func;
+
+ probe_func = of_device_get_match_data(&spi->dev);
+ if (!probe_func && spi_dev_id)
+ probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
+ if (!probe_func)
+ return -ENODEV;
+
+ return probe_func(spi);
}
-static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
static int tpm_tis_spi_remove(struct spi_device *dev)
{
@@ -230,15 +260,17 @@ static int tpm_tis_spi_remove(struct spi_device *dev)
}
static const struct spi_device_id tpm_tis_spi_id[] = {
- {"tpm_tis_spi", 0},
+ { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
+ { "cr50", (unsigned long)cr50_spi_probe },
{}
};
MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
static const struct of_device_id of_tis_spi_match[] = {
- { .compatible = "st,st33htpm-spi", },
- { .compatible = "infineon,slb9670", },
- { .compatible = "tcg,tpm_tis-spi", },
+ { .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
+ { .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
+ { .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
+ { .compatible = "google,cr50", .data = cr50_spi_probe },
{}
};
MODULE_DEVICE_TABLE(of, of_tis_spi_match);
@@ -251,13 +283,12 @@ MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);
static struct spi_driver tpm_tis_spi_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tpm_tis_spi",
.pm = &tpm_tis_pm,
.of_match_table = of_match_ptr(of_tis_spi_match),
.acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
},
- .probe = tpm_tis_spi_probe,
+ .probe = tpm_tis_spi_driver_probe,
.remove = tpm_tis_spi_remove,
.id_table = tpm_tis_spi_id,
};
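Probing is now dispatched through per-device probe functions carried in the spi_device_id driver_data and the OF match data, and the SPI wait-state handling sits behind phy->flow_control, so new phy variants (such as Cr50 below) plug in without touching the core transfer path. A hedged sketch of what an additional variant inside this file could look like; every name here is illustrative and not part of the patch:

/* Hypothetical variant -- not part of the patch. */
static int example_tpm_spi_probe(struct spi_device *spi)
{
	struct tpm_tis_spi_phy *phy;

	phy = devm_kzalloc(&spi->dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	/* Reuse the generic flow control, or install a custom hook here. */
	phy->flow_control = tpm_tis_spi_flow_control;
	init_completion(&phy->ready);

	return tpm_tis_spi_init(spi, phy, spi->irq > 0 ? spi->irq : -1,
				&tpm_spi_phy_ops);
}

/*
 * It would then be registered in both match tables:
 *   { "example-tpm", (unsigned long)example_tpm_spi_probe }             (SPI id)
 *   { .compatible = "vendor,example-tpm", .data = example_tpm_spi_probe } (OF)
 */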
diff --git a/drivers/char/tpm/tpm_tis_spi.h b/drivers/char/tpm/tpm_tis_spi.h
new file mode 100644
index 000000000000..bba73979c368
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Infineon Technologies AG
+ * Copyright (C) 2016 STMicroelectronics SAS
+ */
+
+#ifndef TPM_TIS_SPI_H
+#define TPM_TIS_SPI_H
+
+#include "tpm_tis_core.h"
+
+struct tpm_tis_spi_phy {
+ struct tpm_tis_data priv;
+ struct spi_device *spi_device;
+ int (*flow_control)(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *xfer);
+ struct completion ready;
+ unsigned long wake_after;
+
+ u8 *iobuf;
+};
+
+static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_spi_phy, priv);
+}
+
+extern int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
+ int irq, const struct tpm_tis_phy_ops *phy_ops);
+
+extern int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out);
+
+extern int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result);
+extern int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result);
+extern int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value);
+
+#ifdef CONFIG_TCG_TIS_SPI_CR50
+extern int cr50_spi_probe(struct spi_device *spi);
+#else
+static inline int cr50_spi_probe(struct spi_device *spi)
+{
+ return -ENODEV;
+}
+#endif
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_TCG_TIS_SPI_CR50)
+extern int tpm_tis_spi_resume(struct device *dev);
+#else
+#define tpm_tis_spi_resume NULL
+#endif
+
+#endif
diff --git a/drivers/char/tpm/tpm_tis_spi_cr50.c b/drivers/char/tpm/tpm_tis_spi_cr50.c
new file mode 100644
index 000000000000..37d72e818335
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi_cr50.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This device driver implements a TCG PTP FIFO interface over SPI for chips
+ * with Cr50 firmware.
+ * It is based on tpm_tis_spi driver by Peter Huewe and Christophe Ricard.
+ */
+
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
+
+#include "tpm_tis_core.h"
+#include "tpm_tis_spi.h"
+
+/*
+ * Cr50 timing constants:
+ * - will not go to sleep earlier than CR50_SLEEP_DELAY_MSEC after the last access.
+ * - needs up to CR50_WAKE_START_DELAY_USEC to wake after sleep.
+ * - requires waiting for a "ready" IRQ, if supported; or waiting for at least
+ * CR50_NOIRQ_ACCESS_DELAY between transactions, if the IRQ is not supported.
+ * - waits for up to CR50_FLOW_CONTROL for flow control 'ready' indication.
+ */
+#define CR50_SLEEP_DELAY_MSEC 1000
+#define CR50_WAKE_START_DELAY_USEC 1000
+#define CR50_NOIRQ_ACCESS_DELAY msecs_to_jiffies(2)
+#define CR50_READY_IRQ_TIMEOUT msecs_to_jiffies(TPM2_TIMEOUT_A)
+#define CR50_FLOW_CONTROL msecs_to_jiffies(TPM2_TIMEOUT_A)
+#define MAX_IRQ_CONFIRMATION_ATTEMPTS 3
+
+#define TPM_CR50_FW_VER(l) (0x0f90 | ((l) << 12))
+#define TPM_CR50_MAX_FW_VER_LEN 64
+
+struct cr50_spi_phy {
+ struct tpm_tis_spi_phy spi_phy;
+
+ struct mutex time_track_mutex;
+ unsigned long last_access;
+
+ unsigned long access_delay;
+
+ unsigned int irq_confirmation_attempt;
+ bool irq_needs_confirmation;
+ bool irq_confirmed;
+};
+
+static inline struct cr50_spi_phy *to_cr50_spi_phy(struct tpm_tis_spi_phy *phy)
+{
+ return container_of(phy, struct cr50_spi_phy, spi_phy);
+}
+
+/*
+ * The cr50 interrupt handler just signals waiting threads that the
+ * interrupt was asserted. It does not do any processing triggered
+ * by interrupts but is instead used to avoid fixed delays.
+ */
+static irqreturn_t cr50_spi_irq_handler(int dummy, void *dev_id)
+{
+ struct cr50_spi_phy *cr50_phy = dev_id;
+
+ cr50_phy->irq_confirmed = true;
+ complete(&cr50_phy->spi_phy.ready);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Cr50 needs to have at least some delay between consecutive
+ * transactions. Make sure we wait.
+ */
+static void cr50_ensure_access_delay(struct cr50_spi_phy *phy)
+{
+ unsigned long allowed_access = phy->last_access + phy->access_delay;
+ unsigned long time_now = jiffies;
+ struct device *dev = &phy->spi_phy.spi_device->dev;
+
+ /*
+ * Note: There is a small chance, if Cr50 is not accessed in a few days,
+ * that time_in_range will not provide the correct result after the wrap
+ * around for jiffies. In this case, we'll have an unneeded short delay,
+ * which is fine.
+ */
+ if (time_in_range_open(time_now, phy->last_access, allowed_access)) {
+ unsigned long remaining, timeout = allowed_access - time_now;
+
+ remaining = wait_for_completion_timeout(&phy->spi_phy.ready,
+ timeout);
+ if (!remaining && phy->irq_confirmed)
+ dev_warn(dev, "Timeout waiting for TPM ready IRQ\n");
+ }
+
+ if (phy->irq_needs_confirmation) {
+ unsigned int attempt = ++phy->irq_confirmation_attempt;
+
+ if (phy->irq_confirmed) {
+ phy->irq_needs_confirmation = false;
+ phy->access_delay = CR50_READY_IRQ_TIMEOUT;
+ dev_info(dev, "TPM ready IRQ confirmed on attempt %u\n",
+ attempt);
+ } else if (attempt > MAX_IRQ_CONFIRMATION_ATTEMPTS) {
+ phy->irq_needs_confirmation = false;
+ dev_warn(dev, "IRQ not confirmed - will use delays\n");
+ }
+ }
+}
+
+/*
+ * Cr50 might go to sleep if there is no SPI activity for some time and
+ * miss the first few bits/bytes on the bus. In such case, wake it up
+ * by asserting CS and give it time to start up.
+ */
+static bool cr50_needs_waking(struct cr50_spi_phy *phy)
+{
+ /*
+ * Note: There is a small chance, if Cr50 is not accessed in a few days,
+ * that time_in_range will not provide the correct result after the wrap
+ * around for jiffies. In this case, we'll probably timeout or read
+ * incorrect value from TPM_STS and just retry the operation.
+ */
+ return !time_in_range_open(jiffies, phy->last_access,
+ phy->spi_phy.wake_after);
+}
+
+static void cr50_wake_if_needed(struct cr50_spi_phy *cr50_phy)
+{
+ struct tpm_tis_spi_phy *phy = &cr50_phy->spi_phy;
+
+ if (cr50_needs_waking(cr50_phy)) {
+ /* Assert CS, wait 1 msec, deassert CS */
+ struct spi_transfer spi_cs_wake = { .delay_usecs = 1000 };
+
+ spi_sync_transfer(phy->spi_device, &spi_cs_wake, 1);
+ /* Wait for it to fully wake */
+ usleep_range(CR50_WAKE_START_DELAY_USEC,
+ CR50_WAKE_START_DELAY_USEC * 2);
+ }
+
+ /* Reset the time when we need to wake Cr50 again */
+ phy->wake_after = jiffies + msecs_to_jiffies(CR50_SLEEP_DELAY_MSEC);
+}
+
+/*
+ * Flow control: clock the bus and wait for cr50 to set LSB before
+ * sending/receiving data. TCG PTP spec allows it to happen during
+ * the last byte of header, but cr50 never does that in practice,
+ * and earlier versions had a bug when it was set too early, so don't
+ * check for it during header transfer.
+ */
+static int cr50_spi_flow_control(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *spi_xfer)
+{
+ struct device *dev = &phy->spi_device->dev;
+ unsigned long timeout = jiffies + CR50_FLOW_CONTROL;
+ struct spi_message m;
+ int ret;
+
+ spi_xfer->len = 1;
+
+ do {
+ spi_message_init(&m);
+ spi_message_add_tail(spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+
+ if (time_after(jiffies, timeout)) {
+ dev_warn(dev, "Timeout during flow control\n");
+ return -EBUSY;
+ }
+ } while (!(phy->iobuf[0] & 0x01));
+
+ return 0;
+}
+
+static int tpm_tis_spi_cr50_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct cr50_spi_phy *cr50_phy = to_cr50_spi_phy(phy);
+ int ret;
+
+ mutex_lock(&cr50_phy->time_track_mutex);
+ /*
+ * Do this outside of spi_bus_lock in case cr50 is not the
+ * only device on that spi bus.
+ */
+ cr50_ensure_access_delay(cr50_phy);
+ cr50_wake_if_needed(cr50_phy);
+
+ ret = tpm_tis_spi_transfer(data, addr, len, in, out);
+
+ cr50_phy->last_access = jiffies;
+ mutex_unlock(&cr50_phy->time_track_mutex);
+
+ return ret;
+}
+
+static int tpm_tis_spi_cr50_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result)
+{
+ return tpm_tis_spi_cr50_transfer(data, addr, len, result, NULL);
+}
+
+static int tpm_tis_spi_cr50_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, const u8 *value)
+{
+ return tpm_tis_spi_cr50_transfer(data, addr, len, NULL, value);
+}
+
+static const struct tpm_tis_phy_ops tpm_spi_cr50_phy_ops = {
+ .read_bytes = tpm_tis_spi_cr50_read_bytes,
+ .write_bytes = tpm_tis_spi_cr50_write_bytes,
+ .read16 = tpm_tis_spi_read16,
+ .read32 = tpm_tis_spi_read32,
+ .write32 = tpm_tis_spi_write32,
+};
+
+static void cr50_print_fw_version(struct tpm_tis_data *data)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ int i, len = 0;
+ char fw_ver[TPM_CR50_MAX_FW_VER_LEN + 1];
+ char fw_ver_block[4];
+
+ /*
+ * Write anything to TPM_CR50_FW_VER to start from the beginning
+ * of the version string
+ */
+ tpm_tis_write8(data, TPM_CR50_FW_VER(data->locality), 0);
+
+ /* Read the string, 4 bytes at a time, until we get '\0' */
+ do {
+ tpm_tis_read_bytes(data, TPM_CR50_FW_VER(data->locality), 4,
+ fw_ver_block);
+ for (i = 0; i < 4 && fw_ver_block[i]; ++len, ++i)
+ fw_ver[len] = fw_ver_block[i];
+ } while (i == 4 && len < TPM_CR50_MAX_FW_VER_LEN);
+ fw_ver[len] = '\0';
+
+ dev_info(&phy->spi_device->dev, "Cr50 firmware version: %s\n", fw_ver);
+}
+
+int cr50_spi_probe(struct spi_device *spi)
+{
+ struct tpm_tis_spi_phy *phy;
+ struct cr50_spi_phy *cr50_phy;
+ int ret;
+ struct tpm_chip *chip;
+
+ cr50_phy = devm_kzalloc(&spi->dev, sizeof(*cr50_phy), GFP_KERNEL);
+ if (!cr50_phy)
+ return -ENOMEM;
+
+ phy = &cr50_phy->spi_phy;
+ phy->flow_control = cr50_spi_flow_control;
+ phy->wake_after = jiffies;
+ init_completion(&phy->ready);
+
+ cr50_phy->access_delay = CR50_NOIRQ_ACCESS_DELAY;
+ cr50_phy->last_access = jiffies;
+ mutex_init(&cr50_phy->time_track_mutex);
+
+ if (spi->irq > 0) {
+ ret = devm_request_irq(&spi->dev, spi->irq,
+ cr50_spi_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "cr50_spi", cr50_phy);
+ if (ret < 0) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_warn(&spi->dev, "Requesting IRQ %d failed: %d\n",
+ spi->irq, ret);
+ /*
+ * This is not fatal, the driver will fall back to
+ * delays automatically, since ready will never
+ * be completed without a registered irq handler.
+ * So, just fall through.
+ */
+ } else {
+ /*
+ * IRQ requested, let's verify that it is actually
+ * triggered, before relying on it.
+ */
+ cr50_phy->irq_needs_confirmation = true;
+ }
+ } else {
+ dev_warn(&spi->dev,
+ "No IRQ - will use delays between transactions.\n");
+ }
+
+ ret = tpm_tis_spi_init(spi, phy, -1, &tpm_spi_cr50_phy_ops);
+ if (ret)
+ return ret;
+
+ cr50_print_fw_version(&phy->priv);
+
+ chip = dev_get_drvdata(&spi->dev);
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+int tpm_tis_spi_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ /*
+ * Jiffies not increased during suspend, so we need to reset
+ * the time to wake Cr50 after resume.
+ */
+ phy->wake_after = jiffies;
+
+ return tpm_tis_resume(dev);
+}
+#endif
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 7270e7b69262..3259426f01dc 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1325,24 +1325,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols)
port->cons.ws.ws_col = cols;
}
-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
{
struct port_buffer *buf;
- unsigned int nr_added_bufs;
+ int nr_added_bufs;
int ret;
nr_added_bufs = 0;
do {
buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
if (!buf)
- break;
+ return -ENOMEM;
spin_lock_irq(lock);
ret = add_inbuf(vq, buf);
if (ret < 0) {
spin_unlock_irq(lock);
free_buf(buf, true);
- break;
+ return ret;
}
nr_added_bufs++;
spin_unlock_irq(lock);
@@ -1362,7 +1362,6 @@ static int add_port(struct ports_device *portdev, u32 id)
char debugfs_name[16];
struct port *port;
dev_t devt;
- unsigned int nr_added_bufs;
int err;
port = kmalloc(sizeof(*port), GFP_KERNEL);
@@ -1421,11 +1420,13 @@ static int add_port(struct ports_device *portdev, u32 id)
spin_lock_init(&port->outvq_lock);
init_waitqueue_head(&port->waitqueue);
- /* Fill the in_vq with buffers so the host can send us data. */
- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
- if (!nr_added_bufs) {
+ /* We can safely ignore ENOSPC because it means
+ * the queue already has buffers. Buffers are removed
+ * only by virtcons_remove(), not by unplug_port()
+ */
+ err = fill_queue(port->in_vq, &port->inbuf_lock);
+ if (err < 0 && err != -ENOSPC) {
dev_err(port->dev, "Error allocating inbufs\n");
- err = -ENOMEM;
goto free_device;
}
@@ -2059,14 +2060,11 @@ static int virtcons_probe(struct virtio_device *vdev)
INIT_WORK(&portdev->control_work, &control_work_handler);
if (multiport) {
- unsigned int nr_added_bufs;
-
spin_lock_init(&portdev->c_ivq_lock);
spin_lock_init(&portdev->c_ovq_lock);
- nr_added_bufs = fill_queue(portdev->c_ivq,
- &portdev->c_ivq_lock);
- if (!nr_added_bufs) {
+ err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
+ if (err < 0) {
dev_err(&vdev->dev,
"Error allocating buffers for control queue\n");
/*
@@ -2077,7 +2075,7 @@ static int virtcons_probe(struct virtio_device *vdev)
VIRTIO_CONSOLE_DEVICE_READY, 0);
/* Device was functional: we need full cleanup. */
virtcons_remove(vdev);
- return -ENOMEM;
+ return err;
}
} else {
/*
diff --git a/drivers/gpio/gpio-bd70528.c b/drivers/gpio/gpio-bd70528.c
index 0c1ead12d883..4ba4d4a67881 100644
--- a/drivers/gpio/gpio-bd70528.c
+++ b/drivers/gpio/gpio-bd70528.c
@@ -25,13 +25,13 @@ static int bd70528_set_debounce(struct bd70528_gpio *bdgpio,
case 0:
val = BD70528_DEBOUNCE_DISABLE;
break;
- case 1 ... 15:
+ case 1 ... 15000:
val = BD70528_DEBOUNCE_15MS;
break;
- case 16 ... 30:
+ case 15001 ... 30000:
val = BD70528_DEBOUNCE_30MS;
break;
- case 31 ... 50:
+ case 30001 ... 50000:
val = BD70528_DEBOUNCE_50MS;
break;
default:
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index faf86ea9c51a..642c6321c22a 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -192,13 +192,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
case 0:
val = MAX77620_CNFG_GPIO_DBNC_None;
break;
- case 1000 ... 8000:
+ case 1 ... 8000:
val = MAX77620_CNFG_GPIO_DBNC_8ms;
break;
- case 9000 ... 16000:
+ case 8001 ... 16000:
val = MAX77620_CNFG_GPIO_DBNC_16ms;
break;
- case 17000 ... 32000:
+ case 16001 ... 32000:
val = MAX77620_CNFG_GPIO_DBNC_32ms;
break;
default:
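
Both debounce fixes above adjust the case ranges because the debounce value handed down through gpiolib's set_config path is in microseconds, not milliseconds. A rough userspace model of the corrected BD70528 mapping (the enum names are illustrative stand-ins, not the real register definitions):

#include <stdio.h>

/* Illustrative stand-ins for the BD70528 debounce register values. */
enum debounce_step { DEB_DISABLE, DEB_15MS, DEB_30MS, DEB_50MS, DEB_INVALID };

/* Map a requested debounce time in microseconds to a hardware step. */
static enum debounce_step debounce_to_step(unsigned int usec)
{
        if (usec == 0)
                return DEB_DISABLE;
        if (usec <= 15000)
                return DEB_15MS;
        if (usec <= 30000)
                return DEB_30MS;
        if (usec <= 50000)
                return DEB_50MS;
        return DEB_INVALID;     /* longer debounce than the hardware supports */
}

int main(void)
{
        /* 10 ms = 10000 us now lands on the 15 ms step instead of failing. */
        printf("10000 us -> step %d\n", debounce_to_step(10000));
        printf("40000 us -> step %d\n", debounce_to_step(40000));
        return 0;
}

With the old millisecond-based ranges, any realistic microsecond request fell through to the default (error) branch.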
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 609ed16ae933..59ccfd24627d 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1304,11 +1304,28 @@ late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
{
+ /*
+ * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
+ * a non-existent micro-USB-B connector, which puts the HDMI
+ * DDC pins in GPIO mode, breaking HDMI support.
+ */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
}
},
+ {
+ /*
+ * The Terra Pad 1061 has a micro-USB-B id-pin handler, which
+ * instead of controlling the actual micro-USB-B turns the 5V
+ * boost for its USB-A connector off. The actual micro-USB-B
+ * connector is wired for charging only.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
+ }
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 1d4aaa9580f4..82efc1e22e61 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -511,7 +511,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
* Also, don't allow GTT domain if the BO doesn't have USWC flag set.
*/
if (adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type <= CHIP_RAVEN &&
+ adev->asic_type < CHIP_RAVEN &&
(adev->flags & AMD_IS_APU) &&
(bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
amdgpu_bo_support_uswc(bo_flags) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index e1c15721611a..b19157b19fa0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1013,10 +1013,10 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
/* Navi14 */
- {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
/* Renoir */
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index a042ef471fbd..a73206784cba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -649,15 +649,19 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return -ENOMEM;
alloc_size = info->read_mmr_reg.count * sizeof(*regs);
- for (i = 0; i < info->read_mmr_reg.count; i++)
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < info->read_mmr_reg.count; i++) {
if (amdgpu_asic_read_register(adev, se_num, sh_num,
info->read_mmr_reg.dword_offset + i,
&regs[i])) {
DRM_DEBUG_KMS("unallowed offset %#x\n",
info->read_mmr_reg.dword_offset + i);
kfree(regs);
+ amdgpu_gfx_off_ctrl(adev, true);
return -EFAULT;
}
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
n = copy_to_user(out, regs, min(size, alloc_size));
kfree(regs);
return n ? -EFAULT : 0;
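
The hunk above brackets the whole register-read loop with amdgpu_gfx_off_ctrl(adev, false/true), disabling GFXOFF before the reads and re-enabling it on the early error return as well as on the normal path. The general shape of that "disable, do work, re-enable on every exit" pattern, sketched with placeholder names (feature_ctrl() and read_one() are not amdgpu APIs):

#include <stdio.h>

/* Placeholder for a feature that must be off while we touch the hardware. */
static void feature_ctrl(int enable)
{
        printf("feature %s\n", enable ? "enabled" : "disabled");
}

/* Placeholder for the per-register access that can fail. */
static int read_one(int i, unsigned int *out)
{
        *out = 0x1000 + i;
        return 0;       /* return -1 here to exercise the error path */
}

static int read_registers(unsigned int *regs, int count)
{
        int i;

        feature_ctrl(0);                        /* disable before touching registers */
        for (i = 0; i < count; i++) {
                if (read_one(i, &regs[i])) {
                        feature_ctrl(1);        /* re-enable on the error path too */
                        return -1;
                }
        }
        feature_ctrl(1);                        /* ... and on the normal path */
        return 0;
}

int main(void)
{
        unsigned int regs[4];

        return read_registers(regs, 4);
}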
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index dfca83a2de47..97cf0b536873 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1038,8 +1038,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
case CHIP_VEGA20:
break;
case CHIP_RAVEN:
- if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
- &&((adev->gfx.rlc_fw_version != 106 &&
+ /* Disable GFXOFF on original raven. There are combinations
+ * of sbios and platforms that are not stable.
+ */
+ if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ &&((adev->gfx.rlc_fw_version != 106 &&
adev->gfx.rlc_fw_version < 531) ||
(adev->gfx.rlc_fw_version == 53815) ||
(adev->gfx.rlc_feature_version < 1) ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a52f0b13a2c8..4139f129eafb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -688,7 +688,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
*/
if (adev->flags & AMD_IS_APU &&
adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type <= CHIP_RAVEN)
+ adev->asic_type < CHIP_RAVEN)
init_data.flags.gpu_vm_support = true;
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 34f95e0e3ea4..203ce4b1028f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3478,18 +3478,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int i;
u32 tmp = 0;
if (!query)
return -EINVAL;
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
- tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
- *query = tmp;
+ /*
+ * PPSMC_MSG_GetCurrPkgPwr is not supported on:
+ * - Hawaii
+ * - Bonaire
+ * - Fiji
+ * - Tonga
+ */
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA)) {
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+ tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ *query = tmp;
- if (tmp != 0)
- return 0;
+ if (tmp != 0)
+ return 0;
+ }
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 3ec5a10a7c4d..328e258a6895 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -759,6 +759,12 @@ static int navi10_force_clk_levels(struct smu_context *smu,
case SMU_UCLK:
case SMU_DCEFCLK:
case SMU_FCLK:
+ /* There are only 2 levels for fine-grained DPM */
+ if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+ soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+ soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+ }
+
ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
return size;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index d3fb75bb9eb1..7cb2257bbb93 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -201,6 +201,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->update_wm_post = false;
crtc_state->fb_changed = false;
crtc_state->fifo_changed = false;
+ crtc_state->preload_luts = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 71a0201437a9..aa1e2c670bc4 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -990,6 +990,55 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state)
dev_priv->display.color_commit(crtc_state);
}
+static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ return !old_crtc_state->base.gamma_lut &&
+ !old_crtc_state->base.degamma_lut;
+}
+
+static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+ * CGM_PIPE_MODE is itself single buffered. We'd have to
+ * somehow split it out from chv_load_luts() if we wanted
+ * the ability to preload the CGM LUTs/CSC without tearing.
+ */
+ if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode)
+ return false;
+
+ return !old_crtc_state->base.gamma_lut;
+}
+
+static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+ * The hardware degamma is active whenever the pipe
+ * CSC is active. Thus even if the old state has no
+ * software degamma we need to avoid clobbering the
+ * linear hardware degamma mid scanout.
+ */
+ return !old_crtc_state->csc_enable &&
+ !old_crtc_state->base.gamma_lut;
+}
+
int intel_color_check(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
@@ -1133,6 +1182,8 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1185,6 +1236,8 @@ static int chv_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = chv_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1224,6 +1277,8 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1281,6 +1336,8 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1319,6 +1376,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = glk_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1368,6 +1427,8 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
crtc_state->csc_mode = icl_csc_mode(crtc_state);
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index dfff6f4357b8..af50f05f4e9d 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -2504,6 +2504,9 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
* the highest stride limits of them all.
*/
crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ if (!crtc)
+ return 0;
+
plane = to_intel_plane(crtc->base.primary);
return plane->max_stride(plane, pixel_format, modifier,
@@ -13740,6 +13743,11 @@ static void intel_update_crtc(struct intel_crtc *crtc,
/* vblanks work again, re-enable pipe CRC. */
intel_crtc_enable_pipe_crc(crtc);
} else {
+ if (new_crtc_state->preload_luts &&
+ (new_crtc_state->base.color_mgmt_changed ||
+ new_crtc_state->update_pipe))
+ intel_color_load_luts(new_crtc_state);
+
intel_pre_plane_update(old_crtc_state, new_crtc_state);
if (new_crtc_state->update_pipe)
@@ -14034,6 +14042,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->base.active &&
!needs_modeset(new_crtc_state) &&
+ !new_crtc_state->preload_luts &&
(new_crtc_state->base.color_mgmt_changed ||
new_crtc_state->update_pipe))
intel_color_load_luts(new_crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 449abaea619f..4075b0387c87 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -761,6 +761,7 @@ struct intel_crtc_state {
bool update_wm_pre, update_wm_post; /* watermarks are updated */
bool fb_changed; /* fb on any of the planes is changed */
bool fifo_changed; /* FIFO split is changed */
+ bool preload_luts;
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index d59eee5c5d9c..b5c588e511dd 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -235,6 +235,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = ggtt->gmadr.start;
info->apertures->ranges[0].size = ggtt->mappable_end;
+ /* Our framebuffer is the entirety of fbdev's system memory */
+ info->fix.smem_start =
+ (unsigned long)(ggtt->gmadr.start + vma->node.start);
+ info->fix.smem_len = vma->node.size;
+
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -244,10 +249,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->screen_base = vaddr;
info->screen_size = vma->node.size;
- /* Our framebuffer is the entirety of fbdev's system memory */
- info->fix.smem_start = (unsigned long)info->screen_base;
- info->fix.smem_len = info->screen_size;
-
drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 6b3b50f0f6d9..abfbac49b8e8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -671,8 +671,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
obj->mm.dirty = false;
for_each_sgt_page(page, sgt_iter, pages) {
- if (obj->mm.dirty)
+ if (obj->mm.dirty && trylock_page(page)) {
+ /*
+ * As this may not be anonymous memory (e.g. shmem)
+ * but exist on a real mapping, we have to lock
+ * the page in order to dirty it -- holding
+ * the page reference is not sufficient to
+ * prevent the inode from being truncated.
+ * Play safe and take the lock.
+ *
+ * However...!
+ *
+ * The mmu-notifier can be invalidated for a
+ * migrate_page that is already holding the lock
+ * on the page. Such a try_to_unmap() will result
+ * in us calling put_pages() and so recursively trying
+ * to lock the page. We avoid that deadlock with
+ * a trylock_page() and in exchange we risk missing
+ * some page dirtying.
+ */
set_page_dirty(page);
+ unlock_page(page);
+ }
mark_page_accessed(page);
put_page(page);
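
The comment above carries the key reasoning: put_pages() may be re-entered from a path that already holds the page lock, so an unconditional lock_page() could self-deadlock, and trylock_page() trades an occasionally missed dirty bit for deadlock-freedom. A tiny pthread model of that trade-off, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static int page_dirty;

/*
 * Mark the page dirty only if we can take the lock; if some caller
 * further up the stack already holds it, skip rather than deadlock.
 */
static void try_set_dirty(void)
{
        if (pthread_mutex_trylock(&page_lock) == 0) {
                page_dirty = 1;
                pthread_mutex_unlock(&page_lock);
        } else {
                printf("lock busy: skipped dirtying to avoid deadlock\n");
        }
}

int main(void)
{
        /* Simulate being called while the lock is already held. */
        pthread_mutex_lock(&page_lock);
        try_set_dirty();                /* skips, does not deadlock */
        pthread_mutex_unlock(&page_lock);

        try_set_dirty();                /* lock is free: dirtying succeeds */
        printf("page_dirty = %d\n", page_dirty);
        return 0;
}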
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
index 4cd54c569911..379a91780bd4 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -103,6 +103,8 @@ node_create(struct intel_engine_pool *pool, size_t sz)
return ERR_CAST(obj);
}
+ i915_gem_object_set_readonly(obj);
+
node->obj = obj;
return node;
}
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 8e251e719390..212acaef581e 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -843,8 +843,8 @@ create_event_attributes(struct i915_pmu *pmu)
const char *name;
const char *unit;
} events[] = {
- __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
- __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
+ __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
+ __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
};
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 7b84ebca2901..3eba8a2b39c2 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -177,9 +177,37 @@ static inline int rq_prio(const struct i915_request *rq)
return rq->sched.attr.priority | __NO_PREEMPTION;
}
-static void kick_submission(struct intel_engine_cs *engine, int prio)
+static inline bool need_preempt(int prio, int active)
{
- const struct i915_request *inflight = *engine->execlists.active;
+ /*
+ * Allow preemption of low -> normal -> high, but do not
+ * allow low-priority tasks to preempt other low-priority
+ * tasks, on the assumption that latency for low-priority
+ * tasks does not matter (as much as background throughput),
+ * so keep it simple (KISS).
+ */
+ return prio >= max(I915_PRIORITY_NORMAL, active);
+}
+
+static void kick_submission(struct intel_engine_cs *engine,
+ const struct i915_request *rq,
+ int prio)
+{
+ const struct i915_request *inflight;
+
+ /*
+ * We only need to kick the tasklet once for the high priority
+ * new context we add into the queue.
+ */
+ if (prio <= engine->execlists.queue_priority_hint)
+ return;
+
+ rcu_read_lock();
+
+ /* Nothing currently active? We're overdue for a submission! */
+ inflight = execlists_active(&engine->execlists);
+ if (!inflight)
+ goto unlock;
/*
* If we are already the currently executing context, don't
@@ -188,10 +216,15 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
* tasklet, i.e. we have not changed the priority queue
* sufficiently to oust the running context.
*/
- if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
- return;
+ if (inflight->hw_context == rq->hw_context)
+ goto unlock;
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ engine->execlists.queue_priority_hint = prio;
+ if (need_preempt(prio, rq_prio(inflight)))
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+ rcu_read_unlock();
}
static void __i915_schedule(struct i915_sched_node *node,
@@ -317,13 +350,8 @@ static void __i915_schedule(struct i915_sched_node *node,
list_move_tail(&node->link, cache.priolist);
}
- if (prio <= engine->execlists.queue_priority_hint)
- continue;
-
- engine->execlists.queue_priority_hint = prio;
-
/* Defer (tasklet) submission until after all of our updates. */
- kick_submission(engine, prio);
+ kick_submission(engine, node_to_request(node), prio);
}
spin_unlock(&engine->active.lock);
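
With this change the tasklet is kicked only when the new request's priority both reaches I915_PRIORITY_NORMAL and is at least the priority of the in-flight request, so low-priority work never preempts other low-priority work. A standalone sketch of that predicate, using made-up priority values (the real I915_PRIORITY_* constants differ):

#include <stdio.h>

/* Made-up priority levels; only their ordering matters for the rule. */
enum { PRIO_LOW = -1, PRIO_NORMAL = 0, PRIO_HIGH = 1 };

static int max_int(int a, int b)
{
        return a > b ? a : b;
}

/* Preempt only for normal-or-higher priority that also matches or beats
 * whatever is currently running. */
static int need_preempt(int prio, int active)
{
        return prio >= max_int(PRIO_NORMAL, active);
}

int main(void)
{
        printf("low vs low:     %d\n", need_preempt(PRIO_LOW, PRIO_LOW));     /* 0 */
        printf("normal vs low:  %d\n", need_preempt(PRIO_NORMAL, PRIO_LOW));  /* 1 */
        printf("low vs normal:  %d\n", need_preempt(PRIO_LOW, PRIO_NORMAL));  /* 0 */
        printf("high vs normal: %d\n", need_preempt(PRIO_HIGH, PRIO_NORMAL)); /* 1 */
        return 0;
}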
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 704558d449a2..56fae3472114 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -177,7 +177,6 @@ static const char * const smbus_pnp_ids[] = {
"LEN0096", /* X280 */
"LEN0097", /* X280 -> ALPS trackpoint */
"LEN009b", /* T580 */
- "LEN0402", /* X1 Extreme 2nd Generation */
"LEN200f", /* T450s */
"LEN2054", /* E480 */
"LEN2055", /* E580 */
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index aa98953f4462..d6d5ab23c088 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -38,9 +38,9 @@ config MD_AUTODETECT
default y
---help---
If you say Y here, then the kernel will try to autodetect raid
- arrays as part of its boot process.
+ arrays as part of its boot process.
- If you don't use raid and say Y, this autodetection can cause
+ If you don't use raid and say Y, this autodetection can cause
a several-second delay in the boot time due to various
synchronisation steps that are part of this step.
@@ -290,7 +290,7 @@ config DM_SNAPSHOT
depends on BLK_DEV_DM
select DM_BUFIO
---help---
- Allow volume managers to take writable snapshots of a device.
+ Allow volume managers to take writable snapshots of a device.
config DM_THIN_PROVISIONING
tristate "Thin provisioning target"
@@ -298,7 +298,7 @@ config DM_THIN_PROVISIONING
select DM_PERSISTENT_DATA
select DM_BIO_PRISON
---help---
- Provides thin provisioning and snapshots that share a data store.
+ Provides thin provisioning and snapshots that share a data store.
config DM_CACHE
tristate "Cache target (EXPERIMENTAL)"
@@ -307,23 +307,23 @@ config DM_CACHE
select DM_PERSISTENT_DATA
select DM_BIO_PRISON
---help---
- dm-cache attempts to improve performance of a block device by
- moving frequently used data to a smaller, higher performance
- device. Different 'policy' plugins can be used to change the
- algorithms used to select which blocks are promoted, demoted,
- cleaned etc. It supports writeback and writethrough modes.
+ dm-cache attempts to improve performance of a block device by
+ moving frequently used data to a smaller, higher performance
+ device. Different 'policy' plugins can be used to change the
+ algorithms used to select which blocks are promoted, demoted,
+ cleaned etc. It supports writeback and writethrough modes.
config DM_CACHE_SMQ
tristate "Stochastic MQ Cache Policy (EXPERIMENTAL)"
depends on DM_CACHE
default y
---help---
- A cache policy that uses a multiqueue ordered by recent hits
- to select which blocks should be promoted and demoted.
- This is meant to be a general purpose policy. It prioritises
- reads over writes. This SMQ policy (vs MQ) offers the promise
- of less memory utilization, improved performance and increased
- adaptability in the face of changing workloads.
+ A cache policy that uses a multiqueue ordered by recent hits
+ to select which blocks should be promoted and demoted.
+ This is meant to be a general purpose policy. It prioritises
+ reads over writes. This SMQ policy (vs MQ) offers the promise
+ of less memory utilization, improved performance and increased
+ adaptability in the face of changing workloads.
config DM_WRITECACHE
tristate "Writecache target"
@@ -343,9 +343,9 @@ config DM_ERA
select DM_PERSISTENT_DATA
select DM_BIO_PRISON
---help---
- dm-era tracks which parts of a block device are written to
- over time. Useful for maintaining cache coherency when using
- vendor snapshots.
+ dm-era tracks which parts of a block device are written to
+ over time. Useful for maintaining cache coherency when using
+ vendor snapshots.
config DM_CLONE
tristate "Clone target (EXPERIMENTAL)"
@@ -353,20 +353,20 @@ config DM_CLONE
default n
select DM_PERSISTENT_DATA
---help---
- dm-clone produces a one-to-one copy of an existing, read-only source
- device into a writable destination device. The cloned device is
- visible/mountable immediately and the copy of the source device to the
- destination device happens in the background, in parallel with user
- I/O.
+ dm-clone produces a one-to-one copy of an existing, read-only source
+ device into a writable destination device. The cloned device is
+ visible/mountable immediately and the copy of the source device to the
+ destination device happens in the background, in parallel with user
+ I/O.
- If unsure, say N.
+ If unsure, say N.
config DM_MIRROR
tristate "Mirror target"
depends on BLK_DEV_DM
---help---
- Allow volume managers to mirror logical volumes, also
- needed for live data migration tools such as 'pvmove'.
+ Allow volume managers to mirror logical volumes, also
+ needed for live data migration tools such as 'pvmove'.
config DM_LOG_USERSPACE
tristate "Mirror userspace logging"
@@ -483,7 +483,7 @@ config DM_FLAKEY
tristate "Flakey target"
depends on BLK_DEV_DM
---help---
- A target that intermittently fails I/O for debugging purposes.
+ A target that intermittently fails I/O for debugging purposes.
config DM_VERITY
tristate "Verity target support"
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
index d26b35195825..fd714628da6a 100644
--- a/drivers/md/bcache/Makefile
+++ b/drivers/md/bcache/Makefile
@@ -5,5 +5,3 @@ obj-$(CONFIG_BCACHE) += bcache.o
bcache-y := alloc.o bset.o btree.o closure.o debug.o extents.o\
io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
util.o writeback.o
-
-CFLAGS_request.o += -Iblock
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 6f776823b9ba..a1df0d95151c 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -377,7 +377,10 @@ retry_invalidate:
if (!fifo_full(&ca->free_inc))
goto retry_invalidate;
- bch_prio_write(ca);
+ if (bch_prio_write(ca, false) < 0) {
+ ca->invalidate_needs_gc = 1;
+ wake_up_gc(ca->set);
+ }
}
}
out:
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 013e35a9e317..9198c1b480d9 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -582,6 +582,7 @@ struct cache_set {
*/
wait_queue_head_t btree_cache_wait;
struct task_struct *btree_cache_alloc_lock;
+ spinlock_t btree_cannibalize_lock;
/*
* When we free a btree node, we increment the gen of the bucket the
@@ -723,6 +724,7 @@ struct cache_set {
unsigned int gc_always_rewrite:1;
unsigned int shrinker_disabled:1;
unsigned int copy_gc_enabled:1;
+ unsigned int idle_max_writeback_rate_enabled:1;
#define BUCKET_HASH_BITS 12
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
@@ -977,7 +979,7 @@ bool bch_cached_dev_error(struct cached_dev *dc);
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
-void bch_prio_write(struct cache *ca);
+int bch_prio_write(struct cache *ca, bool wait);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
extern struct workqueue_struct *bcache_wq;
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 08768796b543..cffcdc9feefb 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -155,6 +155,7 @@ int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
return 0;
}
+/* Pop the top key of keylist by pointing l->top to its previous key */
struct bkey *bch_keylist_pop(struct keylist *l)
{
struct bkey *k = l->keys;
@@ -168,6 +169,7 @@ struct bkey *bch_keylist_pop(struct keylist *l)
return l->top = k;
}
+/* Pop the bottom key of keylist and update l->top_p */
void bch_keylist_pop_front(struct keylist *l)
{
l->top_p -= bkey_u64s(l->keys);
@@ -309,7 +311,6 @@ void bch_btree_keys_free(struct btree_keys *b)
t->tree = NULL;
t->data = NULL;
}
-EXPORT_SYMBOL(bch_btree_keys_free);
int bch_btree_keys_alloc(struct btree_keys *b,
unsigned int page_order,
@@ -342,7 +343,6 @@ err:
bch_btree_keys_free(b);
return -ENOMEM;
}
-EXPORT_SYMBOL(bch_btree_keys_alloc);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
bool *expensive_debug_checks)
@@ -361,7 +361,6 @@ void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
* any more.
*/
}
-EXPORT_SYMBOL(bch_btree_keys_init);
/* Binary tree stuff for auxiliary search trees */
@@ -678,7 +677,6 @@ void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
bch_bset_build_unwritten_tree(b);
}
-EXPORT_SYMBOL(bch_bset_init_next);
/*
* Build auxiliary binary tree 'struct bset_tree *t', this tree is used to
@@ -732,7 +730,6 @@ void bch_bset_build_written_tree(struct btree_keys *b)
j = inorder_next(j, t->size))
make_bfloat(t, j);
}
-EXPORT_SYMBOL(bch_bset_build_written_tree);
/* Insert */
@@ -780,7 +777,6 @@ fix_right: do {
j = j * 2 + 1;
} while (j < t->size);
}
-EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
static void bch_bset_fix_lookup_table(struct btree_keys *b,
struct bset_tree *t,
@@ -855,7 +851,6 @@ bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
return b->ops->key_merge(b, l, r);
}
-EXPORT_SYMBOL(bch_bkey_try_merge);
void bch_bset_insert(struct btree_keys *b, struct bkey *where,
struct bkey *insert)
@@ -875,7 +870,6 @@ void bch_bset_insert(struct btree_keys *b, struct bkey *where,
bkey_copy(where, insert);
bch_bset_fix_lookup_table(b, t, where);
}
-EXPORT_SYMBOL(bch_bset_insert);
unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
struct bkey *replace_key)
@@ -931,7 +925,6 @@ copy: bkey_copy(m, k);
merged:
return status;
}
-EXPORT_SYMBOL(bch_btree_insert_key);
/* Lookup */
@@ -1077,7 +1070,6 @@ struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
return i.l;
}
-EXPORT_SYMBOL(__bch_bset_search);
/* Btree iterator */
@@ -1132,7 +1124,6 @@ struct bkey *bch_btree_iter_init(struct btree_keys *b,
{
return __bch_btree_iter_init(b, iter, search, b->set);
}
-EXPORT_SYMBOL(bch_btree_iter_init);
static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
btree_iter_cmp_fn *cmp)
@@ -1165,7 +1156,6 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
return __bch_btree_iter_next(iter, btree_iter_cmp);
}
-EXPORT_SYMBOL(bch_btree_iter_next);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
struct btree_keys *b, ptr_filter_fn fn)
@@ -1196,7 +1186,6 @@ int bch_bset_sort_state_init(struct bset_sort_state *state,
return mempool_init_page_pool(&state->pool, 1, page_order);
}
-EXPORT_SYMBOL(bch_bset_sort_state_init);
static void btree_mergesort(struct btree_keys *b, struct bset *out,
struct btree_iter *iter,
@@ -1313,7 +1302,6 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
-EXPORT_SYMBOL(bch_btree_sort_partial);
void bch_btree_sort_and_fix_extents(struct btree_keys *b,
struct btree_iter *iter,
@@ -1366,7 +1354,6 @@ void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
out:
bch_bset_build_written_tree(b);
}
-EXPORT_SYMBOL(bch_btree_sort_lazy);
void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ba434d9ac720..14d6c33b0957 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -543,6 +543,11 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
set_btree_node_dirty(b);
+ /*
+ * w->journal is always the oldest journal pin of all bkeys
+ * in the leaf node, so that the oldest jset seq won't
+ * be increased before this btree node is flushed.
+ */
if (journal_ref) {
if (w->journal &&
journal_pin_cmp(b->c, w->journal, journal_ref)) {
@@ -723,6 +728,8 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
* IO can always make forward progress:
*/
nr /= c->btree_pages;
+ if (nr == 0)
+ nr = 1;
nr = min_t(unsigned long, nr, mca_can_free(c));
i = 0;
@@ -884,15 +891,17 @@ out:
static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
- struct task_struct *old;
-
- old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
- if (old && old != current) {
+ spin_lock(&c->btree_cannibalize_lock);
+ if (likely(c->btree_cache_alloc_lock == NULL)) {
+ c->btree_cache_alloc_lock = current;
+ } else if (c->btree_cache_alloc_lock != current) {
if (op)
prepare_to_wait(&c->btree_cache_wait, &op->wait,
TASK_UNINTERRUPTIBLE);
+ spin_unlock(&c->btree_cannibalize_lock);
return -EINTR;
}
+ spin_unlock(&c->btree_cannibalize_lock);
return 0;
}
@@ -927,10 +936,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
*/
static void bch_cannibalize_unlock(struct cache_set *c)
{
+ spin_lock(&c->btree_cannibalize_lock);
if (c->btree_cache_alloc_lock == current) {
c->btree_cache_alloc_lock = NULL;
wake_up(&c->btree_cache_wait);
}
+ spin_unlock(&c->btree_cannibalize_lock);
}
static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index c12cd809ab19..0164a1fe94a9 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -45,7 +45,6 @@ void closure_sub(struct closure *cl, int v)
{
closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
-EXPORT_SYMBOL(closure_sub);
/*
* closure_put - decrement a closure's refcount
@@ -54,7 +53,6 @@ void closure_put(struct closure *cl)
{
closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
-EXPORT_SYMBOL(closure_put);
/*
* closure_wake_up - wake up all closures on a wait list, without memory barrier
@@ -76,7 +74,6 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
closure_sub(cl, CLOSURE_WAITING + 1);
}
}
-EXPORT_SYMBOL(__closure_wake_up);
/**
* closure_wait - add a closure to a waitlist
@@ -96,7 +93,6 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
return true;
}
-EXPORT_SYMBOL(closure_wait);
struct closure_syncer {
struct task_struct *task;
@@ -131,7 +127,6 @@ void __sched __closure_sync(struct closure *cl)
__set_current_state(TASK_RUNNING);
}
-EXPORT_SYMBOL(__closure_sync);
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -149,7 +144,6 @@ void closure_debug_create(struct closure *cl)
list_add(&cl->all, &closure_list);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL(closure_debug_create);
void closure_debug_destroy(struct closure *cl)
{
@@ -162,7 +156,6 @@ void closure_debug_destroy(struct closure *cl)
list_del(&cl->all);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL(closure_debug_destroy);
static struct dentry *closure_debug;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 41adcd1546f1..73478a91a342 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -62,18 +62,6 @@ static void bch_data_insert_keys(struct closure *cl)
struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
int ret;
- /*
- * If we're looping, might already be waiting on
- * another journal write - can't wait on more than one journal write at
- * a time
- *
- * XXX: this looks wrong
- */
-#if 0
- while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
- closure_sync(&s->cl);
-#endif
-
if (!op->replace)
journal_ref = bch_journal(op->c, &op->insert_keys,
op->flush_journal ? cl : NULL);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 20ed838e9413..77e9869345e7 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -92,10 +92,11 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
sb->version, sb->flags, sb->seq, sb->keys);
- err = "Not a bcache superblock";
+ err = "Not a bcache superblock (bad offset)";
if (sb->offset != SB_SECTOR)
goto err;
+ err = "Not a bcache superblock (bad magic)";
if (memcmp(sb->magic, bcache_magic, 16))
goto err;
@@ -529,12 +530,29 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
closure_sync(cl);
}
-void bch_prio_write(struct cache *ca)
+int bch_prio_write(struct cache *ca, bool wait)
{
int i;
struct bucket *b;
struct closure cl;
+ pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu",
+ fifo_used(&ca->free[RESERVE_PRIO]),
+ fifo_used(&ca->free[RESERVE_NONE]),
+ fifo_used(&ca->free_inc));
+
+ /*
+ * Pre-check if there are enough free buckets. In the non-blocking
+ * scenario it's better to fail early rather than start allocating
+ * buckets and clean up later in case of failure.
+ */
+ if (!wait) {
+ size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
+ fifo_used(&ca->free[RESERVE_NONE]);
+ if (prio_buckets(ca) > avail)
+ return -ENOMEM;
+ }
+
closure_init_stack(&cl);
lockdep_assert_held(&ca->set->bucket_lock);
@@ -544,9 +562,6 @@ void bch_prio_write(struct cache *ca)
atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
&ca->meta_sectors_written);
- //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
- // fifo_used(&ca->free_inc), fifo_used(&ca->unused));
-
for (i = prio_buckets(ca) - 1; i >= 0; --i) {
long bucket;
struct prio_set *p = ca->disk_buckets;
@@ -564,7 +579,7 @@ void bch_prio_write(struct cache *ca)
p->magic = pset_magic(&ca->sb);
p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
- bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
+ bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
BUG_ON(bucket == -1);
mutex_unlock(&ca->set->bucket_lock);
@@ -593,6 +608,7 @@ void bch_prio_write(struct cache *ca)
ca->prio_last_buckets[i] = ca->prio_buckets[i];
}
+ return 0;
}
static void prio_read(struct cache *ca, uint64_t bucket)
@@ -761,20 +777,28 @@ static inline int idx_to_first_minor(int idx)
static void bcache_device_free(struct bcache_device *d)
{
+ struct gendisk *disk = d->disk;
+
lockdep_assert_held(&bch_register_lock);
- pr_info("%s stopped", d->disk->disk_name);
+ if (disk)
+ pr_info("%s stopped", disk->disk_name);
+ else
+ pr_err("bcache device (NULL gendisk) stopped");
if (d->c)
bcache_device_detach(d);
- if (d->disk && d->disk->flags & GENHD_FL_UP)
- del_gendisk(d->disk);
- if (d->disk && d->disk->queue)
- blk_cleanup_queue(d->disk->queue);
- if (d->disk) {
+
+ if (disk) {
+ if (disk->flags & GENHD_FL_UP)
+ del_gendisk(disk);
+
+ if (disk->queue)
+ blk_cleanup_queue(disk->queue);
+
ida_simple_remove(&bcache_device_idx,
- first_minor_to_idx(d->disk->first_minor));
- put_disk(d->disk);
+ first_minor_to_idx(disk->first_minor));
+ put_disk(disk);
}
bioset_exit(&d->bio_split);
@@ -1769,6 +1793,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
sema_init(&c->sb_write_mutex, 1);
mutex_init(&c->bucket_lock);
init_waitqueue_head(&c->btree_cache_wait);
+ spin_lock_init(&c->btree_cannibalize_lock);
init_waitqueue_head(&c->bucket_wait);
init_waitqueue_head(&c->gc_wait);
sema_init(&c->uuid_write_mutex, 1);
@@ -1809,6 +1834,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->congested_read_threshold_us = 2000;
c->congested_write_threshold_us = 20000;
c->error_limit = DEFAULT_IO_ERROR_LIMIT;
+ c->idle_max_writeback_rate_enabled = 1;
WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));
return c;
@@ -1954,7 +1980,7 @@ static int run_cache_set(struct cache_set *c)
mutex_lock(&c->bucket_lock);
for_each_cache(ca, c, i)
- bch_prio_write(ca);
+ bch_prio_write(ca, true);
mutex_unlock(&c->bucket_lock);
err = "cannot allocate new UUID bucket";
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 627dcea0f5b6..733e2ddf3c78 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -134,6 +134,7 @@ rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
+rw_attribute(idle_max_writeback_rate);
rw_attribute(gc_after_writeback);
rw_attribute(size);
@@ -747,6 +748,8 @@ SHOW(__bch_cache_set)
sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
+ sysfs_printf(idle_max_writeback_rate, "%i",
+ c->idle_max_writeback_rate_enabled);
sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback);
sysfs_printf(io_disable, "%i",
test_bit(CACHE_SET_IO_DISABLE, &c->flags));
@@ -864,6 +867,9 @@ STORE(__bch_cache_set)
sysfs_strtoul_bool(gc_always_rewrite, c->gc_always_rewrite);
sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
sysfs_strtoul_bool(copy_gc_enabled, c->copy_gc_enabled);
+ sysfs_strtoul_bool(idle_max_writeback_rate,
+ c->idle_max_writeback_rate_enabled);
+
/*
* write gc_after_writeback here may overwrite an already set
* BCH_DO_AUTO_GC, it doesn't matter because this flag will be
@@ -954,6 +960,7 @@ static struct attribute *bch_cache_set_internal_files[] = {
&sysfs_gc_always_rewrite,
&sysfs_btree_shrinker_disabled,
&sysfs_copy_gc_enabled,
+ &sysfs_idle_max_writeback_rate,
&sysfs_gc_after_writeback,
&sysfs_io_disable,
&sysfs_cutoff_writeback,
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index d60268fe49e1..4a40f9eadeaf 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -122,6 +122,10 @@ static void __update_writeback_rate(struct cached_dev *dc)
static bool set_at_max_writeback_rate(struct cache_set *c,
struct cached_dev *dc)
{
+ /* Don't set max writeback rate if it is disabled */
+ if (!c->idle_max_writeback_rate_enabled)
+ return false;
+
/* Don't set max writeback rate if gc is running */
if (!c->gc_mark_valid)
return false;
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index b5389890bbc3..1f8f98efd97a 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -150,11 +150,10 @@ static int bio_detain(struct dm_bio_prison *prison,
struct dm_bio_prison_cell **cell_result)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -198,11 +197,9 @@ void dm_cell_release(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell,
struct bio_list *bios)
{
- unsigned long flags;
-
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
__cell_release(prison, cell, bios);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_release);
@@ -250,12 +247,10 @@ void dm_cell_visit_release(struct dm_bio_prison *prison,
void *context,
struct dm_bio_prison_cell *cell)
{
- unsigned long flags;
-
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
visit_fn(context, cell);
rb_erase(&cell->node, &prison->cells);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);
@@ -275,11 +270,10 @@ int dm_cell_promote_or_release(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __promote_or_release(prison, cell);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -379,10 +373,9 @@ EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
int r = 1;
- unsigned long flags;
unsigned next_entry;
- spin_lock_irqsave(&ds->lock, flags);
+ spin_lock_irq(&ds->lock);
if ((ds->sweeper == ds->current_entry) &&
!ds->entries[ds->current_entry].count)
r = 0;
@@ -392,7 +385,7 @@ int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
if (!ds->entries[next_entry].count)
ds->current_entry = next_entry;
}
- spin_unlock_irqrestore(&ds->lock, flags);
+ spin_unlock_irq(&ds->lock);
return r;
}
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index b092cdc8e1ae..8ee019eda32d 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -177,11 +177,10 @@ bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 **cell_result)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -261,11 +260,10 @@ int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 **cell_result)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -285,11 +283,9 @@ void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
struct work_struct *continuation)
{
- unsigned long flags;
-
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
__quiesce(prison, cell, continuation);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
@@ -309,11 +305,10 @@ int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
unsigned new_lock_level)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __promote(prison, cell, new_lock_level);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -342,11 +337,10 @@ bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
struct bio_list *bios)
{
bool r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __unlock(prison, cell, bios);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 8346e6d1816c..2d32821b3a5b 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -74,22 +74,19 @@ static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs)
static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
bool r;
- unsigned long flags;
- spin_lock_irqsave(&iot->lock, flags);
+ spin_lock_irq(&iot->lock);
r = __iot_idle_for(iot, jifs);
- spin_unlock_irqrestore(&iot->lock, flags);
+ spin_unlock_irq(&iot->lock);
return r;
}
static void iot_io_begin(struct io_tracker *iot, sector_t len)
{
- unsigned long flags;
-
- spin_lock_irqsave(&iot->lock, flags);
+ spin_lock_irq(&iot->lock);
iot->in_flight += len;
- spin_unlock_irqrestore(&iot->lock, flags);
+ spin_unlock_irq(&iot->lock);
}
static void __iot_io_end(struct io_tracker *iot, sector_t len)
@@ -172,7 +169,6 @@ static void __commit(struct work_struct *_ws)
{
struct batcher *b = container_of(_ws, struct batcher, commit_work);
blk_status_t r;
- unsigned long flags;
struct list_head work_items;
struct work_struct *ws, *tmp;
struct continuation *k;
@@ -186,12 +182,12 @@ static void __commit(struct work_struct *_ws)
* We have to grab these before the commit_op to avoid a race
* condition.
*/
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irq(&b->lock);
list_splice_init(&b->work_items, &work_items);
bio_list_merge(&bios, &b->bios);
bio_list_init(&b->bios);
b->commit_scheduled = false;
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irq(&b->lock);
r = b->commit_op(b->commit_context);
@@ -238,13 +234,12 @@ static void async_commit(struct batcher *b)
static void continue_after_commit(struct batcher *b, struct continuation *k)
{
- unsigned long flags;
bool commit_scheduled;
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irq(&b->lock);
commit_scheduled = b->commit_scheduled;
list_add_tail(&k->ws.entry, &b->work_items);
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irq(&b->lock);
if (commit_scheduled)
async_commit(b);
@@ -255,13 +250,12 @@ static void continue_after_commit(struct batcher *b, struct continuation *k)
*/
static void issue_after_commit(struct batcher *b, struct bio *bio)
{
- unsigned long flags;
bool commit_scheduled;
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irq(&b->lock);
commit_scheduled = b->commit_scheduled;
bio_list_add(&b->bios, bio);
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irq(&b->lock);
if (commit_scheduled)
async_commit(b);
@@ -273,12 +267,11 @@ static void issue_after_commit(struct batcher *b, struct bio *bio)
static void schedule_commit(struct batcher *b)
{
bool immediate;
- unsigned long flags;
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irq(&b->lock);
immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios);
b->commit_scheduled = true;
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irq(&b->lock);
if (immediate)
async_commit(b);
@@ -630,23 +623,19 @@ static struct per_bio_data *init_per_bio_data(struct bio *bio)
static void defer_bio(struct cache *cache, struct bio *bio)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
bio_list_add(&cache->deferred_bios, bio);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
wake_deferred_bio_worker(cache);
}
static void defer_bios(struct cache *cache, struct bio_list *bios)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
bio_list_merge(&cache->deferred_bios, bios);
bio_list_init(bios);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
wake_deferred_bio_worker(cache);
}
@@ -756,33 +745,27 @@ static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
static void set_discard(struct cache *cache, dm_dblock_t b)
{
- unsigned long flags;
-
BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
atomic_inc(&cache->stats.discard_count);
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
set_bit(from_dblock(b), cache->discard_bitset);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
}
static void clear_discard(struct cache *cache, dm_dblock_t b)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
clear_bit(from_dblock(b), cache->discard_bitset);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
}
static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
int r;
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(b), cache->discard_bitset);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
return r;
}
@@ -790,12 +773,10 @@ static bool is_discarded(struct cache *cache, dm_dblock_t b)
static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
int r;
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
cache->discard_bitset);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
return r;
}
@@ -827,17 +808,16 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
- unsigned long flags;
struct per_bio_data *pb;
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
bio_op(bio) != REQ_OP_DISCARD) {
pb = get_per_bio_data(bio);
pb->tick = true;
cache->need_tick_bio = false;
}
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
}
static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
@@ -1889,17 +1869,16 @@ static void process_deferred_bios(struct work_struct *ws)
{
struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
- unsigned long flags;
bool commit_needed = false;
struct bio_list bios;
struct bio *bio;
bio_list_init(&bios);
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
bio_list_merge(&bios, &cache->deferred_bios);
bio_list_init(&cache->deferred_bios);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
while ((bio = bio_list_pop(&bios))) {
if (bio->bi_opf & REQ_PREFLUSH)
diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
index 6bc8c1d1c351..08c552e5e41b 100644
--- a/drivers/md/dm-clone-metadata.c
+++ b/drivers/md/dm-clone-metadata.c
@@ -712,7 +712,7 @@ static int __metadata_commit(struct dm_clone_metadata *cmd)
static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
{
int r;
- unsigned long word, flags;
+ unsigned long word;
word = 0;
do {
@@ -736,9 +736,9 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
return r;
/* Update the changed flag */
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
dmap->changed = 0;
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
return 0;
}
@@ -746,7 +746,6 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
{
int r = -EPERM;
- unsigned long flags;
struct dirty_map *dmap, *next_dmap;
down_write(&cmd->lock);
@@ -770,9 +769,9 @@ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
}
/* Swap dirty bitmaps */
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
cmd->current_dmap = next_dmap;
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
/*
* No one is accessing the old dirty bitmap anymore, so we can flush
@@ -817,9 +816,9 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
{
int r = 0;
struct dirty_map *dmap;
- unsigned long word, region_nr, flags;
+ unsigned long word, region_nr;
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
if (cmd->read_only) {
r = -EPERM;
@@ -836,7 +835,7 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
}
}
out:
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
return r;
}
@@ -903,13 +902,11 @@ out:
void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
{
- unsigned long flags;
-
down_write(&cmd->lock);
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
cmd->read_only = 1;
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
if (!cmd->fail_io)
dm_bm_set_read_only(cmd->bm);
@@ -919,13 +916,11 @@ void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
void dm_clone_metadata_set_read_write(struct dm_clone_metadata *cmd)
{
- unsigned long flags;
-
down_write(&cmd->lock);
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
cmd->read_only = 0;
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
if (!cmd->fail_io)
dm_bm_set_read_write(cmd->bm);
diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h
index 434bff08508b..3fe50a781c11 100644
--- a/drivers/md/dm-clone-metadata.h
+++ b/drivers/md/dm-clone-metadata.h
@@ -44,7 +44,9 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re
* @start: Starting region number
* @nr_regions: Number of regions in the range
*
- * This function doesn't block, so it's safe to call it from interrupt context.
+ * This function doesn't block, but since it uses spin_lock_irq()/spin_unlock_irq()
+ * it's NOT safe to call it from any context where interrupts are disabled, e.g.,
+ * from interrupt context.
*/
int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
unsigned long nr_regions);
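
Annotation: the kernel-doc change above spells out the new constraint: because the function now ends its critical section with spin_unlock_irq(), calling it with interrupts already disabled would silently re-enable them. A hedged sketch of how such a calling-context requirement can be asserted with the stock lockdep helper (the function and lock names here are illustrative, not part of dm-clone; the assertion compiles to nothing on non-debug kernels):

	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_bitmap_lock);

	/* Must only be called from contexts with interrupts enabled. */
	static int demo_cond_set_range(unsigned long start, unsigned long nr_regions)
	{
		lockdep_assert_irqs_enabled();

		spin_lock_irq(&demo_bitmap_lock);
		/* ... update the dirty bitmap for [start, start + nr_regions) ... */
		spin_unlock_irq(&demo_bitmap_lock);

		return 0;
	}
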
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 4ca8f1977222..b3d89072d21c 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -332,8 +332,6 @@ static void submit_bios(struct bio_list *bios)
*/
static void issue_bio(struct clone *clone, struct bio *bio)
{
- unsigned long flags;
-
if (!bio_triggers_commit(clone, bio)) {
generic_make_request(bio);
return;
@@ -352,9 +350,9 @@ static void issue_bio(struct clone *clone, struct bio *bio)
* Batch together any bios that trigger commits and then issue a single
* commit for them in process_deferred_flush_bios().
*/
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_add(&clone->deferred_flush_bios, bio);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
wake_worker(clone);
}
@@ -469,7 +467,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
static void process_discard_bio(struct clone *clone, struct bio *bio)
{
- unsigned long rs, re, flags;
+ unsigned long rs, re;
bio_region_range(clone, bio, &rs, &re);
BUG_ON(re > clone->nr_regions);
@@ -501,9 +499,9 @@ static void process_discard_bio(struct clone *clone, struct bio *bio)
/*
* Defer discard processing.
*/
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_add(&clone->deferred_discard_bios, bio);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
wake_worker(clone);
}
@@ -554,6 +552,12 @@ struct hash_table_bucket {
#define bucket_unlock_irqrestore(bucket, flags) \
spin_unlock_irqrestore(&(bucket)->lock, flags)
+#define bucket_lock_irq(bucket) \
+ spin_lock_irq(&(bucket)->lock)
+
+#define bucket_unlock_irq(bucket) \
+ spin_unlock_irq(&(bucket)->lock)
+
static int hash_table_init(struct clone *clone)
{
unsigned int i, sz;
@@ -851,7 +855,6 @@ static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio
*/
static void hydrate_bio_region(struct clone *clone, struct bio *bio)
{
- unsigned long flags;
unsigned long region_nr;
struct hash_table_bucket *bucket;
struct dm_clone_region_hydration *hd, *hd2;
@@ -859,19 +862,19 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
region_nr = bio_to_region(clone, bio);
bucket = get_hash_table_bucket(clone, region_nr);
- bucket_lock_irqsave(bucket, flags);
+ bucket_lock_irq(bucket);
hd = __hash_find(bucket, region_nr);
if (hd) {
/* Someone else is hydrating the region */
bio_list_add(&hd->deferred_bios, bio);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
return;
}
if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
/* The region has been hydrated */
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
issue_bio(clone, bio);
return;
}
@@ -880,16 +883,16 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
* We must allocate a hydration descriptor and start the hydration of
* the corresponding region.
*/
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
hd = alloc_hydration(clone);
hydration_init(hd, region_nr);
- bucket_lock_irqsave(bucket, flags);
+ bucket_lock_irq(bucket);
/* Check if the region has been hydrated in the meantime. */
if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
free_hydration(hd);
issue_bio(clone, bio);
return;
@@ -899,7 +902,7 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
if (hd2 != hd) {
/* Someone else started the region's hydration. */
bio_list_add(&hd2->deferred_bios, bio);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
free_hydration(hd);
return;
}
@@ -911,7 +914,7 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
*/
if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
hlist_del(&hd->h);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
free_hydration(hd);
bio_io_error(bio);
return;
@@ -925,11 +928,11 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
* to the destination device.
*/
if (is_overwrite_bio(clone, bio)) {
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
hydration_overwrite(hd, bio);
} else {
bio_list_add(&hd->deferred_bios, bio);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
hydration_copy(hd, 1);
}
}
@@ -996,7 +999,6 @@ static unsigned long __start_next_hydration(struct clone *clone,
unsigned long offset,
struct batch_info *batch)
{
- unsigned long flags;
struct hash_table_bucket *bucket;
struct dm_clone_region_hydration *hd;
unsigned long nr_regions = clone->nr_regions;
@@ -1010,13 +1012,13 @@ static unsigned long __start_next_hydration(struct clone *clone,
break;
bucket = get_hash_table_bucket(clone, offset);
- bucket_lock_irqsave(bucket, flags);
+ bucket_lock_irq(bucket);
if (!dm_clone_is_region_hydrated(clone->cmd, offset) &&
!__hash_find(bucket, offset)) {
hydration_init(hd, offset);
__insert_region_hydration(bucket, hd);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
/* Batch hydration */
__batch_hydration(batch, hd);
@@ -1024,7 +1026,7 @@ static unsigned long __start_next_hydration(struct clone *clone,
return (offset + 1);
}
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
} while (++offset < nr_regions);
@@ -1140,13 +1142,13 @@ static void process_deferred_discards(struct clone *clone)
int r = -EPERM;
struct bio *bio;
struct blk_plug plug;
- unsigned long rs, re, flags;
+ unsigned long rs, re;
struct bio_list discards = BIO_EMPTY_LIST;
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_merge(&discards, &clone->deferred_discard_bios);
bio_list_init(&clone->deferred_discard_bios);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
if (bio_list_empty(&discards))
return;
@@ -1176,13 +1178,12 @@ out:
static void process_deferred_bios(struct clone *clone)
{
- unsigned long flags;
struct bio_list bios = BIO_EMPTY_LIST;
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_merge(&bios, &clone->deferred_bios);
bio_list_init(&clone->deferred_bios);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
if (bio_list_empty(&bios))
return;
@@ -1193,7 +1194,6 @@ static void process_deferred_bios(struct clone *clone)
static void process_deferred_flush_bios(struct clone *clone)
{
struct bio *bio;
- unsigned long flags;
struct bio_list bios = BIO_EMPTY_LIST;
struct bio_list bio_completions = BIO_EMPTY_LIST;
@@ -1201,13 +1201,13 @@ static void process_deferred_flush_bios(struct clone *clone)
* If there are any deferred flush bios, we must commit the metadata
* before issuing them or signaling their completion.
*/
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_merge(&bios, &clone->deferred_flush_bios);
bio_list_init(&clone->deferred_flush_bios);
bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
bio_list_init(&clone->deferred_flush_completions);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
!(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f87f6495652f..eb9782fc93fe 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2700,21 +2700,18 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io/%s",
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
- 1, devname);
+ cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd/%s",
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
1, devname);
else
cc->crypt_queue = alloc_workqueue("kcryptd/%s",
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
num_online_cpus(), devname);
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
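
Annotation: the dm-crypt hunk above trims the workqueue flags, dropping WQ_HIGHPRI everywhere and WQ_CPU_INTENSIVE from the I/O queue, leaving only what each queue actually needs. A minimal sketch of the allocation/teardown pattern with flags chosen per queue role (all names here are illustrative):

	#include <linux/workqueue.h>
	#include <linux/cpumask.h>
	#include <linux/errno.h>

	static struct workqueue_struct *demo_io_wq;
	static struct workqueue_struct *demo_crypt_wq;

	static int demo_create_queues(const char *devname)
	{
		/* I/O submission queue: must make forward progress under memory
		 * pressure, hence WQ_MEM_RECLAIM; one in-flight item is enough.
		 */
		demo_io_wq = alloc_workqueue("demo_io/%s", WQ_MEM_RECLAIM, 1, devname);
		if (!demo_io_wq)
			return -ENOMEM;

		/* Encryption queue: CPU-bound work, so mark it WQ_CPU_INTENSIVE
		 * to exclude it from per-CPU concurrency management, and
		 * WQ_UNBOUND so items may run on any CPU.
		 */
		demo_crypt_wq = alloc_workqueue("demo_crypt/%s",
						WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						num_online_cpus(), devname);
		if (!demo_crypt_wq) {
			destroy_workqueue(demo_io_wq);
			return -ENOMEM;
		}
		return 0;
	}
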
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 8288887b7f94..eb37584427a4 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -17,6 +17,7 @@
struct badblock {
struct rb_node node;
sector_t bb;
+ unsigned char wr_fail_cnt;
};
struct dust_device {
@@ -101,7 +102,8 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block)
return 0;
}
-static int dust_add_block(struct dust_device *dd, unsigned long long block)
+static int dust_add_block(struct dust_device *dd, unsigned long long block,
+ unsigned char wr_fail_cnt)
{
struct badblock *bblock;
unsigned long flags;
@@ -115,6 +117,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
spin_lock_irqsave(&dd->dust_lock, flags);
bblock->bb = block;
+ bblock->wr_fail_cnt = wr_fail_cnt;
if (!dust_rb_insert(&dd->badblocklist, bblock)) {
if (!dd->quiet_mode) {
DMERR("%s: block %llu already in badblocklist",
@@ -126,8 +129,10 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
}
dd->badblock_count++;
- if (!dd->quiet_mode)
- DMINFO("%s: badblock added at block %llu", __func__, block);
+ if (!dd->quiet_mode) {
+ DMINFO("%s: badblock added at block %llu with write fail count %hhu",
+ __func__, block, wr_fail_cnt);
+ }
spin_unlock_irqrestore(&dd->dust_lock, flags);
return 0;
@@ -163,22 +168,27 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock,
bool fail_read_on_bb)
{
unsigned long flags;
- int ret = DM_MAPIO_REMAPPED;
+ int r = DM_MAPIO_REMAPPED;
if (fail_read_on_bb) {
thisblock >>= dd->sect_per_block_shift;
spin_lock_irqsave(&dd->dust_lock, flags);
- ret = __dust_map_read(dd, thisblock);
+ r = __dust_map_read(dd, thisblock);
spin_unlock_irqrestore(&dd->dust_lock, flags);
}
- return ret;
+ return r;
}
-static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
+static int __dust_map_write(struct dust_device *dd, sector_t thisblock)
{
struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
+ if (bblk && bblk->wr_fail_cnt > 0) {
+ bblk->wr_fail_cnt--;
+ return DM_MAPIO_KILL;
+ }
+
if (bblk) {
rb_erase(&bblk->node, &dd->badblocklist);
dd->badblock_count--;
@@ -189,37 +199,40 @@ static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
(unsigned long long)thisblock);
}
}
+
+ return DM_MAPIO_REMAPPED;
}
static int dust_map_write(struct dust_device *dd, sector_t thisblock,
bool fail_read_on_bb)
{
unsigned long flags;
+ int ret = DM_MAPIO_REMAPPED;
if (fail_read_on_bb) {
thisblock >>= dd->sect_per_block_shift;
spin_lock_irqsave(&dd->dust_lock, flags);
- __dust_map_write(dd, thisblock);
+ ret = __dust_map_write(dd, thisblock);
spin_unlock_irqrestore(&dd->dust_lock, flags);
}
- return DM_MAPIO_REMAPPED;
+ return ret;
}
static int dust_map(struct dm_target *ti, struct bio *bio)
{
struct dust_device *dd = ti->private;
- int ret;
+ int r;
bio_set_dev(bio, dd->dev->bdev);
bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
if (bio_data_dir(bio) == READ)
- ret = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
+ r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
else
- ret = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
+ r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
- return ret;
+ return r;
}
static bool __dust_clear_badblocks(struct rb_root *tree,
@@ -375,8 +388,10 @@ static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
struct dust_device *dd = ti->private;
sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
bool invalid_msg = false;
- int result = -EINVAL;
+ int r = -EINVAL;
unsigned long long tmp, block;
+ unsigned char wr_fail_cnt;
+ unsigned int tmp_ui;
unsigned long flags;
char dummy;
@@ -388,45 +403,69 @@ static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
} else if (!strcasecmp(argv[0], "disable")) {
DMINFO("disabling read failures on bad sectors");
dd->fail_read_on_bb = false;
- result = 0;
+ r = 0;
} else if (!strcasecmp(argv[0], "enable")) {
DMINFO("enabling read failures on bad sectors");
dd->fail_read_on_bb = true;
- result = 0;
+ r = 0;
} else if (!strcasecmp(argv[0], "countbadblocks")) {
spin_lock_irqsave(&dd->dust_lock, flags);
DMINFO("countbadblocks: %llu badblock(s) found",
dd->badblock_count);
spin_unlock_irqrestore(&dd->dust_lock, flags);
- result = 0;
+ r = 0;
} else if (!strcasecmp(argv[0], "clearbadblocks")) {
- result = dust_clear_badblocks(dd);
+ r = dust_clear_badblocks(dd);
} else if (!strcasecmp(argv[0], "quiet")) {
if (!dd->quiet_mode)
dd->quiet_mode = true;
else
dd->quiet_mode = false;
- result = 0;
+ r = 0;
} else {
invalid_msg = true;
}
} else if (argc == 2) {
if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
- return result;
+ return r;
block = tmp;
sector_div(size, dd->sect_per_block);
if (block > size) {
DMERR("selected block value out of range");
- return result;
+ return r;
}
if (!strcasecmp(argv[0], "addbadblock"))
- result = dust_add_block(dd, block);
+ r = dust_add_block(dd, block, 0);
else if (!strcasecmp(argv[0], "removebadblock"))
- result = dust_remove_block(dd, block);
+ r = dust_remove_block(dd, block);
else if (!strcasecmp(argv[0], "queryblock"))
- result = dust_query_block(dd, block);
+ r = dust_query_block(dd, block);
+ else
+ invalid_msg = true;
+
+ } else if (argc == 3) {
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
+ return r;
+
+ if (sscanf(argv[2], "%u%c", &tmp_ui, &dummy) != 1)
+ return r;
+
+ block = tmp;
+ if (tmp_ui > 255) {
+ DMERR("selected write fail count out of range");
+ return r;
+ }
+ wr_fail_cnt = tmp_ui;
+ sector_div(size, dd->sect_per_block);
+ if (block > size) {
+ DMERR("selected block value out of range");
+ return r;
+ }
+
+ if (!strcasecmp(argv[0], "addbadblock"))
+ r = dust_add_block(dd, block, wr_fail_cnt);
else
invalid_msg = true;
@@ -436,7 +475,7 @@ static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
if (invalid_msg)
DMERR("unrecognized message '%s' received", argv[0]);
- return result;
+ return r;
}
static void dust_status(struct dm_target *ti, status_type_t type,
@@ -499,12 +538,12 @@ static struct target_type dust_target = {
static int __init dm_dust_init(void)
{
- int result = dm_register_target(&dust_target);
+ int r = dm_register_target(&dust_target);
- if (result < 0)
- DMERR("dm_register_target failed %d", result);
+ if (r < 0)
+ DMERR("dm_register_target failed %d", r);
- return result;
+ return r;
}
static void __exit dm_dust_exit(void)
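
Annotation: the dm-dust changes above add a three-argument form of the addbadblock message that attaches a write fail count to a bad block: writes to that block are failed until the count drains to zero, after which the block is removed from the list, while reads fail only while the block remains listed. A hedged sketch of that countdown decision in isolation (types and names are illustrative):

	#include <linux/types.h>

	struct demo_badblock {
		sector_t block;
		unsigned char wr_fail_cnt;
	};

	/* Returns true if this write should be failed (entry kept, count
	 * decremented); false if the write should pass, in which case the
	 * caller forgets the bad block, mirroring the dm-dust semantics.
	 */
	static bool demo_should_fail_write(struct demo_badblock *bb)
	{
		if (bb->wr_fail_cnt > 0) {
			bb->wr_fail_cnt--;
			return true;
		}
		return false;
	}

From user space this is driven with a message along the lines of "dmsetup message <dev> 0 addbadblock <block> <count>"; treat the exact syntax as defined by the dm-dust documentation rather than this note.
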
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 2900fbde89b3..a2cc9e45cbba 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -280,7 +280,7 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
struct flakey_c *fc = ti->private;
bio_set_dev(bio, fc->dev->bdev);
- if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
+ if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
bio->bi_iter.bi_sector =
flakey_map_sector(ti, bio->bi_iter.bi_sector);
}
@@ -322,8 +322,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
pb->bio_submitted = false;
- /* Do not fail reset zone */
- if (bio_op(bio) == REQ_OP_ZONE_RESET)
+ if (op_is_zone_mgmt(bio_op(bio)))
goto map_bio;
/* Are we alive ? */
@@ -384,7 +383,7 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
struct flakey_c *fc = ti->private;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
- if (bio_op(bio) == REQ_OP_ZONE_RESET)
+ if (op_is_zone_mgmt(bio_op(bio)))
return DM_ENDIO_DONE;
if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
@@ -460,21 +459,15 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
}
#ifdef CONFIG_BLK_DEV_ZONED
-static int flakey_report_zones(struct dm_target *ti, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+static int flakey_report_zones(struct dm_target *ti,
+ struct dm_report_zones_args *args, unsigned int nr_zones)
{
struct flakey_c *fc = ti->private;
- int ret;
+ sector_t sector = flakey_map_sector(ti, args->next_sector);
- /* Do report and remap it */
- ret = blkdev_report_zones(fc->dev->bdev, flakey_map_sector(ti, sector),
- zones, nr_zones);
- if (ret != 0)
- return ret;
-
- if (*nr_zones)
- dm_remap_zone_report(ti, fc->start, zones, nr_zones);
- return 0;
+ args->start = fc->start;
+ return blkdev_report_zones(fc->dev->bdev, sector, nr_zones,
+ dm_report_zones_cb, args);
}
#endif
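
Annotation: dm-flakey (and dm-linear below) now treat all zone management operations uniformly via op_is_zone_mgmt() instead of special-casing REQ_OP_ZONE_RESET; the helper also covers zone open, close, finish and reset-all. A minimal remap sketch using it, with a hypothetical target structure standing in for the per-target context:

	#include <linux/bio.h>
	#include <linux/blk_types.h>

	struct demo_target {
		struct block_device *bdev;
		sector_t start;		/* offset into the backing device */
	};

	static void demo_map_bio(struct demo_target *t, struct bio *bio)
	{
		bio_set_dev(bio, t->bdev);

		/* Remap anything that carries data or names a specific zone;
		 * zone management bios have no payload but still target a sector.
		 */
		if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
			bio->bi_iter.bi_sector += t->start;
	}
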
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index dab4446fe7d8..b225b3e445fa 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -53,6 +53,7 @@
#define SB_VERSION_1 1
#define SB_VERSION_2 2
#define SB_VERSION_3 3
+#define SB_VERSION_4 4
#define SB_SECTORS 8
#define MAX_SECTORS_PER_BLOCK 8
@@ -73,6 +74,7 @@ struct superblock {
#define SB_FLAG_HAVE_JOURNAL_MAC 0x1
#define SB_FLAG_RECALCULATING 0x2
#define SB_FLAG_DIRTY_BITMAP 0x4
+#define SB_FLAG_FIXED_PADDING 0x8
#define JOURNAL_ENTRY_ROUNDUP 8
@@ -250,6 +252,7 @@ struct dm_integrity_c {
bool journal_uptodate;
bool just_formatted;
bool recalculate_flag;
+ bool fix_padding;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
@@ -463,7 +466,9 @@ static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
static void sb_set_version(struct dm_integrity_c *ic)
{
- if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
+ ic->sb->version = SB_VERSION_4;
+ else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
ic->sb->version = SB_VERSION_3;
else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
ic->sb->version = SB_VERSION_2;
@@ -2955,6 +2960,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->internal_hash_alg.alg_string;
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
+ arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
ic->tag_size, ic->mode, arg_count);
if (ic->meta_dev)
@@ -2974,6 +2980,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
}
+ if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
+ DMEMIT(" fix_padding");
#define EMIT_ALG(a, n) \
do { \
@@ -3042,8 +3050,14 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
if (!ic->meta_dev) {
sector_t last_sector, last_area, last_offset;
- ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
- (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
+ /* we have to maintain excessive padding for compatibility with existing volumes */
+ __u64 metadata_run_padding =
+ ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
+ (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
+ (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
+
+ ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
+ metadata_run_padding) >> SECTOR_SHIFT;
if (!(ic->metadata_run & (ic->metadata_run - 1)))
ic->log2_metadata_run = __ffs(ic->metadata_run);
else
@@ -3086,6 +3100,8 @@ static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sec
journal_sections = 1;
if (!ic->meta_dev) {
+ if (ic->fix_padding)
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
ic->sb->journal_sections = cpu_to_le32(journal_sections);
if (!interleave_sectors)
interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
@@ -3725,6 +3741,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
} else if (!strcmp(opt_string, "recalculate")) {
ic->recalculate_flag = true;
+ } else if (!strcmp(opt_string, "fix_padding")) {
+ ic->fix_padding = true;
} else {
r = -EINVAL;
ti->error = "Invalid argument";
@@ -3867,7 +3885,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
should_write_sb = true;
}
- if (!ic->sb->version || ic->sb->version > SB_VERSION_3) {
+ if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
r = -EINVAL;
ti->error = "Unknown version";
goto bad;
@@ -4182,7 +4200,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
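
Annotation: the fix_padding change above alters how the per-zone metadata run is rounded. Assuming METADATA_PADDING_SECTORS is 8 and SECTOR_SHIFT is 9 (both assumptions of this note; check the driver source), the old expression 1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS rounds to 512 << 8 = 131072 bytes (128 KiB), whereas the intended METADATA_PADDING_SECTORS << SECTOR_SHIFT rounds to 8 << 9 = 4096 bytes (4 KiB). The new "fix_padding" argument opts freshly formatted volumes into the smaller padding, while SB_VERSION_4 keeps existing volumes readable with their original, over-padded layout.
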
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index ecefe6703736..8d07fdf63a47 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -90,7 +90,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
struct linear_c *lc = ti->private;
bio_set_dev(bio, lc->dev->bdev);
- if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
+ if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
bio->bi_iter.bi_sector =
linear_map_sector(ti, bio->bi_iter.bi_sector);
}
@@ -136,21 +136,15 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
}
#ifdef CONFIG_BLK_DEV_ZONED
-static int linear_report_zones(struct dm_target *ti, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+static int linear_report_zones(struct dm_target *ti,
+ struct dm_report_zones_args *args, unsigned int nr_zones)
{
- struct linear_c *lc = (struct linear_c *) ti->private;
- int ret;
-
- /* Do report and remap it */
- ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector),
- zones, nr_zones);
- if (ret != 0)
- return ret;
+ struct linear_c *lc = ti->private;
+ sector_t sector = linear_map_sector(ti, args->next_sector);
- if (*nr_zones)
- dm_remap_zone_report(ti, lc->start, zones, nr_zones);
- return 0;
+ args->start = lc->start;
+ return blkdev_report_zones(lc->dev->bdev, sector, nr_zones,
+ dm_report_zones_cb, args);
}
#endif
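
Annotation: both remapping targets above now use the callback form of blkdev_report_zones(): instead of filling a caller-allocated array of struct blk_zone, the block layer invokes a report_zones_cb for each zone, and dm_report_zones_cb remaps sectors through struct dm_report_zones_args. A hedged sketch of the same callback pattern outside device-mapper (names are illustrative):

	#include <linux/blkdev.h>

	/* Count the sequential-write-required zones of a zoned block device. */
	static int demo_count_seq_zone(struct blk_zone *zone, unsigned int idx,
				       void *data)
	{
		unsigned int *nr_seq = data;

		if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
			(*nr_seq)++;
		return 0;	/* a non-zero return stops the iteration */
	}

	static int demo_count_seq_zones(struct block_device *bdev,
					unsigned int *nr_seq)
	{
		int ret;

		*nr_seq = 0;
		ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
					  demo_count_seq_zone, nr_seq);
		return ret < 0 ? ret : 0;
	}
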
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b0aa595e4375..c412eaa975fc 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -209,6 +209,7 @@ struct raid_dev {
#define RT_FLAG_RS_SUSPENDED 5
#define RT_FLAG_RS_IN_SYNC 6
#define RT_FLAG_RS_RESYNCING 7
+#define RT_FLAG_RS_GROW 8
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -241,6 +242,9 @@ struct raid_set {
struct raid_type *raid_type;
struct dm_target_callbacks callbacks;
+ sector_t array_sectors;
+ sector_t dev_sectors;
+
/* Optional raid4/5/6 journal device */
struct journal_dev {
struct dm_dev *dev;
@@ -616,7 +620,6 @@ static int raid10_format_to_md_layout(struct raid_set *rs,
} else if (algorithm == ALGORITHM_RAID10_FAR) {
f = copies;
- r = !RAID10_OFFSET;
if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
r |= RAID10_USE_FAR_SETS;
@@ -1615,13 +1618,12 @@ static int _check_data_dev_sectors(struct raid_set *rs)
}
/* Calculate the sectors per device and per array used for @rs */
-static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
+static int rs_set_dev_and_array_sectors(struct raid_set *rs, sector_t sectors, bool use_mddev)
{
int delta_disks;
unsigned int data_stripes;
+ sector_t array_sectors = sectors, dev_sectors = sectors;
struct mddev *mddev = &rs->md;
- struct md_rdev *rdev;
- sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;
if (use_mddev) {
delta_disks = mddev->delta_disks;
@@ -1656,12 +1658,9 @@ static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
/* Striped layouts */
array_sectors = (data_stripes + delta_disks) * dev_sectors;
- rdev_for_each(rdev, mddev)
- if (!test_bit(Journal, &rdev->flags))
- rdev->sectors = dev_sectors;
-
mddev->array_sectors = array_sectors;
mddev->dev_sectors = dev_sectors;
+ rs_set_rdev_sectors(rs);
return _check_data_dev_sectors(rs);
bad:
@@ -1670,7 +1669,7 @@ bad:
}
/* Setup recovery on @rs */
-static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
+static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
/* raid0 does not recover */
if (rs_is_raid0(rs))
@@ -1691,22 +1690,6 @@ static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
? MaxSector : dev_sectors;
}
-/* Setup recovery on @rs based on raid type, device size and 'nosync' flag */
-static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
-{
- if (!dev_sectors)
- /* New raid set or 'sync' flag provided */
- __rs_setup_recovery(rs, 0);
- else if (dev_sectors == MaxSector)
- /* Prevent recovery */
- __rs_setup_recovery(rs, MaxSector);
- else if (__rdev_sectors(rs) < dev_sectors)
- /* Grown raid set */
- __rs_setup_recovery(rs, __rdev_sectors(rs));
- else
- __rs_setup_recovery(rs, MaxSector);
-}
-
static void do_table_event(struct work_struct *ws)
{
struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
@@ -2474,7 +2457,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
return -EINVAL;
}
- /* Enable bitmap creation for RAID levels != 0 */
+ /* Enable bitmap creation on @rs unless no metadevs or raid0 or journaled raid4/5/6 set. */
mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
@@ -2911,7 +2894,7 @@ static int rs_setup_reshape(struct raid_set *rs)
/* Remove disk(s) */
} else if (rs->delta_disks < 0) {
- r = rs_set_dev_and_array_sectors(rs, true);
+ r = rs_set_dev_and_array_sectors(rs, rs->ti->len, true);
mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */
/* Change layout and/or chunk size */
@@ -3008,7 +2991,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bool resize = false;
struct raid_type *rt;
unsigned int num_raid_params, num_raid_devs;
- sector_t calculated_dev_sectors, rdev_sectors, reshape_sectors;
+ sector_t sb_array_sectors, rdev_sectors, reshape_sectors;
struct raid_set *rs = NULL;
const char *arg;
struct rs_layout rs_layout;
@@ -3067,11 +3050,13 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
*
* Any existing superblock will overwrite the array and device sizes
*/
- r = rs_set_dev_and_array_sectors(rs, false);
+ r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
if (r)
goto bad;
- calculated_dev_sectors = rs->md.dev_sectors;
+ /* Memorize the just-calculated, potentially larger sizes to grow the raid set in preresume */

+ rs->array_sectors = rs->md.array_sectors;
+ rs->dev_sectors = rs->md.dev_sectors;
/*
* Backup any new raid set level, layout, ...
@@ -3084,6 +3069,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
+ /* All in-core metadata now as of current superblocks after calling analyse_superblocks() */
+ sb_array_sectors = rs->md.array_sectors;
rdev_sectors = __rdev_sectors(rs);
if (!rdev_sectors) {
ti->error = "Invalid rdev size";
@@ -3093,8 +3080,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
reshape_sectors = _get_reshape_sectors(rs);
- if (calculated_dev_sectors != rdev_sectors)
- resize = calculated_dev_sectors != (reshape_sectors ? rdev_sectors - reshape_sectors : rdev_sectors);
+ if (rs->dev_sectors != rdev_sectors) {
+ resize = (rs->dev_sectors != rdev_sectors - reshape_sectors);
+ if (rs->dev_sectors > rdev_sectors - reshape_sectors)
+ set_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
+ }
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
@@ -3121,13 +3111,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
rs_set_new(rs);
} else if (rs_is_recovering(rs)) {
- /* Rebuild particular devices */
- if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
- set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
- rs_setup_recovery(rs, MaxSector);
- }
/* A recovering raid set may be resized */
- ; /* skip setup rs */
+ goto size_check;
} else if (rs_is_reshaping(rs)) {
/* Have to reject size change request during reshape */
if (resize) {
@@ -3171,6 +3156,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
rs_setup_recovery(rs, MaxSector);
rs_set_new(rs);
} else if (rs_reshape_requested(rs)) {
+ /* Only request grow on raid set size extensions, not on reshapes. */
+ clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
+
/*
* No need to check for 'ongoing' takeover here, because takeover
* is an instant operation as opposed to an ongoing reshape.
@@ -3201,13 +3189,31 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
rs_set_cur(rs);
} else {
+size_check:
/* May not set recovery when a device rebuild is requested */
if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
- rs_setup_recovery(rs, MaxSector);
+ clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
- } else
- rs_setup_recovery(rs, test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ?
- 0 : (resize ? calculated_dev_sectors : MaxSector));
+ rs_setup_recovery(rs, MaxSector);
+ } else if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
+ /*
+ * Set raid set to current size, i.e. size as of
+ * superblocks to grow to larger size in preresume.
+ */
+ r = rs_set_dev_and_array_sectors(rs, sb_array_sectors, false);
+ if (r)
+ goto bad;
+
+ rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors);
+ } else {
+ /* This is either no size change or a shrink; update the size and record it in the superblocks */
+ r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
+ if (r)
+ goto bad;
+
+ if (sb_array_sectors > rs->array_sectors)
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ }
rs_set_cur(rs);
}
@@ -3406,10 +3412,9 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
/* Helper to return resync/reshape progress for @rs and runtime flags for raid set in sync / resynching */
static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
- sector_t resync_max_sectors)
+ enum sync_state state, sector_t resync_max_sectors)
{
sector_t r;
- enum sync_state state;
struct mddev *mddev = &rs->md;
clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
@@ -3420,8 +3425,6 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
} else {
- state = decipher_sync_action(mddev, recovery);
-
if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
r = mddev->recovery_cp;
else
@@ -3439,18 +3442,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
/*
* In case we are recovering, the array is not in sync
* and health chars should show the recovering legs.
+ *
+ * Already retrieved recovery offset from curr_resync_completed above.
*/
;
- else if (state == st_resync)
- /*
- * If "resync" is occurring, the raid set
- * is or may be out of sync hence the health
- * characters shall be 'a'.
- */
- set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
- else if (state == st_reshape)
+
+ else if (state == st_resync || state == st_reshape)
/*
- * If "reshape" is occurring, the raid set
+ * If "resync/reshape" is occurring, the raid set
* is or may be out of sync hence the health
* characters shall be 'a'.
*/
@@ -3464,22 +3463,22 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
*/
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
- else {
- struct md_rdev *rdev;
-
+ else if (test_bit(MD_RECOVERY_NEEDED, &recovery))
/*
* We are idle and recovery is needed, prevent 'A' chars race
* caused by components still set to in-sync by constructor.
*/
- if (test_bit(MD_RECOVERY_NEEDED, &recovery))
- set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+ else {
/*
- * The raid set may be doing an initial sync, or it may
- * be rebuilding individual components. If all the
- * devices are In_sync, then it is the raid set that is
- * being initialized.
+ * We are idle and the raid set may be doing an initial
+ * sync, or it may be rebuilding individual components.
+ * If all the devices are In_sync, then it is the raid set
+ * that is being initialized.
*/
+ struct md_rdev *rdev;
+
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags) &&
@@ -3512,7 +3511,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
unsigned int rebuild_disks;
unsigned int write_mostly_params = 0;
sector_t progress, resync_max_sectors, resync_mismatches;
- const char *sync_action;
+ enum sync_state state;
struct raid_type *rt;
switch (type) {
@@ -3526,14 +3525,14 @@ static void raid_status(struct dm_target *ti, status_type_t type,
/* Access most recent mddev properties for status output */
smp_rmb();
- recovery = rs->md.recovery;
/* Get sensible max sectors even if raid set not yet started */
resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
mddev->resync_max_sectors : mddev->dev_sectors;
- progress = rs_get_progress(rs, recovery, resync_max_sectors);
+ recovery = rs->md.recovery;
+ state = decipher_sync_action(mddev, recovery);
+ progress = rs_get_progress(rs, recovery, state, resync_max_sectors);
resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
atomic64_read(&mddev->resync_mismatches) : 0;
- sync_action = sync_str(decipher_sync_action(&rs->md, recovery));
/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
for (i = 0; i < rs->raid_disks; i++)
@@ -3561,7 +3560,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
* See Documentation/admin-guide/device-mapper/dm-raid.rst for
* information on each of these states.
*/
- DMEMIT(" %s", sync_action);
+ DMEMIT(" %s", sync_str(state));
/*
* v1.5.0+:
@@ -3955,11 +3954,22 @@ static int raid_preresume(struct dm_target *ti)
if (r)
return r;
- /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
- if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
- mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
- r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors,
- to_bytes(rs->requested_bitmap_chunk_sectors), 0);
+ /* We are extending the raid set size, adjust mddev/md_rdev sizes and set capacity. */
+ if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
+ mddev->array_sectors = rs->array_sectors;
+ mddev->dev_sectors = rs->dev_sectors;
+ rs_set_rdev_sectors(rs);
+ rs_set_capacity(rs);
+ }
+
+ /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
+ if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
+ (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
+ (rs->requested_bitmap_chunk_sectors &&
+ mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
+ int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;
+
+ r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors, chunksize, 0);
if (r)
DMERR("Failed to resize bitmap");
}
@@ -3968,8 +3978,10 @@ static int raid_preresume(struct dm_target *ti)
/* Be prepared for mddev_resume() in raid_resume() */
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
- set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
mddev->resync_min = mddev->recovery_cp;
+ if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags))
+ mddev->resync_max_sectors = mddev->dev_sectors;
}
/* Check for any reshape request unless new raid set */
@@ -4017,7 +4029,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 14, 0},
+ .version = {1, 15, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 8547d7594338..63bbcc20f49a 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -55,19 +55,6 @@ static void trigger_event(struct work_struct *work)
dm_table_event(sc->ti->table);
}
-static inline struct stripe_c *alloc_context(unsigned int stripes)
-{
- size_t len;
-
- if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
- stripes))
- return NULL;
-
- len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);
-
- return kmalloc(len, GFP_KERNEL);
-}
-
/*
* Parse a single <dev> <sector> pair
*/
@@ -142,7 +129,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
- sc = alloc_context(stripes);
+ sc = kmalloc(struct_size(sc, stripe, stripes), GFP_KERNEL);
if (!sc) {
ti->error = "Memory allocation for striped context "
"failed";
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 52e049554f5c..2ae0c1913766 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -918,21 +918,15 @@ bool dm_table_supports_dax(struct dm_table *t,
static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
-struct verify_rq_based_data {
- unsigned sq_count;
- unsigned mq_count;
-};
-
-static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
- struct verify_rq_based_data *v = data;
+ struct block_device *bdev = dev->bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
- if (queue_is_mq(q))
- v->mq_count++;
- else
- v->sq_count++;
+ /* request-based cannot stack on partitions! */
+ if (bdev != bdev->bd_contains)
+ return false;
return queue_is_mq(q);
}
@@ -941,7 +935,6 @@ static int dm_table_determine_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
struct dm_target *tgt;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
@@ -1045,14 +1038,10 @@ verify_rq_based:
/* Non-request-stackable devices can't be used for request-based dm */
if (!tgt->type->iterate_devices ||
- !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
+ !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
DMERR("table load rejected: including non-request-stackable devices");
return -EINVAL;
}
- if (v.sq_count > 0) {
- DMERR("table load rejected: not all devices are blk-mq request-stackable");
- return -EINVAL;
- }
return 0;
}
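
Annotation: the rewritten iterate_devices callback above folds two requirements into one predicate: request-based dm may only stack on whole blk-mq devices, so a partition (a bdev whose bd_contains points at a different block_device) is rejected up front. A hedged sketch of the same test as a standalone helper, valid for this kernel generation (later kernels spell the partition check differently):

	#include <linux/blkdev.h>
	#include <linux/fs.h>

	/* True if @bdev can sit underneath a request-based stacking driver. */
	static bool demo_rq_stackable(struct block_device *bdev)
	{
		/* Partitions cannot be used: request remapping needs the whole disk. */
		if (bdev != bdev->bd_contains)
			return false;

		/* Only blk-mq request queues can have requests stacked onto them. */
		return queue_is_mq(bdev_get_queue(bdev));
	}
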
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index fcd887703f95..5a2c494cb552 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -609,13 +609,12 @@ static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
blk_status_t error)
{
struct bio_list bios;
- unsigned long flags;
bio_list_init(&bios);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
__merge_bio_list(&bios, master);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
error_bio_list(&bios, error);
}
@@ -623,15 +622,14 @@ static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
static void requeue_deferred_cells(struct thin_c *tc)
{
struct pool *pool = tc->pool;
- unsigned long flags;
struct list_head cells;
struct dm_bio_prison_cell *cell, *tmp;
INIT_LIST_HEAD(&cells);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
list_splice_init(&tc->deferred_cells, &cells);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
list_for_each_entry_safe(cell, tmp, &cells, user_list)
cell_requeue(pool, cell);
@@ -640,14 +638,13 @@ static void requeue_deferred_cells(struct thin_c *tc)
static void requeue_io(struct thin_c *tc)
{
struct bio_list bios;
- unsigned long flags;
bio_list_init(&bios);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
__merge_bio_list(&bios, &tc->deferred_bio_list);
__merge_bio_list(&bios, &tc->retry_on_resume_list);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
error_bio_list(&bios, BLK_STS_DM_REQUEUE);
requeue_deferred_cells(tc);
@@ -756,7 +753,6 @@ static void inc_all_io_entry(struct pool *pool, struct bio *bio)
static void issue(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
- unsigned long flags;
if (!bio_triggers_commit(tc, bio)) {
generic_make_request(bio);
@@ -777,9 +773,9 @@ static void issue(struct thin_c *tc, struct bio *bio)
* Batch together any bios that trigger commits and then issue a
* single commit for them in process_deferred_bios().
*/
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
bio_list_add(&pool->deferred_flush_bios, bio);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
}
static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
@@ -886,12 +882,15 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
{
struct pool *pool = tc->pool;
unsigned long flags;
+ int has_work;
spin_lock_irqsave(&tc->lock, flags);
cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
+ has_work = !bio_list_empty(&tc->deferred_bio_list);
spin_unlock_irqrestore(&tc->lock, flags);
- wake_worker(pool);
+ if (has_work)
+ wake_worker(pool);
}
static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
@@ -960,7 +959,6 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
- unsigned long flags;
/*
* If the bio has the REQ_FUA flag set we must commit the metadata
@@ -985,9 +983,9 @@ static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
* Batch together any bios that trigger commits and then issue a
* single commit for them in process_deferred_bios().
*/
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
bio_list_add(&pool->deferred_flush_completions, bio);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
}
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
@@ -1226,14 +1224,13 @@ static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
static void process_prepared(struct pool *pool, struct list_head *head,
process_mapping_fn *fn)
{
- unsigned long flags;
struct list_head maps;
struct dm_thin_new_mapping *m, *tmp;
INIT_LIST_HEAD(&maps);
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
list_splice_init(head, &maps);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
list_for_each_entry_safe(m, tmp, &maps, list)
(*fn)(m);
@@ -1510,14 +1507,12 @@ static int commit(struct pool *pool)
static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
- unsigned long flags;
-
if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
DMWARN("%s: reached low water mark for data device: sending event.",
dm_device_name(pool->pool_md));
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
pool->low_water_triggered = true;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
dm_table_event(pool->ti->table);
}
}
@@ -1593,11 +1588,10 @@ static void retry_on_resume(struct bio *bio)
{
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
- unsigned long flags;
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
bio_list_add(&tc->retry_on_resume_list, bio);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
}
static blk_status_t should_error_unserviceable_bio(struct pool *pool)
@@ -2170,7 +2164,6 @@ static void __sort_thin_deferred_bios(struct thin_c *tc)
static void process_thin_deferred_bios(struct thin_c *tc)
{
struct pool *pool = tc->pool;
- unsigned long flags;
struct bio *bio;
struct bio_list bios;
struct blk_plug plug;
@@ -2184,10 +2177,10 @@ static void process_thin_deferred_bios(struct thin_c *tc)
bio_list_init(&bios);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
if (bio_list_empty(&tc->deferred_bio_list)) {
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
return;
}
@@ -2196,7 +2189,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
bio_list_merge(&bios, &tc->deferred_bio_list);
bio_list_init(&tc->deferred_bio_list);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
@@ -2206,10 +2199,10 @@ static void process_thin_deferred_bios(struct thin_c *tc)
* prepared mappings to process.
*/
if (ensure_next_mapping(pool)) {
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
bio_list_add(&tc->deferred_bio_list, bio);
bio_list_merge(&tc->deferred_bio_list, &bios);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
break;
}
@@ -2264,16 +2257,15 @@ static unsigned sort_cells(struct pool *pool, struct list_head *cells)
static void process_thin_deferred_cells(struct thin_c *tc)
{
struct pool *pool = tc->pool;
- unsigned long flags;
struct list_head cells;
struct dm_bio_prison_cell *cell;
unsigned i, j, count;
INIT_LIST_HEAD(&cells);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
list_splice_init(&tc->deferred_cells, &cells);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
if (list_empty(&cells))
return;
@@ -2294,9 +2286,9 @@ static void process_thin_deferred_cells(struct thin_c *tc)
for (j = i; j < count; j++)
list_add(&pool->cell_sort_array[j]->user_list, &cells);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
list_splice(&cells, &tc->deferred_cells);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
return;
}
@@ -2349,7 +2341,6 @@ static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
static void process_deferred_bios(struct pool *pool)
{
- unsigned long flags;
struct bio *bio;
struct bio_list bios, bio_completions;
struct thin_c *tc;
@@ -2368,13 +2359,13 @@ static void process_deferred_bios(struct pool *pool)
bio_list_init(&bios);
bio_list_init(&bio_completions);
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
bio_list_merge(&bios, &pool->deferred_flush_bios);
bio_list_init(&pool->deferred_flush_bios);
bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
bio_list_init(&pool->deferred_flush_completions);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
!(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
@@ -2657,12 +2648,11 @@ static void metadata_operation_failed(struct pool *pool, const char *op, int r)
*/
static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
{
- unsigned long flags;
struct pool *pool = tc->pool;
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
bio_list_add(&tc->deferred_bio_list, bio);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
wake_worker(pool);
}
@@ -2678,13 +2668,12 @@ static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
- unsigned long flags;
struct pool *pool = tc->pool;
throttle_lock(&pool->throttle);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
list_add_tail(&cell->user_list, &tc->deferred_cells);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
throttle_unlock(&pool->throttle);
wake_worker(pool);
@@ -2810,15 +2799,14 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
static void requeue_bios(struct pool *pool)
{
- unsigned long flags;
struct thin_c *tc;
rcu_read_lock();
list_for_each_entry_rcu(tc, &pool->active_thins, list) {
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
bio_list_init(&tc->retry_on_resume_list);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
}
rcu_read_unlock();
}
@@ -3412,15 +3400,14 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
int r;
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- unsigned long flags;
/*
* As this is a singleton target, ti->begin is always zero.
*/
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
bio_set_dev(bio, pt->data_dev->bdev);
r = DM_MAPIO_REMAPPED;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
return r;
}
@@ -3591,7 +3578,6 @@ static void pool_resume(struct dm_target *ti)
{
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- unsigned long flags;
/*
* Must requeue active_thins' bios and then resume
@@ -3600,10 +3586,10 @@ static void pool_resume(struct dm_target *ti)
requeue_bios(pool);
pool_resume_active_thins(pool);
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
pool->low_water_triggered = false;
pool->suspended = false;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
do_waker(&pool->waker.work);
}
@@ -3612,11 +3598,10 @@ static void pool_presuspend(struct dm_target *ti)
{
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- unsigned long flags;
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
pool->suspended = true;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
pool_suspend_active_thins(pool);
}
@@ -3625,13 +3610,12 @@ static void pool_presuspend_undo(struct dm_target *ti)
{
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- unsigned long flags;
pool_resume_active_thins(pool);
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
pool->suspended = false;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
}
static void pool_postsuspend(struct dm_target *ti)
@@ -4110,11 +4094,10 @@ static void thin_put(struct thin_c *tc)
static void thin_dtr(struct dm_target *ti)
{
struct thin_c *tc = ti->private;
- unsigned long flags;
- spin_lock_irqsave(&tc->pool->lock, flags);
+ spin_lock_irq(&tc->pool->lock);
list_del_rcu(&tc->list);
- spin_unlock_irqrestore(&tc->pool->lock, flags);
+ spin_unlock_irq(&tc->pool->lock);
synchronize_rcu();
thin_put(tc);
@@ -4150,7 +4133,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct thin_c *tc;
struct dm_dev *pool_dev, *origin_dev;
struct mapped_device *pool_md;
- unsigned long flags;
mutex_lock(&dm_thin_pool_table.mutex);
@@ -4244,9 +4226,9 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
mutex_unlock(&dm_thin_pool_table.mutex);
- spin_lock_irqsave(&tc->pool->lock, flags);
+ spin_lock_irq(&tc->pool->lock);
if (tc->pool->suspended) {
- spin_unlock_irqrestore(&tc->pool->lock, flags);
+ spin_unlock_irq(&tc->pool->lock);
mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
ti->error = "Unable to activate thin device while pool is suspended";
r = -EINVAL;
@@ -4255,7 +4237,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
refcount_set(&tc->refcount, 1);
init_completion(&tc->can_destroy);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
- spin_unlock_irqrestore(&tc->pool->lock, flags);
+ spin_unlock_irq(&tc->pool->lock);
/*
* This synchronize_rcu() call is needed here otherwise we risk a
* wake_worker() call finding no bios to process (because the newly
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index d06b8aa41e26..7d727a72aa13 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1218,7 +1218,8 @@ bio_copy:
}
} while (bio->bi_iter.bi_size);
- if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks))
+ if (unlikely(bio->bi_opf & REQ_FUA ||
+ wc->uncommitted_blocks >= wc->autocommit_blocks))
writecache_flush(wc);
else
writecache_schedule_autocommit(wc);
@@ -1561,7 +1562,7 @@ static void writecache_writeback(struct work_struct *work)
{
struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
struct blk_plug plug;
- struct wc_entry *f, *g, *e = NULL;
+ struct wc_entry *f, *uninitialized_var(g), *e = NULL;
struct rb_node *node, *next_node;
struct list_head skipped;
struct writeback_list wbl;
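
Annotation: the first dm-writecache hunk above makes REQ_FUA writes trigger an immediate flush instead of waiting for the autocommit threshold, since FUA promises durability by the time the bio completes. A minimal sketch of that decision point (everything except the flag test is hypothetical):

	#include <linux/bio.h>
	#include <linux/blk_types.h>

	/* Decide whether a just-buffered write must be committed right away. */
	static bool demo_needs_immediate_commit(struct bio *bio,
						unsigned int uncommitted_blocks,
						unsigned int autocommit_blocks)
	{
		/* REQ_FUA: the caller expects durability at completion time. */
		if (bio->bi_opf & REQ_FUA)
			return true;

		/* Otherwise commit only once enough dirty blocks have piled up. */
		return uncommitted_blocks >= autocommit_blocks;
	}
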
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 595a73110e17..22b3cb0050a7 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -554,6 +554,7 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
dmz_release_mblock(zmd, mblk);
+ dmz_check_bdev(zmd->dev);
return ERR_PTR(-EIO);
}
@@ -625,6 +626,8 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
ret = submit_bio_wait(bio);
bio_put(bio);
+ if (ret)
+ dmz_check_bdev(zmd->dev);
return ret;
}
@@ -691,6 +694,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
clear_bit(DMZ_META_ERROR, &mblk->state);
+ dmz_check_bdev(zmd->dev);
ret = -EIO;
}
nr_mblks_submitted--;
@@ -768,7 +772,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
- goto out;
+ goto err;
}
/*
@@ -778,7 +782,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
*/
ret = dmz_log_dirty_mblocks(zmd, &write_list);
if (ret)
- goto out;
+ goto err;
/*
* The log is on disk. It is now safe to update in place
@@ -786,11 +790,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
*/
ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
if (ret)
- goto out;
+ goto err;
ret = dmz_write_sb(zmd, zmd->mblk_primary);
if (ret)
- goto out;
+ goto err;
while (!list_empty(&write_list)) {
mblk = list_first_entry(&write_list, struct dmz_mblock, link);
@@ -805,16 +809,20 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
zmd->sb_gen++;
out:
- if (ret && !list_empty(&write_list)) {
- spin_lock(&zmd->mblk_lock);
- list_splice(&write_list, &zmd->mblk_dirty_list);
- spin_unlock(&zmd->mblk_lock);
- }
-
dmz_unlock_flush(zmd);
up_write(&zmd->mblk_sem);
return ret;
+
+err:
+ if (!list_empty(&write_list)) {
+ spin_lock(&zmd->mblk_lock);
+ list_splice(&write_list, &zmd->mblk_dirty_list);
+ spin_unlock(&zmd->mblk_lock);
+ }
+ if (!dmz_check_bdev(zmd->dev))
+ ret = -EIO;
+ goto out;
}
/*
@@ -1080,9 +1088,10 @@ static int dmz_load_sb(struct dmz_metadata *zmd)
/*
* Initialize a zone descriptor.
*/
-static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
- struct blk_zone *blkz)
+static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
{
+ struct dmz_metadata *zmd = data;
+ struct dm_zone *zone = &zmd->zones[idx];
struct dmz_dev *dev = zmd->dev;
/* Ignore the eventual last runt (smaller) zone */
@@ -1096,26 +1105,29 @@ static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
atomic_set(&zone->refcount, 0);
zone->chunk = DMZ_MAP_UNMAPPED;
- if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
+ switch (blkz->type) {
+ case BLK_ZONE_TYPE_CONVENTIONAL:
set_bit(DMZ_RND, &zone->flags);
zmd->nr_rnd_zones++;
- } else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
- blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
+ break;
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
set_bit(DMZ_SEQ, &zone->flags);
- } else
+ break;
+ default:
return -ENXIO;
-
- if (blkz->cond == BLK_ZONE_COND_OFFLINE)
- set_bit(DMZ_OFFLINE, &zone->flags);
- else if (blkz->cond == BLK_ZONE_COND_READONLY)
- set_bit(DMZ_READ_ONLY, &zone->flags);
+ }
if (dmz_is_rnd(zone))
zone->wp_block = 0;
else
zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
- if (!dmz_is_offline(zone) && !dmz_is_readonly(zone)) {
+ if (blkz->cond == BLK_ZONE_COND_OFFLINE)
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ else if (blkz->cond == BLK_ZONE_COND_READONLY)
+ set_bit(DMZ_READ_ONLY, &zone->flags);
+ else {
zmd->nr_useable_zones++;
if (dmz_is_rnd(zone)) {
zmd->nr_rnd_zones++;
@@ -1139,23 +1151,13 @@ static void dmz_drop_zones(struct dmz_metadata *zmd)
}
/*
- * The size of a zone report in number of zones.
- * This results in 4096*64B=256KB report zones commands.
- */
-#define DMZ_REPORT_NR_ZONES 4096
-
-/*
* Allocate and initialize zone descriptors using the zone
* information from disk.
*/
static int dmz_init_zones(struct dmz_metadata *zmd)
{
struct dmz_dev *dev = zmd->dev;
- struct dm_zone *zone;
- struct blk_zone *blkz;
- unsigned int nr_blkz;
- sector_t sector = 0;
- int i, ret = 0;
+ int ret;
/* Init */
zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
@@ -1169,54 +1171,38 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
dmz_dev_info(dev, "Using %zu B for zone information",
sizeof(struct dm_zone) * dev->nr_zones);
- /* Get zone information */
- nr_blkz = DMZ_REPORT_NR_ZONES;
- blkz = kcalloc(nr_blkz, sizeof(struct blk_zone), GFP_KERNEL);
- if (!blkz) {
- ret = -ENOMEM;
- goto out;
- }
-
/*
- * Get zone information and initialize zone descriptors.
- * At the same time, determine where the super block
- * should be: first block of the first randomly writable
- * zone.
+ * Get zone information and initialize zone descriptors. At the same
+ * time, determine where the super block should be: first block of the
+ * first randomly writable zone.
*/
- zone = zmd->zones;
- while (sector < dev->capacity) {
- /* Get zone information */
- nr_blkz = DMZ_REPORT_NR_ZONES;
- ret = blkdev_report_zones(dev->bdev, sector, blkz, &nr_blkz);
- if (ret) {
- dmz_dev_err(dev, "Report zones failed %d", ret);
- goto out;
- }
+ ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES, dmz_init_zone,
+ zmd);
+ if (ret < 0) {
+ dmz_drop_zones(zmd);
+ return ret;
+ }
- if (!nr_blkz)
- break;
+ return 0;
+}
- /* Process report */
- for (i = 0; i < nr_blkz; i++) {
- ret = dmz_init_zone(zmd, zone, &blkz[i]);
- if (ret)
- goto out;
- sector += dev->zone_nr_sectors;
- zone++;
- }
- }
+static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
+ void *data)
+{
+ struct dm_zone *zone = data;
- /* The entire zone configuration of the disk should now be known */
- if (sector < dev->capacity) {
- dmz_dev_err(dev, "Failed to get correct zone information");
- ret = -ENXIO;
- }
-out:
- kfree(blkz);
- if (ret)
- dmz_drop_zones(zmd);
+ clear_bit(DMZ_OFFLINE, &zone->flags);
+ clear_bit(DMZ_READ_ONLY, &zone->flags);
+ if (blkz->cond == BLK_ZONE_COND_OFFLINE)
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ else if (blkz->cond == BLK_ZONE_COND_READONLY)
+ set_bit(DMZ_READ_ONLY, &zone->flags);
- return ret;
+ if (dmz_is_seq(zone))
+ zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
+ else
+ zone->wp_block = 0;
+ return 0;
}
/*
@@ -1224,9 +1210,7 @@ out:
*/
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- unsigned int nr_blkz = 1;
unsigned int noio_flag;
- struct blk_zone blkz;
int ret;
/*
@@ -1236,29 +1220,19 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
* GFP_NOIO was specified.
*/
noio_flag = memalloc_noio_save();
- ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
- &blkz, &nr_blkz);
+ ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), 1,
+ dmz_update_zone_cb, zone);
memalloc_noio_restore(noio_flag);
- if (!nr_blkz)
+
+ if (ret == 0)
ret = -EIO;
- if (ret) {
+ if (ret < 0) {
dmz_dev_err(zmd->dev, "Get zone %u report failed",
dmz_id(zmd, zone));
+ dmz_check_bdev(zmd->dev);
return ret;
}
- clear_bit(DMZ_OFFLINE, &zone->flags);
- clear_bit(DMZ_READ_ONLY, &zone->flags);
- if (blkz.cond == BLK_ZONE_COND_OFFLINE)
- set_bit(DMZ_OFFLINE, &zone->flags);
- else if (blkz.cond == BLK_ZONE_COND_READONLY)
- set_bit(DMZ_READ_ONLY, &zone->flags);
-
- if (dmz_is_seq(zone))
- zone->wp_block = dmz_sect2blk(blkz.wp - blkz.start);
- else
- zone->wp_block = 0;
-
return 0;
}
@@ -1312,9 +1286,9 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
struct dmz_dev *dev = zmd->dev;
- ret = blkdev_reset_zones(dev->bdev,
- dmz_start_sect(zmd, zone),
- dev->zone_nr_sectors, GFP_NOIO);
+ ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
+ dmz_start_sect(zmd, zone),
+ dev->zone_nr_sectors, GFP_NOIO);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
dmz_id(zmd, zone), ret);
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index d240d7ca8a8a..e7ace908a9b7 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -82,6 +82,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
"Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
dmz_id(zmd, zone), (unsigned long long)wp_block,
(unsigned long long)block, nr_blocks, ret);
+ dmz_check_bdev(zrc->dev);
return ret;
}
@@ -489,12 +490,7 @@ static void dmz_reclaim_work(struct work_struct *work)
ret = dmz_do_reclaim(zrc);
if (ret) {
dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
- if (ret == -EIO)
- /*
- * LLD might be performing some error handling sequence
- * at the underlying device. To not interfere, do not
- * attempt to schedule the next reclaim run immediately.
- */
+ if (!dmz_check_bdev(zrc->dev))
return;
}
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index d3bcc4197f5d..4574e0dedbd6 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -80,6 +80,8 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
bio->bi_status = status;
+ if (bio->bi_status != BLK_STS_OK)
+ bioctx->target->dev->flags |= DMZ_CHECK_BDEV;
if (refcount_dec_and_test(&bioctx->ref)) {
struct dm_zone *zone = bioctx->zone;
@@ -565,32 +567,52 @@ out:
}
/*
- * Check the backing device availability. If it's on the way out,
+ * Check if the backing device is being removed. If it's on the way out,
* start failing I/O. Reclaim and metadata components also call this
* function to cleanly abort operation in the event of such failure.
*/
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
{
- struct gendisk *disk;
+ if (dmz_dev->flags & DMZ_BDEV_DYING)
+ return true;
- if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
- disk = dmz_dev->bdev->bd_disk;
- if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
- dmz_dev_warn(dmz_dev, "Backing device queue dying");
- dmz_dev->flags |= DMZ_BDEV_DYING;
- } else if (disk->fops->check_events) {
- if (disk->fops->check_events(disk, 0) &
- DISK_EVENT_MEDIA_CHANGE) {
- dmz_dev_warn(dmz_dev, "Backing device offline");
- dmz_dev->flags |= DMZ_BDEV_DYING;
- }
- }
+ if (dmz_dev->flags & DMZ_CHECK_BDEV)
+ return !dmz_check_bdev(dmz_dev);
+
+ if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
+ dmz_dev_warn(dmz_dev, "Backing device queue dying");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
}
return dmz_dev->flags & DMZ_BDEV_DYING;
}
/*
+ * Check the backing device availability. This detects events such as
+ * the backing device going offline due to errors, media removal, etc.
+ * This check is less efficient than dmz_bdev_is_dying() and should
+ * only be performed as part of error handling.
+ */
+bool dmz_check_bdev(struct dmz_dev *dmz_dev)
+{
+ struct gendisk *disk;
+
+ dmz_dev->flags &= ~DMZ_CHECK_BDEV;
+
+ if (dmz_bdev_is_dying(dmz_dev))
+ return false;
+
+ disk = dmz_dev->bdev->bd_disk;
+ if (disk->fops->check_events &&
+ disk->fops->check_events(disk, 0) & DISK_EVENT_MEDIA_CHANGE) {
+ dmz_dev_warn(dmz_dev, "Backing device offline");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
+ }
+
+ return !(dmz_dev->flags & DMZ_BDEV_DYING);
+}
+
+/*
* Process a new BIO.
*/
static int dmz_map(struct dm_target *ti, struct bio *bio)
@@ -902,8 +924,8 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
struct dmz_target *dmz = ti->private;
- if (dmz_bdev_is_dying(dmz->dev))
- return -ENODEV;
+ if (!dmz_check_bdev(dmz->dev))
+ return -EIO;
*bdev = dmz->dev->bdev;
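
The split between dmz_bdev_is_dying() and dmz_check_bdev() is easiest to see in isolation: the I/O completion path only raises a flag, the cheap check consults cached state, and the expensive media probe runs once, from error handling, when that flag is set. Below is a minimal host-side sketch of that control flow; the flag names, the stubbed predicates and main() are illustrative stand-ins, not the driver's helpers.

/*
 * Host-side model of the deferred backing-device check. The queue/disk
 * probes are replaced by plain booleans, so only the flag handling and the
 * call ordering mirror the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define BDEV_DYING  (1 << 0)
#define CHECK_BDEV  (1 << 1)

struct model_dev { unsigned int flags; };

static bool queue_dying;            /* stand-in for blk_queue_dying() */
static bool media_change_pending;   /* stand-in for check_events() */

static bool model_check_bdev(struct model_dev *dev);

/* Cheap check: looks at cached flags unless a recheck was requested. */
static bool model_bdev_is_dying(struct model_dev *dev)
{
	if (dev->flags & BDEV_DYING)
		return true;
	if (dev->flags & CHECK_BDEV)
		return !model_check_bdev(dev);
	if (queue_dying)
		dev->flags |= BDEV_DYING;
	return dev->flags & BDEV_DYING;
}

/* Expensive check: clears the request flag, then probes the device state. */
static bool model_check_bdev(struct model_dev *dev)
{
	dev->flags &= ~CHECK_BDEV;
	if (model_bdev_is_dying(dev))
		return false;
	if (media_change_pending)
		dev->flags |= BDEV_DYING;
	return !(dev->flags & BDEV_DYING);
}

int main(void)
{
	struct model_dev dev = { 0 };

	dev.flags |= CHECK_BDEV;        /* a bio completed with an error */
	media_change_pending = true;    /* the media has gone away */
	printf("dying: %d\n", model_bdev_is_dying(&dev));  /* prints 1 */
	return 0;
}
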
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index d8e70b0ade35..5b5e493d479c 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -72,6 +72,7 @@ struct dmz_dev {
/* Device flags. */
#define DMZ_BDEV_DYING (1 << 0)
+#define DMZ_CHECK_BDEV (2 << 0)
/*
* Zone descriptor.
@@ -255,5 +256,6 @@ void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
* Functions defined in dm-zoned-target.c
*/
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
+bool dmz_check_bdev(struct dmz_dev *dmz_dev);
#endif /* DM_ZONED_H */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1a5e328c443a..e8f9661a10a1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -440,14 +440,48 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return dm_get_geometry(md, geo);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
+{
+ struct dm_report_zones_args *args = data;
+ sector_t sector_diff = args->tgt->begin - args->start;
+
+ /*
+ * Ignore zones beyond the target range.
+ */
+ if (zone->start >= args->start + args->tgt->len)
+ return 0;
+
+ /*
+ * Remap the start sector and write pointer position of the zone
+ * to match its position in the target range.
+ */
+ zone->start += sector_diff;
+ if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
+ if (zone->cond == BLK_ZONE_COND_FULL)
+ zone->wp = zone->start + zone->len;
+ else if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->wp = zone->start;
+ else
+ zone->wp += sector_diff;
+ }
+
+ args->next_sector = zone->start + zone->len;
+ return args->orig_cb(zone, args->zone_idx++, args->orig_data);
+}
+EXPORT_SYMBOL_GPL(dm_report_zones_cb);
+
static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
-#ifdef CONFIG_BLK_DEV_ZONED
struct mapped_device *md = disk->private_data;
- struct dm_target *tgt;
struct dm_table *map;
int srcu_idx, ret;
+ struct dm_report_zones_args args = {
+ .next_sector = sector,
+ .orig_data = data,
+ .orig_cb = cb,
+ };
if (dm_suspended_md(md))
return -EAGAIN;
@@ -456,38 +490,30 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
if (!map)
return -EIO;
- tgt = dm_table_find_target(map, sector);
- if (!tgt) {
- ret = -EIO;
- goto out;
- }
+ do {
+ struct dm_target *tgt;
- /*
- * If we are executing this, we already know that the block device
- * is a zoned device and so each target should have support for that
- * type of drive. A missing report_zones method means that the target
- * driver has a problem.
- */
- if (WARN_ON(!tgt->type->report_zones)) {
- ret = -EIO;
- goto out;
- }
+ tgt = dm_table_find_target(map, args.next_sector);
+ if (WARN_ON_ONCE(!tgt->type->report_zones)) {
+ ret = -EIO;
+ goto out;
+ }
- /*
- * blkdev_report_zones() will loop and call this again to cover all the
- * zones of the target, eventually moving on to the next target.
- * So there is no need to loop here trying to fill the entire array
- * of zones.
- */
- ret = tgt->type->report_zones(tgt, sector, zones, nr_zones);
+ args.tgt = tgt;
+ ret = tgt->type->report_zones(tgt, &args, nr_zones);
+ if (ret < 0)
+ goto out;
+ } while (args.zone_idx < nr_zones &&
+ args.next_sector < get_capacity(disk));
+ ret = args.zone_idx;
out:
dm_put_live_table(md, srcu_idx);
return ret;
-#else
- return -ENOTSUPP;
-#endif
}
+#else
+#define dm_blk_report_zones NULL
+#endif /* CONFIG_BLK_DEV_ZONED */
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
@@ -1174,7 +1200,8 @@ static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
- * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
+ * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
+ * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1212,54 +1239,6 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
-/*
- * The zone descriptors obtained with a zone report indicate
- * zone positions within the underlying device of the target. The zone
- * descriptors must be remapped to match their position within the dm device.
- * The caller target should obtain the zones information using
- * blkdev_report_zones() to ensure that remapping for partition offset is
- * already handled.
- */
-void dm_remap_zone_report(struct dm_target *ti, sector_t start,
- struct blk_zone *zones, unsigned int *nr_zones)
-{
-#ifdef CONFIG_BLK_DEV_ZONED
- struct blk_zone *zone;
- unsigned int nrz = *nr_zones;
- int i;
-
- /*
- * Remap the start sector and write pointer position of the zones in
- * the array. Since we may have obtained from the target underlying
- * device more zones that the target size, also adjust the number
- * of zones.
- */
- for (i = 0; i < nrz; i++) {
- zone = zones + i;
- if (zone->start >= start + ti->len) {
- memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
- break;
- }
-
- zone->start = zone->start + ti->begin - start;
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
- continue;
-
- if (zone->cond == BLK_ZONE_COND_FULL)
- zone->wp = zone->start + zone->len;
- else if (zone->cond == BLK_ZONE_COND_EMPTY)
- zone->wp = zone->start;
- else
- zone->wp = zone->wp + ti->begin - start;
- }
-
- *nr_zones = i;
-#else /* !CONFIG_BLK_DEV_ZONED */
- *nr_zones = 0;
-#endif
-}
-EXPORT_SYMBOL_GPL(dm_remap_zone_report);
-
static blk_qc_t __map_bio(struct dm_target_io *tio)
{
int r;
@@ -1627,7 +1606,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
ci.sector_count = 0;
error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
- } else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
+ } else if (op_is_zone_mgmt(bio_op(bio))) {
ci.bio = bio;
ci.sector_count = 0;
error = __split_and_process_non_flush(&ci);
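
The remapping done by dm_report_zones_cb() is plain sector arithmetic: every reported zone is shifted by the difference between the target's position in the dm device and the start of the mapped range, and the write pointer follows unless the zone is empty or full. A small stand-alone sketch of that arithmetic, with made-up sector numbers rather than real dm structures:

/*
 * Worked example of the zone remapping arithmetic. Only the arithmetic
 * mirrors the patch; nothing here is a dm API.
 */
#include <stdint.h>
#include <stdio.h>

enum zcond { COND_EMPTY, COND_FULL, COND_OPEN };

struct zinfo {
	uint64_t start, len, wp;
	enum zcond cond;
};

int main(void)
{
	/* Target mapped at sector 1048576 of the dm device ... */
	uint64_t tgt_begin = 1048576;
	/* ... onto the underlying device starting at sector 524288. */
	uint64_t dev_start = 524288;
	int64_t sector_diff = (int64_t)tgt_begin - (int64_t)dev_start;

	/* A partially written zone as reported by the underlying device. */
	struct zinfo z = { .start = 524288, .len = 262144,
			   .wp = 524288 + 4096, .cond = COND_OPEN };

	z.start += sector_diff;
	if (z.cond == COND_FULL)
		z.wp = z.start + z.len;
	else if (z.cond == COND_EMPTY)
		z.wp = z.start;
	else
		z.wp += sector_diff;

	/* start=1048576 wp=1052672: the zone now appears at its dm position. */
	printf("start=%llu wp=%llu\n",
	       (unsigned long long)z.start, (unsigned long long)z.wp);
	return 0;
}
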
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index b092c7b5282f..3ad18246fcb3 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2139,6 +2139,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
memcpy(page_address(store.sb_page),
page_address(bitmap->storage.sb_page),
sizeof(bitmap_super_t));
+ spin_lock_irq(&bitmap->counts.lock);
md_bitmap_file_unmap(&bitmap->storage);
bitmap->storage = store;
@@ -2154,7 +2155,6 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
blocks = min(old_counts.chunks << old_counts.chunkshift,
chunks << chunkshift);
- spin_lock_irq(&bitmap->counts.lock);
/* For cluster raid, need to pre-allocate bitmap */
if (mddev_is_clustered(bitmap->mddev)) {
unsigned long page;
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index c766c559d36d..26c75c0199fa 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -244,10 +244,9 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
sector_t start_sector, end_sector, data_offset;
sector_t bio_sector = bio->bi_iter.bi_sector;
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
tmp_dev = which_dev(mddev, bio_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 6780938d2991..152f9e65a226 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -104,10 +104,9 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
mp_bh = mempool_alloc(&conf->pool, GFP_NOIO);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1be7abeb24fd..805b33e27496 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -550,7 +550,13 @@ static void md_submit_flush_data(struct work_struct *ws)
}
}
-void md_flush_request(struct mddev *mddev, struct bio *bio)
+/*
+ * Manages the consolidation of flushes and submits any flush needed for
+ * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
+ * being finished in another context. Returns false if the flushing is
+ * complete but still needs the I/O portion of the bio to be processed.
+ */
+bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
ktime_t start = ktime_get_boottime();
spin_lock_irq(&mddev->lock);
@@ -575,9 +581,10 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
bio_endio(bio);
else {
bio->bi_opf &= ~REQ_PREFLUSH;
- mddev->pers->make_request(mddev, bio);
+ return false;
}
}
+ return true;
}
EXPORT_SYMBOL(md_flush_request);
@@ -1098,6 +1105,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
mdp_super_t *sb;
int ret;
+ bool spare_disk = true;
/*
* Calculate the position of the superblock (512byte sectors),
@@ -1148,8 +1156,18 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
else
rdev->desc_nr = sb->this_disk.number;
+ /* not spare disk, or LEVEL_MULTIPATH */
+ if (sb->level == LEVEL_MULTIPATH ||
+ (rdev->desc_nr >= 0 &&
+ sb->disks[rdev->desc_nr].state &
+ ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
+ spare_disk = false;
+
if (!refdev) {
- ret = 1;
+ if (!spare_disk)
+ ret = 1;
+ else
+ ret = 0;
} else {
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
@@ -1165,7 +1183,8 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
}
ev1 = md_event(sb);
ev2 = md_event(refsb);
- if (ev1 > ev2)
+
+ if (!spare_disk && ev1 > ev2)
ret = 1;
else
ret = 0;
@@ -1525,6 +1544,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sector_t sectors;
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
int bmask;
+ bool spare_disk = true;
/*
* Calculate the position of the superblock in 512byte sectors.
@@ -1658,8 +1678,19 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sb->level != 0)
return -EINVAL;
+ /* not spare disk, or LEVEL_MULTIPATH */
+ if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
+ (rdev->desc_nr >= 0 &&
+ rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
+ (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
+ le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
+ spare_disk = false;
+
if (!refdev) {
- ret = 1;
+ if (!spare_disk)
+ ret = 1;
+ else
+ ret = 0;
} else {
__u64 ev1, ev2;
struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
@@ -1676,7 +1707,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
ev1 = le64_to_cpu(sb->events);
ev2 = le64_to_cpu(refsb->events);
- if (ev1 > ev2)
+ if (!spare_disk && ev1 > ev2)
ret = 1;
else
ret = 0;
@@ -3597,7 +3628,7 @@ abort_free:
* Check a full RAID array for plausibility
*/
-static void analyze_sbs(struct mddev *mddev)
+static int analyze_sbs(struct mddev *mddev)
{
int i;
struct md_rdev *rdev, *freshest, *tmp;
@@ -3618,6 +3649,12 @@ static void analyze_sbs(struct mddev *mddev)
md_kick_rdev_from_array(rdev);
}
+ /* Cannot find a valid fresh disk */
+ if (!freshest) {
+ pr_warn("md: cannot find a valid disk\n");
+ return -EINVAL;
+ }
+
super_types[mddev->major_version].
validate_super(mddev, freshest);
@@ -3652,6 +3689,8 @@ static void analyze_sbs(struct mddev *mddev)
clear_bit(In_sync, &rdev->flags);
}
}
+
+ return 0;
}
/* Read a fixed-point number.
@@ -5570,7 +5609,9 @@ int md_run(struct mddev *mddev)
if (!mddev->raid_disks) {
if (!mddev->persistent)
return -EINVAL;
- analyze_sbs(mddev);
+ err = analyze_sbs(mddev);
+ if (err)
+ return -EINVAL;
}
if (mddev->level != LEVEL_NONE)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index c5e3ff398b59..5f86f8adb0a4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -550,7 +550,7 @@ struct md_personality
int level;
struct list_head list;
struct module *owner;
- bool (*make_request)(struct mddev *mddev, struct bio *bio);
+ bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
/*
* start up works that do NOT require md_thread. tasks that
* requires md_thread should go into start()
@@ -703,7 +703,7 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
extern int mddev_congested(struct mddev *mddev, int bits);
-extern void md_flush_request(struct mddev *mddev, struct bio *bio);
+extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 1e772287b1c8..b7c20979bd19 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -575,10 +575,9 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
unsigned chunk_sects;
unsigned sectors;
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
raid0_handle_discard(mddev, bio);
@@ -615,7 +614,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
tmp_dev = map_sector(mddev, zone, sector, &sector);
break;
default:
- WARN("md/raid0:%s: Invalid layout\n", mdname(mddev));
+ WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
bio_io_error(bio);
return true;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 0466ee2453b4..a409ab6f30bc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -819,6 +819,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
else
generic_make_request(bio);
bio = next;
+ cond_resched();
}
}
@@ -1567,10 +1568,9 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
{
sector_t sectors;
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
/*
* There is a limit to the maximum size, but
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 299c7b1c9718..ec136e44aef7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -191,7 +191,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
out_free_pages:
while (--j >= 0)
- resync_free_pages(&rps[j * 2]);
+ resync_free_pages(&rps[j]);
j = 0;
out_free_bio:
@@ -1525,10 +1525,9 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
int chunk_sects = chunk_mask + 1;
int sectors = bio_sectors(bio);
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
if (!md_write_start(mddev, bio))
return false;
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 18a4064a61a8..cab5b1352892 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1404,7 +1404,7 @@ int ppl_init_log(struct r5conf *conf)
atomic64_set(&ppl_conf->seq, 0);
INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
spin_lock_init(&ppl_conf->no_mem_stripes_lock);
- ppl_conf->write_hint = RWF_WRITE_LIFE_NOT_SET;
+ ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;
if (!mddev->external) {
ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 223e97ab27e6..f0fc538bfe59 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1134,7 +1134,7 @@ again:
bi->bi_iter.bi_size = STRIPE_SIZE;
bi->bi_write_hint = sh->dev[i].write_hint;
if (!rrdev)
- sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET;
+ sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -1187,7 +1187,7 @@ again:
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_iter.bi_size = STRIPE_SIZE;
rbi->bi_write_hint = sh->dev[i].write_hint;
- sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET;
+ sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -5592,8 +5592,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
if (ret == 0)
return true;
if (ret == -ENODEV) {
- md_flush_request(mddev, bi);
- return true;
+ if (md_flush_request(mddev, bi))
+ return true;
}
/* ret == -EAGAIN, fallback */
/*
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 79a53cb8507b..00a79489067c 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -1353,7 +1353,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
{
unsigned long cmd_addr;
struct cfi_private *cfi = map->fldrv_priv;
- int ret = 0;
+ int ret;
adr += chip->start;
@@ -1383,7 +1383,7 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs, last_end = 0;
int chipnum;
- int ret = 0;
+ int ret;
if (!map->virt)
return -EINVAL;
@@ -1550,7 +1550,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, write_cmd;
- int ret=0;
+ int ret;
adr += chip->start;
@@ -1624,7 +1624,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs;
@@ -1871,7 +1871,7 @@ static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs, vec_seek, i;
size_t len = 0;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index cf8c8be40a9c..04b383bc3947 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -123,19 +123,23 @@ static int cfi_use_status_reg(struct cfi_private *cfi)
(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}
-static void cfi_check_err_status(struct map_info *map, struct flchip *chip,
- unsigned long adr)
+static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status;
if (!cfi_use_status_reg(cfi))
- return;
+ return 0;
cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
status = map_read(map, adr);
+ /* The error bits are invalid while the chip's busy */
+ if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
+ return 0;
+
if (map_word_bitsset(map, status, CMD(0x3a))) {
unsigned long chipstatus = MERGESTATUS(status);
@@ -151,7 +155,12 @@ static void cfi_check_err_status(struct map_info *map, struct flchip *chip,
if (chipstatus & CFI_SR_SLSB)
pr_err("%s sector write protected, status %lx\n",
map->name, chipstatus);
+
+ /* Erase/Program status bits are set on the operation failure */
+ if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
+ return 1;
}
+ return 0;
}
/* #define DEBUG_CFI_FEATURES */
@@ -785,7 +794,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
kfree(mtd->eraseregions);
kfree(mtd);
kfree(cfi->cmdset_priv);
- kfree(cfi->cfiq);
return NULL;
}
@@ -848,20 +856,16 @@ static int __xipram chip_good(struct map_info *map, struct flchip *chip,
if (cfi_use_status_reg(cfi)) {
map_word ready = CMD(CFI_SR_DRB);
- map_word err = CMD(CFI_SR_PSB | CFI_SR_ESB);
+
/*
* For chips that support status register, check device
- * ready bit and Erase/Program status bit to know if
- * operation succeeded.
+ * ready bit
*/
cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
curd = map_read(map, addr);
- if (map_word_andequal(map, curd, ready, ready))
- return !map_word_bitsset(map, curd, err);
-
- return 0;
+ return map_word_andequal(map, curd, ready, ready);
}
oldd = map_read(map, addr);
@@ -1699,8 +1703,11 @@ static int __xipram do_write_oneword_once(struct map_info *map,
break;
}
- if (chip_good(map, chip, adr, datum))
+ if (chip_good(map, chip, adr, datum)) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
break;
+ }
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1);
@@ -1713,7 +1720,7 @@ static int __xipram do_write_oneword_start(struct map_info *map,
struct flchip *chip,
unsigned long adr, int mode)
{
- int ret = 0;
+ int ret;
mutex_lock(&chip->mutex);
@@ -1773,7 +1780,6 @@ static int __xipram do_write_oneword_retry(struct map_info *map,
ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
if (ret) {
/* reset on all failures. */
- cfi_check_err_status(map, chip, adr);
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
@@ -1791,7 +1797,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
unsigned long adr, map_word datum,
int mode)
{
- int ret = 0;
+ int ret;
adr += chip->start;
@@ -1815,7 +1821,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs, chipstart;
DECLARE_WAITQUEUE(wait, current);
@@ -1970,12 +1976,17 @@ static int __xipram do_write_buffer_wait(struct map_info *map,
*/
if (time_after(jiffies, timeo) &&
!chip_good(map, chip, adr, datum)) {
+ pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
+ __func__, adr);
ret = -EIO;
break;
}
- if (chip_good(map, chip, adr, datum))
+ if (chip_good(map, chip, adr, datum)) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
break;
+ }
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1);
@@ -2014,7 +2025,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
int len)
{
struct cfi_private *cfi = map->fldrv_priv;
- int ret = -EIO;
+ int ret;
unsigned long cmd_adr;
int z, words;
map_word datum;
@@ -2071,12 +2082,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
chip->word_write_time);
ret = do_write_buffer_wait(map, chip, adr, datum);
- if (ret) {
- cfi_check_err_status(map, chip, adr);
+ if (ret)
do_write_buffer_reset(map, chip, cfi);
- pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
- __func__, adr);
- }
xip_enable(map, chip, adr);
@@ -2095,7 +2102,7 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs;
@@ -2232,7 +2239,7 @@ static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
int retry_cnt = 0;
map_word oldd;
- int ret = 0;
+ int ret;
int i;
adr += chip->start;
@@ -2271,9 +2278,9 @@ retry:
udelay(1);
}
- if (!chip_good(map, chip, adr, datum)) {
+ if (!chip_good(map, chip, adr, datum) ||
+ cfi_check_err_status(map, chip, adr)) {
/* reset on all failures. */
- cfi_check_err_status(map, chip, adr);
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
@@ -2307,7 +2314,7 @@ static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs, chipstart;
- int ret = 0;
+ int ret;
int chipnum;
chipnum = to >> cfi->chipshift;
@@ -2411,7 +2418,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
unsigned long timeo = jiffies + HZ;
unsigned long int adr;
DECLARE_WAITQUEUE(wait, current);
- int ret = 0;
+ int ret;
int retry_cnt = 0;
adr = cfi->addr_unlock1;
@@ -2467,8 +2474,11 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->erase_suspended = 0;
}
- if (chip_good(map, chip, adr, map_word_ff(map)))
+ if (chip_good(map, chip, adr, map_word_ff(map))) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
break;
+ }
if (time_after(jiffies, timeo)) {
printk(KERN_WARNING "MTD %s(): software timeout\n",
@@ -2483,7 +2493,6 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
/* Did we succeed? */
if (ret) {
/* reset on all failures. */
- cfi_check_err_status(map, chip, adr);
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
@@ -2508,7 +2517,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo = jiffies + HZ;
DECLARE_WAITQUEUE(wait, current);
- int ret = 0;
+ int ret;
int retry_cnt = 0;
adr += chip->start;
@@ -2564,8 +2573,11 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->erase_suspended = 0;
}
- if (chip_good(map, chip, adr, map_word_ff(map)))
+ if (chip_good(map, chip, adr, map_word_ff(map))) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
break;
+ }
if (time_after(jiffies, timeo)) {
printk(KERN_WARNING "MTD %s(): software timeout\n",
@@ -2580,7 +2592,6 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
/* Did we succeed? */
if (ret) {
/* reset on all failures. */
- cfi_check_err_status(map, chip, adr);
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
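
With the change above, chip_good() answers only "is the device ready", and cfi_check_err_status() reports Erase/Program failures while refusing to look at the error bits as long as the device is still busy. A short host-side model of that split follows; the SR_* bit positions are illustrative placeholders rather than values quoted from the CFI specification.

/*
 * Host-side sketch of the status-register handling after this change.
 * Bit positions are placeholders for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define SR_DRB (1u << 7)   /* device ready (placeholder position) */
#define SR_ESB (1u << 5)   /* erase status (placeholder position) */
#define SR_PSB (1u << 4)   /* program status (placeholder position) */

/* Ready check only: no error bits are consulted here any more. */
static bool model_chip_good(unsigned int status)
{
	return (status & SR_DRB) != 0;
}

/* Error check: the error bits are only valid once the chip is ready. */
static int model_check_err_status(unsigned int status)
{
	if (!(status & SR_DRB))
		return 0;
	return (status & (SR_ESB | SR_PSB)) ? 1 : 0;
}

int main(void)
{
	unsigned int busy = SR_ESB;            /* stale error bit, still busy */
	unsigned int failed = SR_DRB | SR_PSB; /* ready, program failed */
	unsigned int ok = SR_DRB;              /* ready, no errors */

	printf("busy:   good=%d err=%d\n", model_chip_good(busy),
	       model_check_err_status(busy));     /* good=0 err=0 */
	printf("failed: good=%d err=%d\n", model_chip_good(failed),
	       model_check_err_status(failed));   /* good=1 err=1 */
	printf("ok:     good=%d err=%d\n", model_chip_good(ok),
	       model_check_err_status(ok));       /* good=1 err=0 */
	return 0;
}
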
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index e752067526a5..54edae63b92d 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -611,7 +611,7 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs;
@@ -895,7 +895,7 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
{ struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr, len;
- int chipnum, ret = 0;
+ int chipnum, ret;
int i, first;
struct mtd_erase_region_info *regions = mtd->eraseregions;
@@ -1132,7 +1132,7 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr;
- int chipnum, ret = 0;
+ int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
int ofs_factor = cfi->interleave * cfi->device_type;
#endif
@@ -1279,7 +1279,7 @@ static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr;
- int chipnum, ret = 0;
+ int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
int ofs_factor = cfi->interleave * cfi->device_type;
#endif
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index e3b266ee06af..e2d4db05aeb3 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -26,7 +26,7 @@
void cfi_udelay(int us)
{
if (us >= 1000) {
- msleep((us+999)/1000);
+ msleep(DIV_ROUND_UP(us, 1000));
} else {
udelay(us);
cond_resched();
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index b20d02b4f830..77c872fd3d83 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -64,15 +64,17 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
- int ret;
+ int ret, cmd_len;
spi_message_init(&message);
+ cmd_len = mchp23k256_cmdsz(flash);
+
command[0] = MCHP23K256_CMD_WRITE;
mchp23k256_addr2cmd(flash, to, command);
transfer[0].tx_buf = command;
- transfer[0].len = mchp23k256_cmdsz(flash);
+ transfer[0].len = cmd_len;
spi_message_add_tail(&transfer[0], &message);
transfer[1].tx_buf = buf;
@@ -88,8 +90,8 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
return ret;
- if (retlen && message.actual_length > sizeof(command))
- *retlen += message.actual_length - sizeof(command);
+ if (retlen && message.actual_length > cmd_len)
+ *retlen += message.actual_length - cmd_len;
return 0;
}
@@ -101,16 +103,18 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
- int ret;
+ int ret, cmd_len;
spi_message_init(&message);
+ cmd_len = mchp23k256_cmdsz(flash);
+
memset(&transfer, 0, sizeof(transfer));
command[0] = MCHP23K256_CMD_READ;
mchp23k256_addr2cmd(flash, from, command);
transfer[0].tx_buf = command;
- transfer[0].len = mchp23k256_cmdsz(flash);
+ transfer[0].len = cmd_len;
spi_message_add_tail(&transfer[0], &message);
transfer[1].rx_buf = buf;
@@ -126,8 +130,8 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
if (ret)
return ret;
- if (retlen && message.actual_length > sizeof(command))
- *retlen += message.actual_length - sizeof(command);
+ if (retlen && message.actual_length > cmd_len)
+ *retlen += message.actual_length - cmd_len;
return 0;
}
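
The retlen change above matters because the command buffer is sized for the largest supported part while the command actually sent can be shorter. A tiny stand-alone illustration, assuming a part with a two-byte address (so a three-byte command) against the four-byte MAX_CMD_SIZE buffer:

/*
 * Illustration of the retlen fix. The SPI message is reduced to its
 * accounted length; the byte counts are example values, not driver state.
 */
#include <stddef.h>
#include <stdio.h>

#define MAX_CMD_SIZE 4

int main(void)
{
	size_t cmd_len = 3;          /* 1 opcode + 2 address bytes */
	size_t data_len = 16;        /* payload actually transferred */
	size_t actual_length = cmd_len + data_len;

	size_t retlen_old = 0, retlen_new = 0;

	/* Old code subtracted the full command buffer size ... */
	if (actual_length > MAX_CMD_SIZE)
		retlen_old += actual_length - MAX_CMD_SIZE;
	/* ... the fix subtracts only what was really sent as the command. */
	if (actual_length > cmd_len)
		retlen_new += actual_length - cmd_len;

	printf("old=%zu new=%zu\n", retlen_old, retlen_new); /* old=15 new=16 */
	return 0;
}
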
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 986f81d2f93e..79dcca16481d 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -592,6 +592,26 @@ static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
return 0;
}
+/*
+ * The purpose of this function is to ensure a memcpy_toio() that uses byte
+ * writes only. Its structure is inspired by the ARM implementation of
+ * _memcpy_toio(), which also does single-byte writes but cannot be used here
+ * because it is an implementation detail rather than part of the API (its own
+ * comment even states that _memcpy_toio() should be optimized).
+ */
+static void spear_smi_memcpy_toio_b(volatile void __iomem *dest,
+ const void *src, size_t len)
+{
+ const unsigned char *from = src;
+
+ while (len) {
+ len--;
+ writeb(*from, dest);
+ from++;
+ dest++;
+ }
+}
+
static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
void __iomem *dest, const void *src, size_t len)
{
@@ -614,7 +634,23 @@ static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
ctrlreg1 = readl(dev->io_base + SMI_CR1);
writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1);
- memcpy_toio(dest, src, len);
+ /*
+ * In Write Burst mode (WB_MODE), the spec states that writes must be:
+ * - incremental
+ * - of the same size
+ * The ARM implementation of memcpy_toio() will optimize the number of
+ * I/O accesses by using as many 4-byte writes as possible, surrounded by
+ * 2-byte/1-byte accesses if:
+ * - the destination is not 4-byte aligned
+ * - the length is not a multiple of 4 bytes.
+ * Avoid this alternation of write access sizes by using our own 'byte
+ * access' helper if at least one of the two conditions above is true.
+ */
+ if (IS_ALIGNED(len, sizeof(u32)) &&
+ IS_ALIGNED((uintptr_t)dest, sizeof(u32)))
+ memcpy_toio(dest, src, len);
+ else
+ spear_smi_memcpy_toio_b(dest, src, len);
writel(ctrlreg1, dev->io_base + SMI_CR1);
@@ -777,9 +813,6 @@ static int spear_smi_probe_config_dt(struct platform_device *pdev,
/* Fill structs for each subnode (flash device) */
while ((pp = of_get_next_child(np, pp))) {
- struct spear_smi_flash_info *flash_info;
-
- flash_info = &pdata->board_flash_info[i];
pdata->np[i] = pp;
/* Read base-addr and size from DT */
@@ -933,7 +966,6 @@ static int spear_smi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENODEV;
- dev_err(&pdev->dev, "invalid smi irq\n");
goto err;
}
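
The decision added to spear_smi_cpy_toio() reduces to two alignment tests: the fast memcpy_toio() path is only taken when both the destination and the length are 4-byte aligned, and everything else goes through the byte-wise helper. A short user-space sketch of that selection; IS_ALIGNED is redefined locally so the snippet builds outside the kernel:

/*
 * Demonstration of the write-path selection. The addresses and lengths are
 * arbitrary example values.
 */
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static const char *pick_path(uintptr_t dest, size_t len)
{
	if (IS_ALIGNED(len, sizeof(uint32_t)) &&
	    IS_ALIGNED(dest, sizeof(uint32_t)))
		return "memcpy_toio (word burst)";
	return "byte-by-byte helper";
}

int main(void)
{
	printf("%s\n", pick_path(0x1000, 64));  /* word burst */
	printf("%s\n", pick_path(0x1002, 64));  /* byte helper: bad alignment */
	printf("%s\n", pick_path(0x1000, 10));  /* byte helper: odd length */
	return 0;
}
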
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index f4d1667daaf9..1888523d9745 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -255,7 +255,6 @@ struct stfsm_seq {
struct stfsm {
struct device *dev;
void __iomem *base;
- struct resource *region;
struct mtd_info mtd;
struct mutex lock;
struct flash_info *info;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index bc82305ebb4c..b28225a7c4f3 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -96,6 +96,17 @@ config MTD_PHYSMAP_GEMINI
platforms, some detection and setting up parallel mode on the
external interface.
+config MTD_PHYSMAP_IXP4XX
+ bool "Intel IXP4xx OF-based physical memory map handling"
+ depends on MTD_PHYSMAP_OF
+ depends on ARM
+ select MTD_COMPLEX_MAPPINGS
+ select MTD_CFI_BE_BYTE_SWAP if CPU_BIG_ENDIAN
+ default ARCH_IXP4XX
+ help
+ This provides some extra DT physmap parsing for the Intel IXP4xx
+ platforms, in particular some elaborate endianness handling.
+
config MTD_PHYSMAP_GPIO_ADDR
bool "GPIO-assisted Flash Chip Support"
depends on MTD_PHYSMAP
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 1146009f41df..c0da86a5d26f 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
physmap-objs-y += physmap-core.o
physmap-objs-$(CONFIG_MTD_PHYSMAP_VERSATILE) += physmap-versatile.o
physmap-objs-$(CONFIG_MTD_PHYSMAP_GEMINI) += physmap-gemini.o
+physmap-objs-$(CONFIG_MTD_PHYSMAP_IXP4XX) += physmap-ixp4xx.o
physmap-objs := $(physmap-objs-y)
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PISMO) += pismo.o
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 876f12f40018..0eeadfeb620d 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -86,7 +86,7 @@ static int __init init_l440gx(void)
return -ENOMEM;
}
simple_map_init(&l440gx_map);
- printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt);
+ pr_debug("window_addr = %p\n", l440gx_map.virt);
/* Setup the pm iobase resource
* This code should move into some kind of generic bridge
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 21b556afc305..a9f7964e2edb 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -41,6 +41,7 @@
#include <linux/gpio/consumer.h>
#include "physmap-gemini.h"
+#include "physmap-ixp4xx.h"
#include "physmap-versatile.h"
struct physmap_flash_info {
@@ -370,6 +371,10 @@ static int physmap_flash_of_init(struct platform_device *dev)
if (err)
return err;
+ err = of_flash_probe_ixp4xx(dev, dp, &info->maps[i]);
+ if (err)
+ return err;
+
err = of_flash_probe_versatile(dev, dp, &info->maps[i]);
if (err)
return err;
diff --git a/drivers/mtd/maps/physmap-ixp4xx.c b/drivers/mtd/maps/physmap-ixp4xx.c
new file mode 100644
index 000000000000..6a054229a8a0
--- /dev/null
+++ b/drivers/mtd/maps/physmap-ixp4xx.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel IXP4xx OF physmap add-on
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the ixp4xx.c map driver, originally written by:
+ * Intel Corporation
+ * Deepak Saxena <dsaxena@mvista.com>
+ * Copyright (C) 2002 Intel Corporation
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ */
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/xip.h>
+#include "physmap-ixp4xx.h"
+
+/*
+ * Read/write a 16 bit word from flash address 'addr'.
+ *
+ * When the cpu is in little-endian mode it swizzles the address lines
+ * ('address coherency') so we need to undo the swizzling to ensure commands
+ * and the like end up on the correct flash address.
+ *
+ * To further complicate matters, due to the way the expansion bus controller
+ * handles 32 bit reads, the byte stream ABCD is stored on the flash as:
+ * D15 D0
+ * +---+---+
+ * | A | B | 0
+ * +---+---+
+ * | C | D | 2
+ * +---+---+
+ * This means that on LE systems each 16 bit word must be swapped. Note that
+ * this requires CONFIG_MTD_CFI_BE_BYTE_SWAP to be enabled to 'unswap' the CFI
+ * data and other flash commands which are always in D7-D0.
+ */
+#ifndef CONFIG_CPU_BIG_ENDIAN
+
+static inline u16 flash_read16(void __iomem *addr)
+{
+ return be16_to_cpu(__raw_readw((void __iomem *)((unsigned long)addr ^ 0x2)));
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+ __raw_writew(cpu_to_be16(d), (void __iomem *)((unsigned long)addr ^ 0x2));
+}
+
+#define BYTE0(h) ((h) & 0xFF)
+#define BYTE1(h) (((h) >> 8) & 0xFF)
+
+#else
+
+static inline u16 flash_read16(const void __iomem *addr)
+{
+ return __raw_readw(addr);
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+ __raw_writew(d, addr);
+}
+
+#define BYTE0(h) (((h) >> 8) & 0xFF)
+#define BYTE1(h) ((h) & 0xFF)
+#endif
+
+static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
+{
+ map_word val;
+
+ val.x[0] = flash_read16(map->virt + ofs);
+ return val;
+}
+
+/*
+ * The IXP4xx expansion bus only allows 16-bit wide accesses
+ * when attached to a 16-bit wide device (such as the 28F128J3A),
+ * so we can't just memcpy_fromio().
+ */
+static void ixp4xx_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ u8 *dest = (u8 *) to;
+ void __iomem *src = map->virt + from;
+
+ if (len <= 0)
+ return;
+
+ if (from & 1) {
+ *dest++ = BYTE1(flash_read16(src-1));
+ src++;
+ --len;
+ }
+
+ while (len >= 2) {
+ u16 data = flash_read16(src);
+ *dest++ = BYTE0(data);
+ *dest++ = BYTE1(data);
+ src += 2;
+ len -= 2;
+ }
+
+ if (len > 0)
+ *dest++ = BYTE0(flash_read16(src));
+}
+
+static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+ flash_write16(d.x[0], map->virt + adr);
+}
+
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ struct device *dev = &pdev->dev;
+
+ /* Multiplatform guard */
+ if (!of_device_is_compatible(np, "intel,ixp4xx-flash"))
+ return 0;
+
+ map->read = ixp4xx_read16;
+ map->write = ixp4xx_write16;
+ map->copy_from = ixp4xx_copy_from;
+ map->copy_to = NULL;
+
+ dev_info(dev, "initialized Intel IXP4xx-specific physmap control\n");
+
+ return 0;
+}
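
The odd-offset and odd-length handling in ixp4xx_copy_from() can be exercised without the hardware. The sketch below replaces the mapped window with a plain byte array and flash_read16() with a little-endian 16-bit load, so it demonstrates only the copy-loop bookkeeping, not the expansion-bus swizzling itself:

/*
 * Host-side model of the 16-bit copy loop. The "flash" contents and the
 * read helper are stand-ins; only the byte-extraction logic follows the
 * driver.
 */
#include <stdint.h>
#include <stdio.h>

#define BYTE0(h) ((h) & 0xFF)
#define BYTE1(h) (((h) >> 8) & 0xFF)

static const uint8_t flash[] = "ABCDEFGH";

/* 16-bit read at an even byte offset, low byte first (LE model). */
static uint16_t model_read16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static void model_copy_from(uint8_t *dest, unsigned long from, long len)
{
	const uint8_t *src = flash + from;

	if (len <= 0)
		return;
	if (from & 1) {                  /* leading odd byte */
		*dest++ = BYTE1(model_read16(src - 1));
		src++;
		--len;
	}
	while (len >= 2) {               /* aligned 16-bit words */
		uint16_t data = model_read16(src);
		*dest++ = BYTE0(data);
		*dest++ = BYTE1(data);
		src += 2;
		len -= 2;
	}
	if (len > 0)                     /* trailing odd byte */
		*dest++ = BYTE0(model_read16(src));
}

int main(void)
{
	uint8_t out[6] = { 0 };

	model_copy_from(out, 1, 5);
	printf("%.5s\n", (const char *)out);   /* prints "BCDEF" */
	return 0;
}
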
diff --git a/drivers/mtd/maps/physmap-ixp4xx.h b/drivers/mtd/maps/physmap-ixp4xx.h
new file mode 100644
index 000000000000..b0fc49b7f3ed
--- /dev/null
+++ b/drivers/mtd/maps/physmap-ixp4xx.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/of.h>
+#include <linux/mtd/map.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_IXP4XX
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map);
+#else
+static inline
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 975aed94f06c..b841008a9eb7 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -174,7 +174,7 @@ static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
break;
case MTD_FILE_MODE_RAW:
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
ops.mode = MTD_OPS_RAW;
ops.datbuf = kbuf;
@@ -268,7 +268,7 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c
case MTD_FILE_MODE_RAW:
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
ops.mode = MTD_OPS_RAW;
ops.datbuf = kbuf;
@@ -350,7 +350,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint32_t __user *retp)
{
struct mtd_file_info *mfi = file->private_data;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
uint32_t retlen;
int ret = 0;
@@ -394,7 +394,7 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
uint32_t __user *retp)
{
struct mtd_file_info *mfi = file->private_data;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
int ret = 0;
if (length > 4096)
@@ -587,7 +587,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
struct mtd_write_req __user *argp)
{
struct mtd_write_req req;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
const void __user *usr_data, *usr_oob;
int ret;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 6cc7ecb0c788..5fac4355b9c2 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -382,33 +382,21 @@ static struct dentry *dfs_dir_mtd;
static void mtd_debugfs_populate(struct mtd_info *mtd)
{
struct device *dev = &mtd->dev;
- struct dentry *root, *dent;
+ struct dentry *root;
if (IS_ERR_OR_NULL(dfs_dir_mtd))
return;
root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
- if (IS_ERR_OR_NULL(root)) {
- dev_dbg(dev, "won't show data in debugfs\n");
- return;
- }
-
mtd->dbg.dfs_dir = root;
- if (mtd->dbg.partid) {
- dent = debugfs_create_file("partid", 0400, root, mtd,
- &mtd_partid_debug_fops);
- if (IS_ERR_OR_NULL(dent))
- dev_err(dev, "can't create debugfs entry for partid\n");
- }
+ if (mtd->dbg.partid)
+ debugfs_create_file("partid", 0400, root, mtd,
+ &mtd_partid_debug_fops);
- if (mtd->dbg.partname) {
- dent = debugfs_create_file("partname", 0400, root, mtd,
- &mtd_partname_debug_fops);
- if (IS_ERR_OR_NULL(dent))
- dev_err(dev,
- "can't create debugfs entry for partname\n");
- }
+ if (mtd->dbg.partname)
+ debugfs_create_file("partname", 0400, root, mtd,
+ &mtd_partname_debug_fops);
}
#ifndef CONFIG_MMU
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index f92414eb4c86..58eefa43af14 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1257,7 +1257,6 @@ DEFINE_SHOW_ATTRIBUTE(mtdswap);
static int mtdswap_add_debugfs(struct mtdswap_dev *d)
{
struct dentry *root = d->mtd->dbg.dfs_dir;
- struct dentry *dent;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
@@ -1265,12 +1264,7 @@ static int mtdswap_add_debugfs(struct mtdswap_dev *d)
if (IS_ERR_OR_NULL(root))
return -1;
- dent = debugfs_create_file("mtdswap_stats", S_IRUSR, root, d,
- &mtdswap_fops);
- if (!dent) {
- dev_err(d->dev, "debugfs_create_file failed\n");
- return -1;
- }
+ debugfs_create_file("mtdswap_stats", S_IRUSR, root, d, &mtdswap_fops);
return 0;
}
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index e59de3f60cf6..74fb91adeb46 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -450,6 +450,13 @@ config MTD_NAND_PLATFORM
devices. You will need to provide platform-specific functions
via platform_data.
+config MTD_NAND_CADENCE
+ tristate "Support Cadence NAND (HPNFC) controller"
+ depends on OF || COMPILE_TEST
+ help
+ Enable the driver for NAND flash on platforms using a Cadence NAND
+ controller.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index a98721988e61..2d136b158fb7 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o
obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o
+obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 15ef30b368a5..1a66b1cd51c0 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -117,6 +117,18 @@ enum flash_dma_reg {
FLASH_DMA_CURRENT_DESC_EXT,
};
+/* flash_dma registers v0 */
+static const u16 flash_dma_regs_v0[] = {
+ [FLASH_DMA_REVISION] = 0x00,
+ [FLASH_DMA_FIRST_DESC] = 0x04,
+ [FLASH_DMA_CTRL] = 0x08,
+ [FLASH_DMA_MODE] = 0x0c,
+ [FLASH_DMA_STATUS] = 0x10,
+ [FLASH_DMA_INTERRUPT_DESC] = 0x14,
+ [FLASH_DMA_ERROR_STATUS] = 0x18,
+ [FLASH_DMA_CURRENT_DESC] = 0x1c,
+};
+
/* flash_dma registers v1*/
static const u16 flash_dma_regs_v1[] = {
[FLASH_DMA_REVISION] = 0x00,
@@ -597,6 +609,8 @@ static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
/* flash_dma register offsets */
if (ctrl->nand_version >= 0x0703)
ctrl->flash_dma_offsets = flash_dma_regs_v4;
+ else if (ctrl->nand_version == 0x0602)
+ ctrl->flash_dma_offsets = flash_dma_regs_v0;
else
ctrl->flash_dma_offsets = flash_dma_regs_v1;
}
@@ -918,7 +932,7 @@ static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
return;
if (has_flash_dma(ctrl)) {
- ctrl->flash_dma_base = 0;
+ ctrl->flash_dma_base = NULL;
disable_irq(ctrl->dma_irq);
}
@@ -1673,8 +1687,11 @@ static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
- flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
- (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+ if (ctrl->nand_version > 0x0602) {
+ flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT,
+ upper_32_bits(desc));
+ (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+ }
/* Start FLASH_DMA engine */
ctrl->dma_pending = true;
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
new file mode 100644
index 000000000000..3a36285a8d8a
--- /dev/null
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -0,0 +1,3030 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Cadence NAND flash controller driver
+ *
+ * Copyright (C) 2019 Cadence
+ *
+ * Author: Piotr Sroka <piotrs@cadence.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_device.h>
+#include <linux/iopoll.h>
+
+/*
+ * HPNFC can work in 3 modes:
+ * - PIO - can work in master or slave DMA
+ * - CDMA - needs Master DMA for accessing command descriptors.
+ * - Generic mode - can use only slave DMA.
+ * CDMA and PIO modes can be used to execute only base commands.
+ * Generic mode can be used to execute any command
+ * on NAND flash memory. Driver uses CDMA mode for
+ * block erasing, page reading, page programing.
+ * Generic mode is used for executing rest of commands.
+ */
+
+#define MAX_OOB_SIZE_PER_SECTOR 32
+#define MAX_ADDRESS_CYC 6
+#define MAX_ERASE_ADDRESS_CYC 3
+#define MAX_DATA_SIZE 0xFFFC
+#define DMA_DATA_SIZE_ALIGN 8
+
+/* Register definition. */
+/*
+ * Command register 0.
+ * Writing data to this register will initiate a new transaction
+ * of the NF controller.
+ */
+#define CMD_REG0 0x0000
+/* Command type field mask. */
+#define CMD_REG0_CT GENMASK(31, 30)
+/* Command type CDMA. */
+#define CMD_REG0_CT_CDMA 0uL
+/* Command type generic. */
+#define CMD_REG0_CT_GEN 3uL
+/* Command thread number field mask. */
+#define CMD_REG0_TN GENMASK(27, 24)
+
+/* Command register 2. */
+#define CMD_REG2 0x0008
+/* Command register 3. */
+#define CMD_REG3 0x000C
+/* Pointer register to select which thread status will be selected. */
+#define CMD_STATUS_PTR 0x0010
+/* Command status register for selected thread. */
+#define CMD_STATUS 0x0014
+
+/* Interrupt status register. */
+#define INTR_STATUS 0x0110
+#define INTR_STATUS_SDMA_ERR BIT(22)
+#define INTR_STATUS_SDMA_TRIGG BIT(21)
+#define INTR_STATUS_UNSUPP_CMD BIT(19)
+#define INTR_STATUS_DDMA_TERR BIT(18)
+#define INTR_STATUS_CDMA_TERR BIT(17)
+#define INTR_STATUS_CDMA_IDL BIT(16)
+
+/* Interrupt enable register. */
+#define INTR_ENABLE 0x0114
+#define INTR_ENABLE_INTR_EN BIT(31)
+#define INTR_ENABLE_SDMA_ERR_EN BIT(22)
+#define INTR_ENABLE_SDMA_TRIGG_EN BIT(21)
+#define INTR_ENABLE_UNSUPP_CMD_EN BIT(19)
+#define INTR_ENABLE_DDMA_TERR_EN BIT(18)
+#define INTR_ENABLE_CDMA_TERR_EN BIT(17)
+#define INTR_ENABLE_CDMA_IDLE_EN BIT(16)
+
+/* Controller internal state. */
+#define CTRL_STATUS 0x0118
+#define CTRL_STATUS_INIT_COMP BIT(9)
+#define CTRL_STATUS_CTRL_BUSY BIT(8)
+
+/* Command Engine threads state. */
+#define TRD_STATUS 0x0120
+
+/* Command Engine interrupt thread error status. */
+#define TRD_ERR_INT_STATUS 0x0128
+/* Command Engine interrupt thread error enable. */
+#define TRD_ERR_INT_STATUS_EN 0x0130
+/* Command Engine interrupt thread complete status. */
+#define TRD_COMP_INT_STATUS 0x0138
+
+/*
+ * Transfer config 0 register.
+ * Configures data transfer parameters.
+ */
+#define TRAN_CFG_0 0x0400
+/* Offset value from the beginning of the page. */
+#define TRAN_CFG_0_OFFSET GENMASK(31, 16)
+/* Number of sectors to transfer within a single NF device's page. */
+#define TRAN_CFG_0_SEC_CNT GENMASK(7, 0)
+
+/*
+ * Transfer config 1 register.
+ * Configures data transfer parameters.
+ */
+#define TRAN_CFG_1 0x0404
+/* Size of last data sector. */
+#define TRAN_CFG_1_LAST_SEC_SIZE GENMASK(31, 16)
+/* Size of not-last data sector. */
+#define TRAN_CFG_1_SECTOR_SIZE GENMASK(15, 0)
+
+/* ECC engine configuration register 0. */
+#define ECC_CONFIG_0 0x0428
+/* Correction strength. */
+#define ECC_CONFIG_0_CORR_STR GENMASK(10, 8)
+/* Enable erased pages detection mechanism. */
+#define ECC_CONFIG_0_ERASE_DET_EN BIT(1)
+/* Enable controller ECC check bits generation and correction. */
+#define ECC_CONFIG_0_ECC_EN BIT(0)
+
+/* ECC engine configuration register 1. */
+#define ECC_CONFIG_1 0x042C
+
+/* Multiplane settings register. */
+#define MULTIPLANE_CFG 0x0434
+/* Cache operation settings. */
+#define CACHE_CFG 0x0438
+
+/* DMA settings register. */
+#define DMA_SETINGS 0x043C
+/* Enable SDMA error reporting on access to an unprepared slave DMA interface. */
+#define DMA_SETINGS_SDMA_ERR_RSP BIT(17)
+
+/* Transferred data block size for the slave DMA module. */
+#define SDMA_SIZE 0x0440
+
+/* Thread number associated with transferred data block
+ * for the slave DMA module.
+ */
+#define SDMA_TRD_NUM 0x0444
+/* Thread number mask. */
+#define SDMA_TRD_NUM_SDMA_TRD GENMASK(2, 0)
+
+#define CONTROL_DATA_CTRL 0x0494
+/* Control data size mask. */
+#define CONTROL_DATA_CTRL_SIZE GENMASK(15, 0)
+
+#define CTRL_VERSION 0x800
+#define CTRL_VERSION_REV GENMASK(7, 0)
+
+/* Available hardware features of the controller. */
+#define CTRL_FEATURES 0x804
+/* Support for NV-DDR2/3 work mode. */
+#define CTRL_FEATURES_NVDDR_2_3 BIT(28)
+/* Support for NV-DDR work mode. */
+#define CTRL_FEATURES_NVDDR BIT(27)
+/* Support for asynchronous work mode. */
+#define CTRL_FEATURES_ASYNC BIT(26)
+/* Number of banks supported by the controller. */
+#define CTRL_FEATURES_N_BANKS GENMASK(25, 24)
+/* Slave and Master DMA data width. */
+#define CTRL_FEATURES_DMA_DWITH64 BIT(21)
+/* Availability of Control Data feature. */
+#define CTRL_FEATURES_CONTROL_DATA BIT(10)
+
+/* BCH Engine identification register 0 - correction strengths. */
+#define BCH_CFG_0 0x838
+#define BCH_CFG_0_CORR_CAP_0 GENMASK(7, 0)
+#define BCH_CFG_0_CORR_CAP_1 GENMASK(15, 8)
+#define BCH_CFG_0_CORR_CAP_2 GENMASK(23, 16)
+#define BCH_CFG_0_CORR_CAP_3 GENMASK(31, 24)
+
+/* BCH Engine identification register 1 - correction strengths. */
+#define BCH_CFG_1 0x83C
+#define BCH_CFG_1_CORR_CAP_4 GENMASK(7, 0)
+#define BCH_CFG_1_CORR_CAP_5 GENMASK(15, 8)
+#define BCH_CFG_1_CORR_CAP_6 GENMASK(23, 16)
+#define BCH_CFG_1_CORR_CAP_7 GENMASK(31, 24)
+
+/* BCH Engine identification register 2 - sector sizes. */
+#define BCH_CFG_2 0x840
+#define BCH_CFG_2_SECT_0 GENMASK(15, 0)
+#define BCH_CFG_2_SECT_1 GENMASK(31, 16)
+
+/* BCH Engine identification register 3. */
+#define BCH_CFG_3 0x844
+
+/* Ready/Busy# line status. */
+#define RBN_SETINGS 0x1004
+
+/* Common settings. */
+#define COMMON_SET 0x1008
+/* 16 bit device connected to the NAND Flash interface. */
+#define COMMON_SET_DEVICE_16BIT BIT(8)
+
+/* Skip_bytes registers. */
+#define SKIP_BYTES_CONF 0x100C
+#define SKIP_BYTES_MARKER_VALUE GENMASK(31, 16)
+#define SKIP_BYTES_NUM_OF_BYTES GENMASK(7, 0)
+
+#define SKIP_BYTES_OFFSET 0x1010
+#define SKIP_BYTES_OFFSET_VALUE GENMASK(23, 0)
+
+/* Timings configuration. */
+#define ASYNC_TOGGLE_TIMINGS 0x101c
+#define ASYNC_TOGGLE_TIMINGS_TRH GENMASK(28, 24)
+#define ASYNC_TOGGLE_TIMINGS_TRP GENMASK(20, 16)
+#define ASYNC_TOGGLE_TIMINGS_TWH GENMASK(12, 8)
+#define ASYNC_TOGGLE_TIMINGS_TWP GENMASK(4, 0)
+
+#define TIMINGS0 0x1024
+#define TIMINGS0_TADL GENMASK(31, 24)
+#define TIMINGS0_TCCS GENMASK(23, 16)
+#define TIMINGS0_TWHR GENMASK(15, 8)
+#define TIMINGS0_TRHW GENMASK(7, 0)
+
+#define TIMINGS1 0x1028
+#define TIMINGS1_TRHZ GENMASK(31, 24)
+#define TIMINGS1_TWB GENMASK(23, 16)
+#define TIMINGS1_TVDLY GENMASK(7, 0)
+
+#define TIMINGS2 0x102c
+#define TIMINGS2_TFEAT GENMASK(25, 16)
+#define TIMINGS2_CS_HOLD_TIME GENMASK(13, 8)
+#define TIMINGS2_CS_SETUP_TIME GENMASK(5, 0)
+
+/* Configuration of the resynchronization of slave DLL of PHY. */
+#define DLL_PHY_CTRL 0x1034
+#define DLL_PHY_CTRL_DLL_RST_N BIT(24)
+#define DLL_PHY_CTRL_EXTENDED_WR_MODE BIT(17)
+#define DLL_PHY_CTRL_EXTENDED_RD_MODE BIT(16)
+#define DLL_PHY_CTRL_RS_HIGH_WAIT_CNT GENMASK(11, 8)
+#define DLL_PHY_CTRL_RS_IDLE_CNT GENMASK(7, 0)
+
+/* Register controlling DQ related timing. */
+#define PHY_DQ_TIMING 0x2000
+/* Register controlling DQS related timing. */
+#define PHY_DQS_TIMING 0x2004
+#define PHY_DQS_TIMING_DQS_SEL_OE_END GENMASK(3, 0)
+#define PHY_DQS_TIMING_PHONY_DQS_SEL BIT(16)
+#define PHY_DQS_TIMING_USE_PHONY_DQS BIT(20)
+
+/* Register controlling the gate and loopback control related timing. */
+#define PHY_GATE_LPBK_CTRL 0x2008
+#define PHY_GATE_LPBK_CTRL_RDS GENMASK(24, 19)
+
+/* Register holds the control for the master DLL logic. */
+#define PHY_DLL_MASTER_CTRL 0x200C
+#define PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23)
+
+/* Register holds the control for the slave DLL logic. */
+#define PHY_DLL_SLAVE_CTRL 0x2010
+
+/* This register handles the global control settings for the PHY. */
+#define PHY_CTRL 0x2080
+#define PHY_CTRL_SDR_DQS BIT(14)
+#define PHY_CTRL_PHONY_DQS GENMASK(9, 4)
+
+/*
+ * This register handles the global control settings
+ * for the termination selects for reads.
+ */
+#define PHY_TSEL 0x2084
+
+/* Generic command layout. */
+#define GCMD_LAY_CS GENMASK_ULL(11, 8)
+/*
+ * This bit informs the minicontroller if it has to wait for tWB
+ * after sending the last CMD/ADDR/DATA in the sequence.
+ */
+#define GCMD_LAY_TWB BIT_ULL(6)
+/* Type of generic instruction. */
+#define GCMD_LAY_INSTR GENMASK_ULL(5, 0)
+
+/* Generic CMD sequence type. */
+#define GCMD_LAY_INSTR_CMD 0
+/* Generic ADDR sequence type. */
+#define GCMD_LAY_INSTR_ADDR 1
+/* Generic data transfer sequence type. */
+#define GCMD_LAY_INSTR_DATA 2
+
+/* Generic command input field used when the instruction type is CMD. */
+#define GCMD_LAY_INPUT_CMD GENMASK_ULL(23, 16)
+
+/* Generic command address sequence - address fields. */
+#define GCMD_LAY_INPUT_ADDR GENMASK_ULL(63, 16)
+/* Generic command address sequence - address size. */
+#define GCMD_LAY_INPUT_ADDR_SIZE GENMASK_ULL(13, 11)
+
+/* Transfer direction field of generic command data sequence. */
+#define GCMD_DIR BIT_ULL(11)
+/* Read transfer direction of generic command data sequence. */
+#define GCMD_DIR_READ 0
+/* Write transfer direction of generic command data sequence. */
+#define GCMD_DIR_WRITE 1
+
+/* ECC enabled flag of generic command data sequence - ECC enabled. */
+#define GCMD_ECC_EN BIT_ULL(12)
+/* Generic command data sequence - sector size. */
+#define GCMD_SECT_SIZE GENMASK_ULL(31, 16)
+/* Generic command data sequence - sector count. */
+#define GCMD_SECT_CNT GENMASK_ULL(39, 32)
+/* Generic command data sequence - last sector size. */
+#define GCMD_LAST_SIZE GENMASK_ULL(55, 40)
+
+/* CDMA descriptor fields. */
+/* Erase command type of CDMA descriptor. */
+#define CDMA_CT_ERASE 0x1000
+/* Program page command type of CDMA descriptor. */
+#define CDMA_CT_WR 0x2100
+/* Read page command type of CDMA descriptor. */
+#define CDMA_CT_RD 0x2200
+
+/* Flash pointer memory shift. */
+#define CDMA_CFPTR_MEM_SHIFT 24
+/* Flash pointer memory mask. */
+#define CDMA_CFPTR_MEM GENMASK(26, 24)
+
+/*
+ * Command DMA descriptor flag. If set, an interrupt is issued after
+ * the completion of descriptor processing.
+ */
+#define CDMA_CF_INT BIT(8)
+/*
+ * Command DMA descriptor flags - the next descriptor
+ * address field is valid and descriptor processing should continue.
+ */
+#define CDMA_CF_CONT BIT(9)
+/* DMA master flag of command DMA descriptor. */
+#define CDMA_CF_DMA_MASTER BIT(10)
+
+/* Operation complete status of command descriptor. */
+#define CDMA_CS_COMP BIT(15)
+/* Command descriptor status - operation fail. */
+#define CDMA_CS_FAIL BIT(14)
+/* Command descriptor status - page erased. */
+#define CDMA_CS_ERP BIT(11)
+/* Command descriptor status - timeout occurred. */
+#define CDMA_CS_TOUT BIT(10)
+/*
+ * Maximum amount of correction applied to one ECC sector.
+ * It is part of command descriptor status.
+ */
+#define CDMA_CS_MAXERR GENMASK(9, 2)
+/* Command descriptor status - uncorrectable ECC error. */
+#define CDMA_CS_UNCE BIT(1)
+/* Command descriptor status - descriptor error. */
+#define CDMA_CS_ERR BIT(0)
+
+/* Status of operation - OK. */
+#define STAT_OK 0
+/* Status of operation - FAIL. */
+#define STAT_FAIL 2
+/* Status of operation - uncorrectable ECC error. */
+#define STAT_ECC_UNCORR 3
+/* Status of operation - page erased. */
+#define STAT_ERASED 5
+/* Status of operation - correctable ECC error. */
+#define STAT_ECC_CORR 6
+/* Status of operation - unexpected state. */
+#define STAT_UNKNOWN 7
+/* Status of operation - operation is not completed yet. */
+#define STAT_BUSY 0xFF
+
+#define BCH_MAX_NUM_CORR_CAPS 8
+#define BCH_MAX_NUM_SECTOR_SIZES 2
+
+struct cadence_nand_timings {
+ u32 async_toggle_timings;
+ u32 timings0;
+ u32 timings1;
+ u32 timings2;
+ u32 dll_phy_ctrl;
+ u32 phy_ctrl;
+ u32 phy_dqs_timing;
+ u32 phy_gate_lpbk_ctrl;
+};
+
+/* Command DMA descriptor. */
+struct cadence_nand_cdma_desc {
+ /* Next descriptor address. */
+ u64 next_pointer;
+
+ /* Flash address is a 32-bit address comprising BANK and ROW ADDR. */
+ u32 flash_pointer;
+ /* Field appears in HPNFC version 13. */
+ u16 bank;
+ u16 rsvd0;
+
+ /* Operation the controller needs to perform. */
+ u16 command_type;
+ u16 rsvd1;
+ /* Flags for operation of this command. */
+ u16 command_flags;
+ u16 rsvd2;
+
+ /* System/host memory address required for data DMA commands. */
+ u64 memory_pointer;
+
+ /* Status of operation. */
+ u32 status;
+ u32 rsvd3;
+
+ /* Address pointer to sync buffer location. */
+ u64 sync_flag_pointer;
+
+ /* Controls the buffer sync mechanism. */
+ u32 sync_arguments;
+ u32 rsvd4;
+
+ /* Control data pointer. */
+ u64 ctrl_data_ptr;
+};
+
+/* Interrupt status. */
+struct cadence_nand_irq_status {
+ /* Thread operation complete status. */
+ u32 trd_status;
+ /* Thread operation error. */
+ u32 trd_error;
+ /* Controller status. */
+ u32 status;
+};
+
+/* Cadence NAND flash controller capabilities obtained from driver data. */
+struct cadence_nand_dt_devdata {
+ /* Skew value of the output signals of the NAND Flash interface. */
+ u32 if_skew;
+ /* Indicates whether the slave DMA interface is connected to a DMA engine. */
+ unsigned int has_dma:1;
+};
+
+/* Cadence NAND flash controller capabilities read from registers. */
+struct cdns_nand_caps {
+ /* Maximum number of banks supported by hardware. */
+ u8 max_banks;
+ /* Slave and Master DMA data width in bytes (4 or 8). */
+ u8 data_dma_width;
+ /* Control Data feature supported. */
+ bool data_control_supp;
+ /* Is PHY type DLL. */
+ bool is_phy_type_dll;
+};
+
+struct cdns_nand_ctrl {
+ struct device *dev;
+ struct nand_controller controller;
+ struct cadence_nand_cdma_desc *cdma_desc;
+ /* IP capability. */
+ const struct cadence_nand_dt_devdata *caps1;
+ struct cdns_nand_caps caps2;
+ u8 ctrl_rev;
+ dma_addr_t dma_cdma_desc;
+ u8 *buf;
+ u32 buf_size;
+ u8 curr_corr_str_idx;
+
+ /* Register interface. */
+ void __iomem *reg;
+
+ struct {
+ void __iomem *virt;
+ dma_addr_t dma;
+ } io;
+
+ int irq;
+ /* Interrupts that have happened. */
+ struct cadence_nand_irq_status irq_status;
+ /* Interrupts we are waiting for. */
+ struct cadence_nand_irq_status irq_mask;
+ struct completion complete;
+ /* Protect irq_mask and irq_status. */
+ spinlock_t irq_lock;
+
+ int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
+ struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
+ struct nand_ecc_caps ecc_caps;
+
+ int curr_trans_type;
+
+ struct dma_chan *dmac;
+
+ u32 nf_clk_rate;
+ /*
+ * Estimated Board delay. The value includes the total
+ * round trip delay for the signals and is used for deciding on values
+ * associated with data read capture.
+ */
+ u32 board_delay;
+
+ struct nand_chip *selected_chip;
+
+ unsigned long assigned_cs;
+ struct list_head chips;
+};
+
+struct cdns_nand_chip {
+ struct cadence_nand_timings timings;
+ struct nand_chip chip;
+ u8 nsels;
+ struct list_head node;
+
+ /*
+ * Part of the OOB area of the NAND flash memory page
+ * that is available for the user to read or write.
+ */
+ u32 avail_oob_size;
+
+ /* Sector size. There are a few sectors per mtd->writesize. */
+ u32 sector_size;
+ u32 sector_count;
+
+ /* Offset of BBM. */
+ u8 bbm_offs;
+ /* Number of bytes reserved for BBM. */
+ u8 bbm_len;
+ /* ECC strength index. */
+ u8 corr_str_idx;
+
+ u8 cs[];
+};
+
+struct ecc_info {
+ int (*calc_ecc_bytes)(int step_size, int strength);
+ int max_step_size;
+};
+
+static inline struct
+cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
+{
+ return container_of(chip, struct cdns_nand_chip, chip);
+}
+
+static inline struct
+cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
+{
+ return container_of(controller, struct cdns_nand_ctrl, controller);
+}
+
+static bool
+cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
+ u32 buf_len)
+{
+ u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
+
+ return buf && virt_addr_valid(buf) &&
+ likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
+ likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
+}
+
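+/* Poll a register until the masked bits reach the expected state or timeout. */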
+static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
+ u32 reg_offset, u32 timeout_us,
+ u32 mask, bool is_clear)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
+ val, !(val & mask) == is_clear,
+ 10, timeout_us);
+
+ if (ret < 0) {
+ dev_err(cdns_ctrl->dev,
+ "Timeout while waiting for reg %x with mask %x is clear %d\n",
+ reg_offset, mask, is_clear);
+ }
+
+ return ret;
+}
+
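+/* Enable or disable hardware ECC generation and correction. */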
+static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
+ bool enable)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+
+ if (enable)
+ reg |= ECC_CONFIG_0_ECC_EN;
+ else
+ reg &= ~ECC_CONFIG_0_ECC_EN;
+
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+
+ return 0;
+}
+
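+/* Select the ECC correction strength index used by the ECC engine. */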
+static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 corr_str_idx)
+{
+ u32 reg;
+
+ if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
+ return;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+ reg &= ~ECC_CONFIG_0_CORR_STR;
+ reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+
+ cdns_ctrl->curr_corr_str_idx = corr_str_idx;
+}
+
+static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 strength)
+{
+ int i, corr_str_idx = -1;
+
+ for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
+ if (cdns_ctrl->ecc_strengths[i] == strength) {
+ corr_str_idx = i;
+ break;
+ }
+ }
+
+ return corr_str_idx;
+}
+
+static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
+ u16 marker_value)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
+ reg &= ~SKIP_BYTES_MARKER_VALUE;
+ reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
+ marker_value);
+
+ writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
+
+ return 0;
+}
+
+static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 num_of_bytes,
+ u32 offset_value,
+ int enable)
+{
+ u32 reg, skip_bytes_offset;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ if (!enable) {
+ num_of_bytes = 0;
+ offset_value = 0;
+ }
+
+ reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
+ reg &= ~SKIP_BYTES_NUM_OF_BYTES;
+ reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
+ num_of_bytes);
+ skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
+ offset_value);
+
+ writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
+ writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);
+
+ return 0;
+}
+
+/* Function enables/disables hardware detection of erased data pages. */
+static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
+ bool enable,
+ u8 bitflips_threshold)
+{
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+
+ if (enable)
+ reg |= ECC_CONFIG_0_ERASE_DET_EN;
+ else
+ reg &= ~ECC_CONFIG_0_ERASE_DET_EN;
+
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+ writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
+}
+
+static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
+ bool bit_bus16)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);
+
+ if (!bit_bus16)
+ reg &= ~COMMON_SET_DEVICE_16BIT;
+ else
+ reg |= COMMON_SET_DEVICE_16BIT;
+ writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);
+
+ return 0;
+}
+
+static void
+cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
+ writel_relaxed(irq_status->trd_status,
+ cdns_ctrl->reg + TRD_COMP_INT_STATUS);
+ writel_relaxed(irq_status->trd_error,
+ cdns_ctrl->reg + TRD_ERR_INT_STATUS);
+}
+
+static void
+cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
+ irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
+ + TRD_COMP_INT_STATUS);
+ irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
+ + TRD_ERR_INT_STATUS);
+}
+
+static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ cadence_nand_read_int_status(cdns_ctrl, irq_status);
+
+ return irq_status->status || irq_status->trd_status ||
+ irq_status->trd_error;
+}
+
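+/* Clear the stored interrupt status and interrupt mask. */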
+static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
+ memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
+ memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
+ spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
+}
+
+/*
+ * This is the interrupt service routine. It handles all interrupts
+ * sent to this device.
+ */
+static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = dev_id;
+ struct cadence_nand_irq_status irq_status;
+ irqreturn_t result = IRQ_NONE;
+
+ spin_lock(&cdns_ctrl->irq_lock);
+
+ if (irq_detected(cdns_ctrl, &irq_status)) {
+ /* Handle interrupt. */
+ /* First acknowledge it. */
+ cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
+ /* Status in the device context for someone to read. */
+ cdns_ctrl->irq_status.status |= irq_status.status;
+ cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
+ cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
+ /* Notify anyone who cares that it happened. */
+ complete(&cdns_ctrl->complete);
+ /* Tell the OS that we've handled this. */
+ result = IRQ_HANDLED;
+ }
+ spin_unlock(&cdns_ctrl->irq_lock);
+
+ return result;
+}
+
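+/* Enable the controller interrupts selected by the given mask. */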
+static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_mask)
+{
+ writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
+ cdns_ctrl->reg + INTR_ENABLE);
+
+ writel_relaxed(irq_mask->trd_error,
+ cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
+}
+
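+/* Wait for an interrupt previously enabled by cadence_nand_set_irq_mask(). */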
+static void
+cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_mask,
+ struct cadence_nand_irq_status *irq_status)
+{
+ unsigned long timeout = msecs_to_jiffies(10000);
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
+ timeout);
+
+ *irq_status = cdns_ctrl->irq_status;
+ if (time_left == 0) {
+ /* Timeout error. */
+ dev_err(cdns_ctrl->dev, "timeout occurred:\n");
+ dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
+ irq_status->status, irq_mask->status);
+ dev_err(cdns_ctrl->dev,
+ "\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
+ irq_status->trd_status, irq_mask->trd_status);
+ dev_err(cdns_ctrl->dev,
+ "\t trd_error = 0x%x, trd_error mask = 0x%x\n",
+ irq_status->trd_error, irq_mask->trd_error);
+ }
+}
+
+/* Execute generic command on NAND controller. */
+static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 chip_nr,
+ u64 mini_ctrl_cmd)
+{
+ u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
+ mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
+ mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_reset_irq(cdns_ctrl);
+
+ writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
+ writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);
+
+ /* Select generic command. */
+ reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
+ /* Thread number. */
+ reg |= FIELD_PREP(CMD_REG0_TN, 0);
+
+ /* Issue command. */
+ writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
+
+ return 0;
+}
+
+/* Wait for data on slave DMA interface. */
+static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 *out_sdma_trd,
+ u32 *out_sdma_size)
+{
+ struct cadence_nand_irq_status irq_mask, irq_status;
+
+ irq_mask.trd_status = 0;
+ irq_mask.trd_error = 0;
+ irq_mask.status = INTR_STATUS_SDMA_TRIGG
+ | INTR_STATUS_SDMA_ERR
+ | INTR_STATUS_UNSUPP_CMD;
+
+ cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
+ cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
+ if (irq_status.status == 0) {
+ dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
+ return -ETIMEDOUT;
+ }
+
+ if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
+ *out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
+ *out_sdma_trd = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
+ *out_sdma_trd =
+ FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
+ } else {
+ dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
+ irq_status.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);
+
+ cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);
+
+ if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
+ cdns_ctrl->caps2.data_dma_width = 8;
+ else
+ cdns_ctrl->caps2.data_dma_width = 4;
+
+ if (reg & CTRL_FEATURES_CONTROL_DATA)
+ cdns_ctrl->caps2.data_control_supp = true;
+
+ if (reg & (CTRL_FEATURES_NVDDR_2_3
+ | CTRL_FEATURES_NVDDR))
+ cdns_ctrl->caps2.is_phy_type_dll = true;
+}
+
+/* Prepare CDMA descriptor. */
+static void
+cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
+ char nf_mem, u32 flash_ptr, char *mem_ptr,
+ char *ctrl_data_ptr, u16 ctype)
+{
+ struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
+
+ memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));
+
+ /* Set fields for one descriptor. */
+ cdma_desc->flash_pointer = flash_ptr;
+ if (cdns_ctrl->ctrl_rev >= 13)
+ cdma_desc->bank = nf_mem;
+ else
+ cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);
+
+ cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
+ cdma_desc->command_flags |= CDMA_CF_INT;
+
+ cdma_desc->memory_pointer = (uintptr_t)mem_ptr;
+ cdma_desc->status = 0;
+ cdma_desc->sync_flag_pointer = 0;
+ cdma_desc->sync_arguments = 0;
+
+ cdma_desc->command_type = ctype;
+ cdma_desc->ctrl_data_ptr = (uintptr_t)ctrl_data_ptr;
+}
+
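+/* Translate a CDMA descriptor status into a driver status code. */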
+static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
+ u32 desc_status)
+{
+ if (desc_status & CDMA_CS_ERP)
+ return STAT_ERASED;
+
+ if (desc_status & CDMA_CS_UNCE)
+ return STAT_ECC_UNCORR;
+
+ if (desc_status & CDMA_CS_ERR) {
+ dev_err(cdns_ctrl->dev, ":CDMA desc error flag detected.\n");
+ return STAT_FAIL;
+ }
+
+ if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
+ return STAT_ECC_CORR;
+
+ return STAT_FAIL;
+}
+
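+/* Check the result of the last processed CDMA descriptor. */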
+static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
+ u8 status = STAT_BUSY;
+
+ if (desc_ptr->status & CDMA_CS_FAIL) {
+ status = cadence_nand_check_desc_error(cdns_ctrl,
+ desc_ptr->status);
+ dev_err(cdns_ctrl->dev, ":CDMA error %x\n", desc_ptr->status);
+ } else if (desc_ptr->status & CDMA_CS_COMP) {
+ /* Descriptor finished with no errors. */
+ if (desc_ptr->command_flags & CDMA_CF_CONT) {
+ dev_info(cdns_ctrl->dev, "DMA unsupported flag is set");
+ status = STAT_UNKNOWN;
+ } else {
+ /* Last descriptor. */
+ status = STAT_OK;
+ }
+ }
+
+ return status;
+}
+
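+/* Issue a CDMA command on the given thread. */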
+static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 thread)
+{
+ u32 reg;
+ int status;
+
+ /* Wait for thread ready. */
+ status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
+ 1000000,
+ BIT(thread), true);
+ if (status)
+ return status;
+
+ cadence_nand_reset_irq(cdns_ctrl);
+
+ writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
+ cdns_ctrl->reg + CMD_REG2);
+ writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);
+
+ /* Select CDMA mode. */
+ reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
+ /* Thread number. */
+ reg |= FIELD_PREP(CMD_REG0_TN, thread);
+ /* Issue command. */
+ writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
+
+ return 0;
+}
+
+/* Send CDMA command and wait for finish. */
+static u32
+cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 thread)
+{
+ struct cadence_nand_irq_status irq_mask, irq_status = {0};
+ int status;
+
+ irq_mask.trd_status = BIT(thread);
+ irq_mask.trd_error = BIT(thread);
+ irq_mask.status = INTR_STATUS_CDMA_TERR;
+
+ cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
+
+ status = cadence_nand_cdma_send(cdns_ctrl, thread);
+ if (status)
+ return status;
+
+ cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
+
+ if (irq_status.status == 0 && irq_status.trd_status == 0 &&
+ irq_status.trd_error == 0) {
+ dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
+ return -ETIMEDOUT;
+ }
+ if (irq_status.status & irq_mask.status) {
+ dev_err(cdns_ctrl->dev, "CDMA command failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * ECC size depends on configured ECC strength and on maximum supported
+ * ECC step size.
+ */
+static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
+{
+ int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);
+
+ return ALIGN(nbytes, 2);
+}
+
+#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
+ static int \
+ cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
+ int strength)\
+ {\
+ return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
+ }
+
+CADENCE_NAND_CALC_ECC_BYTES(256)
+CADENCE_NAND_CALC_ECC_BYTES(512)
+CADENCE_NAND_CALC_ECC_BYTES(1024)
+CADENCE_NAND_CALC_ECC_BYTES(2048)
+CADENCE_NAND_CALC_ECC_BYTES(4096)
+
+/* Function reads BCH capabilities. */
+static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps;
+ int max_step_size = 0, nstrengths, i;
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
+ cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
+ cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
+ cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
+ cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1);
+ cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
+ cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
+ cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
+ cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2);
+ cdns_ctrl->ecc_stepinfos[0].stepsize =
+ FIELD_GET(BCH_CFG_2_SECT_0, reg);
+
+ cdns_ctrl->ecc_stepinfos[1].stepsize =
+ FIELD_GET(BCH_CFG_2_SECT_1, reg);
+
+ nstrengths = 0;
+ for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
+ if (cdns_ctrl->ecc_strengths[i] != 0)
+ nstrengths++;
+ }
+
+ ecc_caps->nstepinfos = 0;
+ for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
+ /* ECC strengths are common for all step infos. */
+ cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
+ cdns_ctrl->ecc_stepinfos[i].strengths =
+ cdns_ctrl->ecc_strengths;
+
+ if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
+ ecc_caps->nstepinfos++;
+
+ if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size)
+ max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize;
+ }
+ ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0];
+
+ switch (max_step_size) {
+ case 256:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
+ break;
+ case 512:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
+ break;
+ case 1024:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
+ break;
+ case 2048:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
+ break;
+ case 4096:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
+ break;
+ default:
+ dev_err(cdns_ctrl->dev,
+ "Unsupported sector size(ecc step size) %d\n",
+ max_step_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Hardware initialization. */
+static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ int status;
+ u32 reg;
+
+ status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_INIT_COMP, false);
+ if (status)
+ return status;
+
+ reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION);
+ cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);
+
+ dev_info(cdns_ctrl->dev,
+ "%s: cadence nand controller version reg %x\n",
+ __func__, reg);
+
+ /* Disable cache and multiplane. */
+ writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG);
+ writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG);
+
+ /* Clear all interrupts. */
+ writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
+
+ cadence_nand_get_caps(cdns_ctrl);
+ cadence_nand_read_bch_caps(cdns_ctrl);
+
+ /*
+ * Set IO width access to 8.
+ * During SW device discovery, 8-bit width access is expected.
+ */
+ status = cadence_nand_set_access_width16(cdns_ctrl, false);
+
+ return status;
+}
+
+#define TT_MAIN_OOB_AREAS 2
+#define TT_RAW_PAGE 3
+#define TT_BBM 4
+#define TT_MAIN_OOB_AREA_EXT 5
+
+/* Prepare size of data to transfer. */
+static void
+cadence_nand_prepare_data_size(struct nand_chip *chip,
+ int transfer_type)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 sec_size = 0, offset = 0, sec_cnt = 1;
+ u32 last_sec_size = cdns_chip->sector_size;
+ u32 data_ctrl_size = 0;
+ u32 reg = 0;
+
+ if (cdns_ctrl->curr_trans_type == transfer_type)
+ return;
+
+ switch (transfer_type) {
+ case TT_MAIN_OOB_AREA_EXT:
+ sec_cnt = cdns_chip->sector_count;
+ sec_size = cdns_chip->sector_size;
+ data_ctrl_size = cdns_chip->avail_oob_size;
+ break;
+ case TT_MAIN_OOB_AREAS:
+ sec_cnt = cdns_chip->sector_count;
+ last_sec_size = cdns_chip->sector_size
+ + cdns_chip->avail_oob_size;
+ sec_size = cdns_chip->sector_size;
+ break;
+ case TT_RAW_PAGE:
+ last_sec_size = mtd->writesize + mtd->oobsize;
+ break;
+ case TT_BBM:
+ offset = mtd->writesize + cdns_chip->bbm_offs;
+ last_sec_size = 8;
+ break;
+ }
+
+ reg = 0;
+ reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
+ reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
+ writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0);
+
+ reg = 0;
+ reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
+ reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
+ writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1);
+
+ if (cdns_ctrl->caps2.data_control_supp) {
+ reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL);
+ reg &= ~CONTROL_DATA_CTRL_SIZE;
+ reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
+ writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL);
+ }
+
+ cdns_ctrl->curr_trans_type = transfer_type;
+}
+
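+/* Execute a CDMA read or write transfer for one page. */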
+static int
+cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
+ int page, void *buf, void *ctrl_dat, u32 buf_size,
+ u32 ctrl_dat_size, enum dma_data_direction dir,
+ bool with_ecc)
+{
+ dma_addr_t dma_buf, dma_ctrl_dat = 0;
+ u8 thread_nr = chip_nr;
+ int status;
+ u16 ctype;
+
+ if (dir == DMA_FROM_DEVICE)
+ ctype = CDMA_CT_RD;
+ else
+ ctype = CDMA_CT_WR;
+
+ cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc);
+
+ dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir);
+ if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) {
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ return -EIO;
+ }
+
+ if (ctrl_dat && ctrl_dat_size) {
+ dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat,
+ ctrl_dat_size, dir);
+ if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) {
+ dma_unmap_single(cdns_ctrl->dev, dma_buf,
+ buf_size, dir);
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ return -EIO;
+ }
+ }
+
+ cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
+ (void *)dma_buf, (void *)dma_ctrl_dat,
+ ctype);
+
+ status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
+
+ dma_unmap_single(cdns_ctrl->dev, dma_buf,
+ buf_size, dir);
+
+ if (ctrl_dat && ctrl_dat_size)
+ dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat,
+ ctrl_dat_size, dir);
+ if (status)
+ return status;
+
+ return cadence_nand_cdma_finish(cdns_ctrl);
+}
+
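+/* Program the per-chip timing and PHY registers. */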
+static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_timings *t)
+{
+ writel_relaxed(t->async_toggle_timings,
+ cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
+ writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
+ writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
+ writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);
+
+ writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL);
+ writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING);
+ writel_relaxed(t->phy_dqs_timing,
+ cdns_ctrl->reg + PHY_DQS_TIMING);
+ writel_relaxed(t->phy_gate_lpbk_ctrl,
+ cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
+ writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
+ cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
+ writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
+ }
+}
+
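+/* Apply timings and ECC settings when switching to a different chip. */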
+static int cadence_nand_select_target(struct nand_chip *chip)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (chip == cdns_ctrl->selected_chip)
+ return 0;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings);
+
+ cadence_nand_set_ecc_strength(cdns_ctrl,
+ cdns_chip->corr_str_idx);
+
+ cadence_nand_set_erase_detection(cdns_ctrl, true,
+ chip->ecc.strength);
+
+ cdns_ctrl->curr_trans_type = -1;
+ cdns_ctrl->selected_chip = chip;
+
+ return 0;
+}
+
+static int cadence_nand_erase(struct nand_chip *chip, u32 page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ int status;
+ u8 thread_nr = cdns_chip->cs[chip->cur_cs];
+
+ cadence_nand_cdma_desc_prepare(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, NULL, NULL,
+ CDMA_CT_ERASE);
+ status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "erase operation failed\n");
+ return -EIO;
+ }
+
+ status = cadence_nand_cdma_finish(cdns_ctrl);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf)
+{
+ int status;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ cadence_nand_prepare_data_size(chip, TT_BBM);
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+ /*
+ * Read only the bad block marker from the offset
+ * defined by the memory manufacturer.
+ */
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->oobsize,
+ 0, DMA_FROM_DEVICE, false);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "read BBM failed\n");
+ return -EIO;
+ }
+
+ memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len);
+
+ return 0;
+}
+
+static int cadence_nand_write_page(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status;
+ u16 marker_val = 0xFFFF;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
+ mtd->writesize
+ + cdns_chip->bbm_offs,
+ 1);
+
+ if (oob_required) {
+ marker_val = *(u16 *)(chip->oob_poi
+ + cdns_chip->bbm_offs);
+ } else {
+ /* Set oob data to 0xFF. */
+ memset(cdns_ctrl->buf + mtd->writesize, 0xFF,
+ cdns_chip->avail_oob_size);
+ }
+
+ cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val);
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
+ cdns_ctrl->caps2.data_control_supp) {
+ u8 *oob;
+
+ if (oob_required)
+ oob = chip->oob_poi;
+ else
+ oob = cdns_ctrl->buf + mtd->writesize;
+
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, (void *)buf, oob,
+ mtd->writesize,
+ cdns_chip->avail_oob_size,
+ DMA_TO_DEVICE, true);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "write page failed\n");
+ return -EIO;
+ }
+
+ return 0;
+ }
+
+ if (oob_required) {
+ /* Transfer the data to the oob area. */
+ memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi,
+ cdns_chip->avail_oob_size);
+ }
+
+ memcpy(cdns_ctrl->buf, buf, mtd->writesize);
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
+
+ return cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize
+ + cdns_chip->avail_oob_size,
+ 0, DMA_TO_DEVICE, true);
+}
+
+static int cadence_nand_write_oob(struct nand_chip *chip, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ memset(cdns_ctrl->buf, 0xFF, mtd->writesize);
+
+ return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page);
+}
+
+static int cadence_nand_write_page_raw(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *tmp_buf = cdns_ctrl->buf;
+ int oob_skip = cdns_chip->bbm_len;
+ size_t size = writesize + oobsize;
+ int i, pos, len;
+ int status = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ /*
+ * Fill the buffer with 0xff first, except for a full page transfer.
+ * This simplifies the logic.
+ */
+ if (!buf || !oob_required)
+ memset(tmp_buf, 0xff, size);
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+ /* Arrange the buffer for syndrome payload/ecc layout. */
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(tmp_buf + pos, buf, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(tmp_buf + writesize + oob_skip, buf,
+ len);
+ buf += len;
+ }
+ }
+ }
+
+ if (oob_required) {
+ const u8 *oob = chip->oob_poi;
+ u32 oob_data_offset = (cdns_chip->sector_count - 1) *
+ (cdns_chip->sector_size + chip->ecc.bytes)
+ + cdns_chip->sector_size + oob_skip;
+
+ /* BBM at the beginning of the OOB area. */
+ memcpy(tmp_buf + writesize, oob, oob_skip);
+
+ /* OOB free. */
+ memcpy(tmp_buf + oob_data_offset, oob,
+ cdns_chip->avail_oob_size);
+ oob += cdns_chip->avail_oob_size;
+
+ /* OOB ECC. */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ if (i == (ecc_steps - 1))
+ pos += cdns_chip->avail_oob_size;
+
+ len = ecc_bytes;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(tmp_buf + pos, oob, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(tmp_buf + writesize + oob_skip, oob,
+ len);
+ oob += len;
+ }
+ }
+ }
+
+ cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
+
+ return cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize +
+ mtd->oobsize,
+ 0, DMA_TO_DEVICE, false);
+}
+
+static int cadence_nand_write_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ return cadence_nand_write_page_raw(chip, NULL, true, page);
+}
+
+static int cadence_nand_read_page(struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status = 0;
+ int ecc_err_count = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
+ mtd->writesize
+ + cdns_chip->bbm_offs, 1);
+
+ /*
+ * If the data buffer can be accessed by DMA and the data_control
+ * feature is supported, then transfer data and OOB directly.
+ */
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
+ cdns_ctrl->caps2.data_control_supp) {
+ u8 *oob;
+
+ if (oob_required)
+ oob = chip->oob_poi;
+ else
+ oob = cdns_ctrl->buf + mtd->writesize;
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, buf, oob,
+ mtd->writesize,
+ cdns_chip->avail_oob_size,
+ DMA_FROM_DEVICE, true);
+ /* Otherwise use bounce buffer. */
+ } else {
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf,
+ NULL, mtd->writesize
+ + cdns_chip->avail_oob_size,
+ 0, DMA_FROM_DEVICE, true);
+
+ memcpy(buf, cdns_ctrl->buf, mtd->writesize);
+ if (oob_required)
+ memcpy(chip->oob_poi,
+ cdns_ctrl->buf + mtd->writesize,
+ mtd->oobsize);
+ }
+
+ switch (status) {
+ case STAT_ECC_UNCORR:
+ mtd->ecc_stats.failed++;
+ ecc_err_count++;
+ break;
+ case STAT_ECC_CORR:
+ ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
+ cdns_ctrl->cdma_desc->status);
+ mtd->ecc_stats.corrected += ecc_err_count;
+ break;
+ case STAT_ERASED:
+ case STAT_OK:
+ break;
+ default:
+ dev_err(cdns_ctrl->dev, "read page failed\n");
+ return -EIO;
+ }
+
+ if (oob_required)
+ if (cadence_nand_read_bbm(chip, page, chip->oob_poi))
+ return -EIO;
+
+ return ecc_err_count;
+}
+
+/* Reads OOB data from the device. */
+static int cadence_nand_read_oob(struct nand_chip *chip, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+
+ return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page);
+}
+
+static int cadence_nand_read_page_raw(struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int oob_skip = cdns_chip->bbm_len;
+ int writesize = mtd->writesize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *tmp_buf = cdns_ctrl->buf;
+ int i, pos, len;
+ int status = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+ cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize
+ + mtd->oobsize,
+ 0, DMA_FROM_DEVICE, false);
+
+ switch (status) {
+ case STAT_ERASED:
+ case STAT_OK:
+ break;
+ default:
+ dev_err(cdns_ctrl->dev, "read raw page failed\n");
+ return -EIO;
+ }
+
+ /* Arrange the buffer for syndrome payload/ecc layout. */
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(buf, tmp_buf + pos, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(buf, tmp_buf + writesize + oob_skip,
+ len);
+ buf += len;
+ }
+ }
+ }
+
+ if (oob_required) {
+ u8 *oob = chip->oob_poi;
+ u32 oob_data_offset = (cdns_chip->sector_count - 1) *
+ (cdns_chip->sector_size + chip->ecc.bytes)
+ + cdns_chip->sector_size + oob_skip;
+
+ /* OOB free. */
+ memcpy(oob, tmp_buf + oob_data_offset,
+ cdns_chip->avail_oob_size);
+
+ /* BBM at the beginning of the OOB area. */
+ memcpy(oob, tmp_buf + writesize, oob_skip);
+
+ oob += cdns_chip->avail_oob_size;
+
+ /* OOB ECC */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ len = ecc_bytes;
+
+ if (i == (ecc_steps - 1))
+ pos += cdns_chip->avail_oob_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(oob, tmp_buf + pos, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(oob, tmp_buf + writesize + oob_skip,
+ len);
+ oob += len;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int cadence_nand_read_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ return cadence_nand_read_page_raw(chip, NULL, true, page);
+}
+
+static void cadence_nand_slave_dma_transfer_finished(void *data)
+{
+ struct completion *finished = data;
+
+ complete(finished);
+}
+
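+/* Move data between a buffer and the slave DMA interface via the DMA engine. */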
+static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
+ void *buf,
+ dma_addr_t dev_dma, size_t len,
+ enum dma_data_direction dir)
+{
+ DECLARE_COMPLETION_ONSTACK(finished);
+ struct dma_chan *chan;
+ struct dma_device *dma_dev;
+ dma_addr_t src_dma, dst_dma, buf_dma;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+ chan = cdns_ctrl->dmac;
+ dma_dev = chan->device;
+
+ buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
+ if (dma_mapping_error(dma_dev->dev, buf_dma)) {
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ goto err;
+ }
+
+ if (dir == DMA_FROM_DEVICE) {
+ src_dma = cdns_ctrl->io.dma;
+ dst_dma = buf_dma;
+ } else {
+ src_dma = buf_dma;
+ dst_dma = cdns_ctrl->io.dma;
+ }
+
+ tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
+ goto err_unmap;
+ }
+
+ tx->callback = cadence_nand_slave_dma_transfer_finished;
+ tx->callback_param = &finished;
+
+ cookie = dmaengine_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(cdns_ctrl->dmac);
+ wait_for_completion(&finished);
+
+ dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+
+err:
+ dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
+
+ return -EIO;
+}
+
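+/* Read data from the slave DMA interface, by CPU I/O or by DMA engine. */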
+static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 *buf, int len)
+{
+ u8 thread_nr = 0;
+ u32 sdma_size;
+ int status;
+
+ /* Wait until the slave DMA interface is ready for data transfer. */
+ status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+ if (status)
+ return status;
+
+ if (!cdns_ctrl->caps1->has_dma) {
+ int len_in_words = len >> 2;
+
+ /* Read the word-aligned part of the data. */
+ ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+ if (sdma_size > len) {
+ /* Read the remaining data from the slave DMA interface, if any. */
+ ioread32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
+ sdma_size / 4 - len_in_words);
+ /* Copy the rest of the data. */
+ memcpy(buf + (len_in_words << 2), cdns_ctrl->buf,
+ len - (len_in_words << 2));
+ }
+ return 0;
+ }
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
+ cdns_ctrl->io.dma,
+ len, DMA_FROM_DEVICE);
+ if (status == 0)
+ return 0;
+
+ dev_warn(cdns_ctrl->dev,
+ "Slave DMA transfer failed. Try again using bounce buffer.");
+ }
+
+ /* If DMA transfer is not possible or failed then use bounce buffer. */
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+ cdns_ctrl->io.dma,
+ sdma_size, DMA_FROM_DEVICE);
+
+ if (status) {
+ dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+ return status;
+ }
+
+ memcpy(buf, cdns_ctrl->buf, len);
+
+ return 0;
+}
+
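+/* Write data to the slave DMA interface, by CPU I/O or by DMA engine. */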
+static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
+ const u8 *buf, int len)
+{
+ u8 thread_nr = 0;
+ u32 sdma_size;
+ int status;
+
+ /* Wait until the slave DMA interface is ready for data transfer. */
+ status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+ if (status)
+ return status;
+
+ if (!cdns_ctrl->caps1->has_dma) {
+ int len_in_words = len >> 2;
+
+ iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+ if (sdma_size > len) {
+ /* Copy the rest of the data. */
+ memcpy(cdns_ctrl->buf, buf + (len_in_words << 2),
+ len - (len_in_words << 2));
+ /* Write all data expected by the NAND controller. */
+ iowrite32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
+ sdma_size / 4 - len_in_words);
+ }
+
+ return 0;
+ }
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
+ cdns_ctrl->io.dma,
+ len, DMA_TO_DEVICE);
+ if (status == 0)
+ return 0;
+
+ dev_warn(cdns_ctrl->dev,
+ "Slave DMA transfer failed. Try again using bounce buffer.");
+ }
+
+ /* If DMA transfer is not possible or failed then use bounce buffer. */
+ memcpy(cdns_ctrl->buf, buf, len);
+
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+ cdns_ctrl->io.dma,
+ sdma_size, DMA_TO_DEVICE);
+
+ if (status)
+ dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+
+ return status;
+}
+
+static int cadence_nand_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ int status;
+
+ /*
+ * Callers of this function do not verify if the NAND is using a 16-bit
+ * or an 8-bit bus for normal operations, so we need to take care of that
+ * here by leaving the configuration unchanged if the NAND does not have
+ * the NAND_BUSWIDTH_16 flag set.
+ */
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return 0;
+
+ status = cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
+
+ return status;
+}
+
+static int cadence_nand_cmd_opcode(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ int ret;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_CMD);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD,
+ instr->ctx.cmd.opcode);
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret)
+ dev_err(cdns_ctrl->dev, "send cmd %x failed\n",
+ instr->ctx.cmd.opcode);
+
+ return ret;
+}
+
+static int cadence_nand_cmd_address(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ unsigned int offset, naddrs;
+ u64 address = 0;
+ const u8 *addrs;
+ int ret;
+ int i;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_ADDR);
+
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < naddrs; i++)
+ address |= (u64)addrs[i] << (8 * i);
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
+ address);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
+ naddrs - 1);
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret)
+ dev_err(cdns_ctrl->dev, "send address %llx failed\n", address);
+
+ return ret;
+}
+
+static int cadence_nand_cmd_erase(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ unsigned int op_id;
+
+ if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) {
+ int i;
+ const struct nand_op_instr *instr = NULL;
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+ u32 page = 0;
+
+ instr = &subop->instrs[1];
+ offset = nand_subop_get_addr_start_off(subop, 1);
+ naddrs = nand_subop_get_num_addr_cyc(subop, 1);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < naddrs; i++)
+ page |= (u32)addrs[i] << (8 * i);
+
+ return cadence_nand_erase(chip, page);
+ }
+
+ /*
+ * If it is not an erase operation, handle the operation
+ * by calling the exec_op function.
+ */
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ int ret;
+ const struct nand_operation nand_op = {
+ .cs = chip->cur_cs,
+ .instrs = &subop->instrs[op_id],
+ .ninstrs = 1};
+ ret = chip->controller->ops->exec_op(chip, &nand_op, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cadence_nand_cmd_data(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int offset, op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ int len = 0;
+ int ret;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_DATA);
+
+ if (instr->type == NAND_OP_DATA_OUT_INSTR)
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR,
+ GCMD_DIR_WRITE);
+
+ len = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);
+ if (instr->ctx.data.force_8bit) {
+ ret = cadence_nand_force_byte_access(chip, true);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "cannot change byte access generic data cmd failed\n");
+ return ret;
+ }
+ }
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "send generic data cmd failed\n");
+ return ret;
+ }
+
+ if (instr->type == NAND_OP_DATA_IN_INSTR) {
+ void *buf = instr->ctx.data.buf.in + offset;
+
+ ret = cadence_nand_read_buf(cdns_ctrl, buf, len);
+ } else {
+ const void *buf = instr->ctx.data.buf.out + offset;
+
+ ret = cadence_nand_write_buf(cdns_ctrl, buf, len);
+ }
+
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n");
+ return ret;
+ }
+
+ if (instr->ctx.data.force_8bit) {
+ ret = cadence_nand_force_byte_access(chip, false);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+				"cannot change byte access for generic data cmd\n");
+ }
+ }
+
+ return ret;
+}
+
+static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ int status;
+ unsigned int op_id = 0;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr = &subop->instrs[op_id];
+ u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
+
+ status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
+ timeout_us,
+ BIT(cdns_chip->cs[chip->cur_cs]),
+ false);
+ return status;
+}
+
+static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_erase,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_opcode,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_address,
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_data,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_data,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_waitrdy,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
+ );
+
+static int cadence_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ int status = cadence_nand_select_target(chip);
+
+ if (status)
+ return status;
+
+ return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
+ check_only);
+}
+
+static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = cdns_chip->bbm_len;
+ oobregion->length = cdns_chip->avail_oob_size
+ - cdns_chip->bbm_len;
+
+ return 0;
+}
+
+static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = cdns_chip->avail_oob_size;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
+ .free = cadence_nand_ooblayout_free,
+ .ecc = cadence_nand_ooblayout_ecc,
+};
+
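+/*
+ * Return the number of clock cycles, minus one, needed to cover @timing:
+ * ceil(timing / clock) - 1. Both arguments must use the same unit
+ * (picoseconds in this driver).
+ */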
+static int calc_cycl(u32 timing, u32 clock)
+{
+ if (timing == 0 || clock == 0)
+ return 0;
+
+ if ((timing % clock) > 0)
+ return timing / clock;
+ else
+ return timing / clock - 1;
+}
+
+/* Calculate max data valid window. */
+static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
+ u32 board_delay_skew_min, u32 ext_mode)
+{
+ if (ext_mode == 0)
+ clk_period /= 2;
+
+ return (trp_cnt + 1) * clk_period + trhoh_min +
+ board_delay_skew_min;
+}
+
+/* Calculate data valid window. */
+static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
+ u32 trea_max, u32 ext_mode)
+{
+ if (ext_mode == 0)
+ clk_period /= 2;
+
+ return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
+}
+
+static int
+cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_data_interface *conf)
+{
+ const struct nand_sdr_timings *sdr;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct cadence_nand_timings *t = &cdns_chip->timings;
+ u32 reg;
+ u32 board_delay = cdns_ctrl->board_delay;
+ u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
+ cdns_ctrl->nf_clk_rate);
+ u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
+ u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
+ u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
+ u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
+ u32 if_skew = cdns_ctrl->caps1->if_skew;
+ u32 board_delay_skew_min = board_delay - if_skew;
+ u32 board_delay_skew_max = board_delay + if_skew;
+ u32 dqs_sampl_res, phony_dqs_mod;
+ u32 tdvw, tdvw_min, tdvw_max;
+ u32 ext_rd_mode, ext_wr_mode;
+ u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
+ u32 sampling_point;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ memset(t, 0, sizeof(*t));
+ /* Sampling point calculation. */
+
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ phony_dqs_mod = 2;
+ else
+ phony_dqs_mod = 1;
+
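+	/*
+	 * Resolution with which a sampling point can be placed within the
+	 * data valid window: half a clock period for the DLL PHY, a full
+	 * clock period otherwise.
+	 */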
+ dqs_sampl_res = clk_period / phony_dqs_mod;
+
+ tdvw_min = sdr->tREA_max + board_delay_skew_max;
+	/*
+	 * The idea of these calculations is to get the optimum values
+	 * for the tRP and tRH timings. If it is NOT possible to sample data
+	 * with optimal tRP/tRH settings, the parameters will be extended.
+	 * If clk_period is 50ns (the lowest value) this condition is met
+	 * for asynchronous timing modes 1, 2, 3, 4 and 5.
+	 * If clk_period is 20ns the condition is met only
+	 * for asynchronous timing mode 5.
+	 */
+ if (sdr->tRC_min <= clk_period &&
+ sdr->tRP_min <= (clk_period / 2) &&
+ sdr->tREH_min <= (clk_period / 2)) {
+ /* Performance mode. */
+ ext_rd_mode = 0;
+ tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+ sdr->tREA_max, ext_rd_mode);
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
+ board_delay_skew_min,
+ ext_rd_mode);
+		/*
+		 * Check if the data valid window and a sampling point can be
+		 * found and that the sampling point is not on the edge
+		 * (i.e. we have hold margin). If not, extend the tRP timing.
+		 */
+ if (tdvw > 0) {
+ if (tdvw_max <= tdvw_min ||
+ (tdvw_max % dqs_sampl_res) == 0) {
+				/*
+				 * No valid sampling point, so the RE pulse
+				 * needs to be widened by half a clock cycle.
+				 */
+ ext_rd_mode = 1;
+ }
+ } else {
+			/*
+			 * There is no valid window to sample data, so tRP
+			 * needs to be widened. Very safe calculations are
+			 * performed here.
+			 */
+ trp_cnt = (sdr->tREA_max + board_delay_skew_max
+ + dqs_sampl_res) / clk_period;
+ ext_rd_mode = 1;
+ }
+
+ } else {
+ /* Extended read mode. */
+ u32 trh;
+
+ ext_rd_mode = 1;
+ trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
+ trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
+ if (sdr->tREH_min >= trh)
+ trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
+ else
+ trh_cnt = calc_cycl(trh, clk_period);
+
+ tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+ sdr->tREA_max, ext_rd_mode);
+		/*
+		 * Check if the data valid window and a sampling point can be
+		 * found, or, if the sampling point is at the edge, check
+		 * whether the previous one is valid. If not, extend the tRP
+		 * timing.
+		 */
+ if (tdvw > 0) {
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+ sdr->tRHOH_min,
+ board_delay_skew_min,
+ ext_rd_mode);
+
+ if ((((tdvw_max / dqs_sampl_res)
+ * dqs_sampl_res) <= tdvw_min) ||
+ (((tdvw_max % dqs_sampl_res) == 0) &&
+ (((tdvw_max / dqs_sampl_res - 1)
+ * dqs_sampl_res) <= tdvw_min))) {
+				/*
+				 * The data valid window is narrower than the
+				 * sampling resolution and does not hit any
+				 * sampling point. To make sure a sampling
+				 * point is found, the RE low pulse width is
+				 * extended by one clock cycle.
+				 */
+ trp_cnt = trp_cnt + 1;
+ }
+ } else {
+			/*
+			 * There is no valid window to sample data, so tRP
+			 * needs to be widened. Very safe calculations are
+			 * performed here.
+			 */
+ trp_cnt = (sdr->tREA_max + board_delay_skew_max
+ + dqs_sampl_res) / clk_period;
+ }
+ }
+
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+ sdr->tRHOH_min,
+ board_delay_skew_min, ext_rd_mode);
+
+ if (sdr->tWC_min <= clk_period &&
+ (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
+ (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
+ ext_wr_mode = 0;
+ } else {
+ u32 twh;
+
+ ext_wr_mode = 1;
+ twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
+ if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
+ twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
+ clk_period);
+
+ twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
+ if (sdr->tWH_min >= twh)
+ twh = sdr->tWH_min;
+
+ twh_cnt = calc_cycl(twh + if_skew, clk_period);
+ }
+
+ reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
+ t->async_toggle_timings = reg;
+ dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
+
+ tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
+ tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
+ twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
+ trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
+ reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
+
+	/*
+	 * If the timing exceeds the delay field in the timing register,
+	 * use the maximum value.
+	 */
+ if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
+ reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
+ else
+ reg |= TIMINGS0_TCCS;
+
+ reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
+ reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
+ t->timings0 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
+
+ /* The following is related to single signal so skew is not needed. */
+ trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
+ trhz_cnt = trhz_cnt + 1;
+ twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
+	/*
+	 * Because of the two-stage syncflop the value must be increased:
+	 * the first added value (3) is related to the sync, the second (5)
+	 * to the output interface delay.
+	 */
+ twb_cnt = twb_cnt + 3 + 5;
+	/*
+	 * The following is related to the WE edge of the random data input
+	 * sequence, so skew is not needed.
+	 */
+ tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
+ reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
+ reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
+ reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
+ t->timings1 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
+
+ tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
+ if (tfeat_cnt < twb_cnt)
+ tfeat_cnt = twb_cnt;
+
+ tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
+ tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
+
+ reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
+ reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
+ reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
+ t->timings2 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ reg = DLL_PHY_CTRL_DLL_RST_N;
+ if (ext_wr_mode)
+ reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
+ if (ext_rd_mode)
+ reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
+
+ reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
+ reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
+ t->dll_phy_ctrl = reg;
+ dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
+ }
+
+ /* Sampling point calculation. */
+ if ((tdvw_max % dqs_sampl_res) > 0)
+ sampling_point = tdvw_max / dqs_sampl_res;
+ else
+ sampling_point = (tdvw_max / dqs_sampl_res - 1);
+
+ if (sampling_point * dqs_sampl_res > tdvw_min) {
+ dll_phy_dqs_timing =
+ FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
+ dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
+ phony_dqs_timing = sampling_point / phony_dqs_mod;
+
+ if ((sampling_point % 2) > 0) {
+ dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
+ if ((tdvw_max % dqs_sampl_res) == 0)
+				/*
+				 * Calculation for a sampling point that sits
+				 * at the edge of the data and is an odd
+				 * number.
+				 */
+ phony_dqs_timing = (tdvw_max / dqs_sampl_res)
+ / phony_dqs_mod - 1;
+
+ if (!cdns_ctrl->caps2.is_phy_type_dll)
+ phony_dqs_timing--;
+
+ } else {
+ phony_dqs_timing--;
+ }
+ rd_del_sel = phony_dqs_timing + 3;
+ } else {
+ dev_warn(cdns_ctrl->dev,
+			 "ERROR: cannot find valid sampling point\n");
+ }
+
+ reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ reg |= PHY_CTRL_SDR_DQS;
+ t->phy_ctrl = reg;
+ dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
+ dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
+ dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
+ dll_phy_dqs_timing);
+ t->phy_dqs_timing = dll_phy_dqs_timing;
+
+ reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
+ dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
+ reg);
+ t->phy_gate_lpbk_ctrl = reg;
+
+ dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
+ PHY_DLL_MASTER_CTRL_BYPASS_MODE);
+ dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
+ }
+
+ return 0;
+}
+
+int cadence_nand_attach_chip(struct nand_chip *chip)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	u32 ecc_size;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 max_oob_data_size;
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ ret = cadence_nand_set_access_width16(cdns_ctrl, true);
+ if (ret)
+ return ret;
+ }
+
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->ecc.mode = NAND_ECC_HW;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ cdns_chip->bbm_offs = chip->badblockpos;
+ if (chip->options & NAND_BUSWIDTH_16) {
+ cdns_chip->bbm_offs &= ~0x01;
+ cdns_chip->bbm_len = 2;
+ } else {
+ cdns_chip->bbm_len = 1;
+ }
+
+ ret = nand_ecc_choose_conf(chip,
+ &cdns_ctrl->ecc_caps,
+ mtd->oobsize - cdns_chip->bbm_len);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
+ return ret;
+ }
+
+ dev_dbg(cdns_ctrl->dev,
+ "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
+ chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
+
+	/* Error correction configuration. */
+	cdns_chip->sector_size = chip->ecc.size;
+	cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
+	ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
+
+	cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
+
+ max_oob_data_size = MAX_OOB_SIZE_PER_SECTOR;
+
+ if (cdns_chip->avail_oob_size > max_oob_data_size)
+ cdns_chip->avail_oob_size = max_oob_data_size;
+
+ if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
+ > mtd->oobsize)
+ cdns_chip->avail_oob_size -= 4;
+
+ ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
+ if (ret < 0)
+ return -EINVAL;
+
+ cdns_chip->corr_str_idx = (u8)ret;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_set_ecc_strength(cdns_ctrl,
+ cdns_chip->corr_str_idx);
+
+ cadence_nand_set_erase_detection(cdns_ctrl, true,
+ chip->ecc.strength);
+
+ /* Override the default read operations. */
+ chip->ecc.read_page = cadence_nand_read_page;
+ chip->ecc.read_page_raw = cadence_nand_read_page_raw;
+ chip->ecc.write_page = cadence_nand_write_page;
+ chip->ecc.write_page_raw = cadence_nand_write_page_raw;
+ chip->ecc.read_oob = cadence_nand_read_oob;
+ chip->ecc.write_oob = cadence_nand_write_oob;
+ chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
+ chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
+
+ if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
+ cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;
+
+ /* Is 32-bit DMA supported? */
+ ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
+ return ret;
+ }
+
+ mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
+
+ return 0;
+}
+
+static const struct nand_controller_ops cadence_nand_controller_ops = {
+ .attach_chip = cadence_nand_attach_chip,
+ .exec_op = cadence_nand_exec_op,
+ .setup_data_interface = cadence_nand_setup_data_interface,
+};
+
+static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
+ struct device_node *np)
+{
+ struct cdns_nand_chip *cdns_chip;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (nsels <= 0) {
+ dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
+ return -EINVAL;
+ }
+
+ /* Allocate the nand chip structure. */
+ cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
+ (nsels * sizeof(u8)),
+ GFP_KERNEL);
+ if (!cdns_chip) {
+ dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
+ return -ENOMEM;
+ }
+
+ cdns_chip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ /* Retrieve CS id. */
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (cs >= cdns_ctrl->caps2.max_banks) {
+ dev_err(cdns_ctrl->dev,
+ "invalid reg value: %u (max CS = %d)\n",
+ cs, cdns_ctrl->caps2.max_banks);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
+ dev_err(cdns_ctrl->dev,
+ "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ cdns_chip->cs[i] = cs;
+ }
+
+ chip = &cdns_chip->chip;
+ chip->controller = &cdns_ctrl->controller;
+ nand_set_flash_node(chip, np);
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = cdns_ctrl->dev;
+
+ /*
+ * Default to HW ECC engine mode. If the nand-ecc-mode property is given
+ * in the DT node, this entry will be overwritten in nand_scan_ident().
+ */
+ chip->ecc.mode = NAND_ECC_HW;
+
+ ret = nand_scan(chip, cdns_chip->nsels);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
+ return ret;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);
+
+ return 0;
+}
+
+static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct cdns_nand_chip *entry, *temp;
+
+ list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
+ nand_release(&entry->chip);
+ list_del(&entry->node);
+ }
+}
+
+static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct device_node *np = cdns_ctrl->dev->of_node;
+ struct device_node *nand_np;
+ int max_cs = cdns_ctrl->caps2.max_banks;
+ int nchips, ret;
+
+ nchips = of_get_child_count(np);
+
+ if (nchips > max_cs) {
+ dev_err(cdns_ctrl->dev,
+ "too many NAND chips: %d (max = %d CS)\n",
+ nchips, max_cs);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ cadence_nand_chips_cleanup(cdns_ctrl);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void
+cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
+{
+ /* Disable interrupts. */
+ writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
+}
+
+static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ dma_cap_mask_t mask;
+ int ret;
+
+ cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
+ sizeof(*cdns_ctrl->cdma_desc),
+ &cdns_ctrl->dma_cdma_desc,
+ GFP_KERNEL);
+ if (!cdns_ctrl->dma_cdma_desc)
+ return -ENOMEM;
+
+ cdns_ctrl->buf_size = SZ_16K;
+ cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+ if (!cdns_ctrl->buf) {
+ ret = -ENOMEM;
+ goto free_buf_desc;
+ }
+
+ if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
+ IRQF_SHARED, "cadence-nand-controller",
+ cdns_ctrl)) {
+ dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n");
+ ret = -ENODEV;
+ goto free_buf;
+ }
+
+ spin_lock_init(&cdns_ctrl->irq_lock);
+ init_completion(&cdns_ctrl->complete);
+
+ ret = cadence_nand_hw_init(cdns_ctrl);
+ if (ret)
+ goto disable_irq;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ if (cdns_ctrl->caps1->has_dma) {
+ cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
+ if (!cdns_ctrl->dmac) {
+ dev_err(cdns_ctrl->dev,
+ "Unable to get a DMA channel\n");
+ ret = -EBUSY;
+ goto disable_irq;
+ }
+ }
+
+ nand_controller_init(&cdns_ctrl->controller);
+ INIT_LIST_HEAD(&cdns_ctrl->chips);
+
+ cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
+ cdns_ctrl->curr_corr_str_idx = 0xFF;
+
+ ret = cadence_nand_chips_init(cdns_ctrl);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
+ ret);
+ goto dma_release_chnl;
+ }
+
+ kfree(cdns_ctrl->buf);
+ cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+ if (!cdns_ctrl->buf) {
+ ret = -ENOMEM;
+ goto dma_release_chnl;
+ }
+
+ return 0;
+
+dma_release_chnl:
+ if (cdns_ctrl->dmac)
+ dma_release_channel(cdns_ctrl->dmac);
+
+disable_irq:
+ cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+
+free_buf:
+ kfree(cdns_ctrl->buf);
+
+free_buf_desc:
+ dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+ cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
+
+ return ret;
+}
+
+/* Driver exit point. */
+static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ cadence_nand_chips_cleanup(cdns_ctrl);
+ cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+ kfree(cdns_ctrl->buf);
+ dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+ cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
+
+ if (cdns_ctrl->dmac)
+ dma_release_channel(cdns_ctrl->dmac);
+}
+
+struct cadence_nand_dt {
+ struct cdns_nand_ctrl cdns_ctrl;
+ struct clk *clk;
+};
+
+static const struct cadence_nand_dt_devdata cadence_nand_default = {
+ .if_skew = 0,
+ .has_dma = 1,
+};
+
+static const struct of_device_id cadence_nand_dt_ids[] = {
+ {
+ .compatible = "cdns,hp-nfc",
+ .data = &cadence_nand_default
+ }, {}
+};
+
+MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);
+
+static int cadence_nand_dt_probe(struct platform_device *ofdev)
+{
+ struct resource *res;
+ struct cadence_nand_dt *dt;
+ struct cdns_nand_ctrl *cdns_ctrl;
+ int ret;
+ const struct of_device_id *of_id;
+ const struct cadence_nand_dt_devdata *devdata;
+ u32 val;
+
+ of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
+ if (of_id) {
+ ofdev->id_entry = of_id->data;
+ devdata = of_id->data;
+ } else {
+ pr_err("Failed to find the right device id.\n");
+ return -ENOMEM;
+ }
+
+ dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
+ if (!dt)
+ return -ENOMEM;
+
+ cdns_ctrl = &dt->cdns_ctrl;
+ cdns_ctrl->caps1 = devdata;
+
+ cdns_ctrl->dev = &ofdev->dev;
+ cdns_ctrl->irq = platform_get_irq(ofdev, 0);
+ if (cdns_ctrl->irq < 0)
+ return cdns_ctrl->irq;
+
+ dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);
+
+ cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(cdns_ctrl->reg)) {
+ dev_err(&ofdev->dev, "devm_ioremap_resource res 0 failed\n");
+ return PTR_ERR(cdns_ctrl->reg);
+ }
+
+ res = platform_get_resource(ofdev, IORESOURCE_MEM, 1);
+ cdns_ctrl->io.dma = res->start;
+ cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res);
+ if (IS_ERR(cdns_ctrl->io.virt)) {
+ dev_err(cdns_ctrl->dev, "devm_ioremap_resource res 1 failed\n");
+ return PTR_ERR(cdns_ctrl->io.virt);
+ }
+
+ dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
+ if (IS_ERR(dt->clk))
+ return PTR_ERR(dt->clk);
+
+ cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);
+
+ ret = of_property_read_u32(ofdev->dev.of_node,
+ "cdns,board-delay-ps", &val);
+ if (ret) {
+ val = 4830;
+ dev_info(cdns_ctrl->dev,
+ "missing cdns,board-delay-ps property, %d was set\n",
+ val);
+ }
+ cdns_ctrl->board_delay = val;
+
+ ret = cadence_nand_init(cdns_ctrl);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(ofdev, dt);
+ return 0;
+}
+
+static int cadence_nand_dt_remove(struct platform_device *ofdev)
+{
+ struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);
+
+ cadence_nand_remove(&dt->cdns_ctrl);
+
+ return 0;
+}
+
+static struct platform_driver cadence_nand_dt_driver = {
+ .probe = cadence_nand_dt_probe,
+ .remove = cadence_nand_dt_remove,
+ .driver = {
+ .name = "cadence-nand-controller",
+ .of_match_table = cadence_nand_dt_ids,
+ },
+};
+
+module_platform_driver(cadence_nand_dt_driver);
+
+MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");
+
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 5e14836f6bd5..8b779a899dcf 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -102,47 +102,6 @@ static int denali_dt_chip_init(struct denali_controller *denali,
return denali_chip_init(denali, dchip);
}
-/* Backward compatibility for old platforms */
-static int denali_dt_legacy_chip_init(struct denali_controller *denali)
-{
- struct denali_chip *dchip;
- int nsels, i;
-
- nsels = denali->nbanks;
-
- dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
- GFP_KERNEL);
- if (!dchip)
- return -ENOMEM;
-
- dchip->nsels = nsels;
-
- for (i = 0; i < nsels; i++)
- dchip->sels[i].bank = i;
-
- nand_set_flash_node(&dchip->chip, denali->dev->of_node);
-
- return denali_chip_init(denali, dchip);
-}
-
-/*
- * Check the DT binding.
- * The new binding expects chip subnodes in the controller node.
- * So, #address-cells = <1>; #size-cells = <0>; are required.
- * Check the #size-cells to distinguish the binding.
- */
-static bool denali_dt_is_legacy_binding(struct device_node *np)
-{
- u32 cells;
- int ret;
-
- ret = of_property_read_u32(np, "#size-cells", &cells);
- if (ret)
- return true;
-
- return cells != 0;
-}
-
static int denali_dt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -167,10 +126,8 @@ static int denali_dt_probe(struct platform_device *pdev)
denali->dev = dev;
denali->irq = platform_get_irq(pdev, 0);
- if (denali->irq < 0) {
- dev_err(dev, "no irq defined\n");
+ if (denali->irq < 0)
return denali->irq;
- }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "denali_reg");
denali->reg = devm_ioremap_resource(dev, res);
@@ -213,17 +170,11 @@ static int denali_dt_probe(struct platform_device *pdev)
if (ret)
goto out_disable_clk_ecc;
- if (denali_dt_is_legacy_binding(dev->of_node)) {
- ret = denali_dt_legacy_chip_init(denali);
- if (ret)
+ for_each_child_of_node(dev->of_node, np) {
+ ret = denali_dt_chip_init(denali, np);
+ if (ret) {
+ of_node_put(np);
goto out_remove_denali;
- } else {
- for_each_child_of_node(dev->of_node, np) {
- ret = denali_dt_chip_init(denali, np);
- if (ret) {
- of_node_put(np);
- goto out_remove_denali;
- }
}
}
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index 6a4626a8bf95..0b48be54ba6f 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -751,10 +751,8 @@ static int hisi_nfc_probe(struct platform_device *pdev)
mtd = nand_to_mtd(chip);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no IRQ resource defined\n");
+ if (irq < 0)
return -ENXIO;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->iobase = devm_ioremap_resource(dev, res);
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 78b31f845c50..241b58b83240 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -773,7 +773,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
- dev_err(&pdev->dev, "failed to get platform irq\n");
res = -EINVAL;
goto release_dma_chan;
}
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index fc49e13d81ec..fb5abdcfb007 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2862,10 +2862,8 @@ static int marvell_nfc_probe(struct platform_device *pdev)
return PTR_ERR(nfc->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to retrieve irq\n");
+ if (irq < 0)
return irq;
- }
nfc->core_clk = devm_clk_get(&pdev->dev, "core");
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 1b82b687e5a5..9f17b5b8efbf 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -1399,10 +1399,8 @@ static int meson_nfc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no NFC IRQ resource\n");
+ if (irq < 0)
return -EINVAL;
- }
ret = meson_nfc_clk_init(nfc);
if (ret) {
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
index 74595b644b7c..75f1fa3d4d35 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.c
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -527,10 +527,8 @@ static int mtk_ecc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to get irq: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = dma_set_mask(dev, DMA_BIT_MASK(32));
if (ret) {
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 373d47d1ba4c..b8305e39ab51 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1540,7 +1540,6 @@ static int mtk_nfc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "no nfi irq resource\n");
ret = -EINVAL;
goto clk_disable;
}
diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c
index 9d49e6c845e1..ed7a4e021bf5 100644
--- a/drivers/mtd/nand/raw/mxic_nand.c
+++ b/drivers/mtd/nand/raw/mxic_nand.c
@@ -524,10 +524,8 @@ static int mxic_nfc_probe(struct platform_device *pdev)
nand_chip->controller = &nfc->controller;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to retrieve irq\n");
+ if (irq < 0)
return irq;
- }
mxic_nfc_hw_init(nfc);
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 5c2c30a7dffa..f64e3b6605c6 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -292,12 +292,16 @@ int nand_bbm_get_next_page(struct nand_chip *chip, int page)
struct mtd_info *mtd = nand_to_mtd(chip);
int last_page = ((mtd->erasesize - mtd->writesize) >>
chip->page_shift) & chip->pagemask;
+ unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
+ | NAND_BBM_LASTPAGE;
+ if (page == 0 && !(chip->options & bbm_flags))
+ return 0;
if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
return 0;
- else if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
+ if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
return 1;
- else if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
+ if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
return last_page;
return -EINVAL;
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index 8ca9fad6e6ad..56654030ec7f 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -446,8 +446,10 @@ static int micron_nand_init(struct nand_chip *chip)
if (ret)
goto err_free_manuf_data;
+ chip->options |= NAND_BBM_FIRSTPAGE;
+
if (mtd->writesize == 2048)
- chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+ chip->options |= NAND_BBM_SECONDPAGE;
ondie = micron_supports_on_die_ecc(chip);
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 6ec65f48501c..ad77c112a78a 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -1967,10 +1967,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
case NAND_OMAP_PREFETCH_IRQ:
info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
- if (info->gpmc_irq_fifo <= 0) {
- dev_err(dev, "Error getting fifo IRQ\n");
+ if (info->gpmc_irq_fifo <= 0)
return -ENODEV;
- }
err = devm_request_irq(dev, info->gpmc_irq_fifo,
omap_nand_irq, IRQF_SHARED,
"gpmc-nand-fifo", info);
@@ -1982,10 +1980,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
}
info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
- if (info->gpmc_irq_count <= 0) {
- dev_err(dev, "Error getting IRQ count\n");
+ if (info->gpmc_irq_count <= 0)
return -ENODEV;
- }
err = devm_request_irq(dev, info->gpmc_irq_count,
omap_nand_irq, IRQF_SHARED,
"gpmc-nand-count", info);
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index e509c93737c4..058e99d0cbcf 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -1129,10 +1129,8 @@ static int flctl_probe(struct platform_device *pdev)
flctl->fifo = res->start + 0x24; /* FLDTFIFO */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get flste irq data: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
"flste", flctl);
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 8cc852dc7d54..9e63800f768a 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1880,11 +1880,8 @@ static int stm32_fmc2_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- if (irq != -EPROBE_DEFER)
- dev_err(dev, "IRQ error missing or invalid\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0,
dev_name(dev), fmc2);
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 89773293c64d..37a4ac0dd85b 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -2071,10 +2071,8 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
return PTR_ERR(nfc->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to retrieve irq\n");
+ if (irq < 0)
return irq;
- }
nfc->ahb_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(nfc->ahb_clk)) {
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/aspeed-smc.c
index 009c1da8574c..2b7cabbb680c 100644
--- a/drivers/mtd/spi-nor/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/aspeed-smc.c
@@ -320,7 +320,8 @@ static void aspeed_smc_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
mutex_unlock(&chip->controller->mutex);
}
-static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ size_t len)
{
struct aspeed_smc_chip *chip = nor->priv;
@@ -331,8 +332,8 @@ static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return 0;
}
-static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
struct aspeed_smc_chip *chip = nor->priv;
@@ -746,6 +747,15 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
return 0;
}
+static const struct spi_nor_controller_ops aspeed_smc_controller_ops = {
+ .prepare = aspeed_smc_prep,
+ .unprepare = aspeed_smc_unprep,
+ .read_reg = aspeed_smc_read_reg,
+ .write_reg = aspeed_smc_write_reg,
+ .read = aspeed_smc_read_user,
+ .write = aspeed_smc_write_user,
+};
+
static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
struct device_node *np, struct resource *r)
{
@@ -805,12 +815,7 @@ static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
nor->dev = dev;
nor->priv = chip;
spi_nor_set_flash_node(nor, child);
- nor->read = aspeed_smc_read_user;
- nor->write = aspeed_smc_write_user;
- nor->read_reg = aspeed_smc_read_reg;
- nor->write_reg = aspeed_smc_write_reg;
- nor->prepare = aspeed_smc_prep;
- nor->unprepare = aspeed_smc_unprep;
+ nor->controller_ops = &aspeed_smc_controller_ops;
ret = aspeed_smc_chip_setup_init(chip, r);
if (ret)
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 7bef63947b29..06f997247d0f 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -285,7 +285,7 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
return IRQ_HANDLED;
}
-static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
+static unsigned int cqspi_calc_rdreg(struct spi_nor *nor)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
u32 rdreg = 0;
@@ -354,27 +354,27 @@ static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
return cqspi_wait_idle(cqspi);
}
-static int cqspi_command_read(struct spi_nor *nor,
- const u8 *txbuf, const unsigned n_tx,
- u8 *rxbuf, const unsigned n_rx)
+static int cqspi_command_read(struct spi_nor *nor, u8 opcode,
+ u8 *rxbuf, size_t n_rx)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
unsigned int rdreg;
unsigned int reg;
- unsigned int read_len;
+ size_t read_len;
int status;
if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
- dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
+ dev_err(nor->dev,
+ "Invalid input argument, len %zu rxbuf 0x%p\n",
n_rx, rxbuf);
return -EINVAL;
}
- reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
- rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
+ rdreg = cqspi_calc_rdreg(nor);
writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
@@ -404,19 +404,19 @@ static int cqspi_command_read(struct spi_nor *nor,
}
static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
- const u8 *txbuf, const unsigned n_tx)
+ const u8 *txbuf, size_t n_tx)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
unsigned int reg;
unsigned int data;
- u32 write_len;
+ size_t write_len;
int ret;
if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
dev_err(nor->dev,
- "Invalid input argument, cmdlen %d txbuf 0x%p\n",
+ "Invalid input argument, cmdlen %zu txbuf 0x%p\n",
n_tx, txbuf);
return -EINVAL;
}
@@ -470,7 +470,7 @@ static int cqspi_read_setup(struct spi_nor *nor)
unsigned int reg;
reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
- reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
+ reg |= cqspi_calc_rdreg(nor);
/* Setup dummy clock cycles */
dummy_clk = nor->read_dummy;
@@ -603,7 +603,7 @@ static int cqspi_write_setup(struct spi_nor *nor)
/* Set opcode. */
reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
writel(reg, reg_base + CQSPI_REG_WR_INSTR);
- reg = cqspi_calc_rdreg(nor, nor->program_opcode);
+ reg = cqspi_calc_rdreg(nor);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
reg = readl(reg_base + CQSPI_REG_SIZE);
@@ -1050,7 +1050,7 @@ static int cqspi_erase(struct spi_nor *nor, loff_t offs)
return ret;
/* Send write enable, then erase commands. */
- ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
if (ret)
return ret;
@@ -1080,18 +1080,19 @@ static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
mutex_unlock(&cqspi->bus_mutex);
}
-static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len)
{
int ret;
ret = cqspi_set_protocol(nor, 0);
if (!ret)
- ret = cqspi_command_read(nor, &opcode, 1, buf, len);
+ ret = cqspi_command_read(nor, opcode, buf, len);
return ret;
}
-static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
int ret;
@@ -1216,6 +1217,16 @@ static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
init_completion(&cqspi->rx_dma_complete);
}
+static const struct spi_nor_controller_ops cqspi_controller_ops = {
+ .prepare = cqspi_prep,
+ .unprepare = cqspi_unprep,
+ .read_reg = cqspi_read_reg,
+ .write_reg = cqspi_write_reg,
+ .read = cqspi_read,
+ .write = cqspi_write,
+ .erase = cqspi_erase,
+};
+
static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
{
struct platform_device *pdev = cqspi->pdev;
@@ -1265,14 +1276,7 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
nor->dev = dev;
spi_nor_set_flash_node(nor, np);
nor->priv = f_pdata;
-
- nor->read_reg = cqspi_read_reg;
- nor->write_reg = cqspi_write_reg;
- nor->read = cqspi_read;
- nor->write = cqspi_write;
- nor->erase = cqspi_erase;
- nor->prepare = cqspi_prep;
- nor->unprepare = cqspi_unprep;
+ nor->controller_ops = &cqspi_controller_ops;
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
dev_name(dev), cs);
@@ -1366,10 +1370,8 @@ static int cqspi_probe(struct platform_device *pdev)
/* Obtain IRQ line. */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Cannot obtain IRQ.\n");
+ if (irq < 0)
return -ENXIO;
- }
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
index 6dac9dd8bf42..a1258216f89d 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/hisi-sfc.c
@@ -177,7 +177,7 @@ static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
}
static int hisi_spi_nor_op_reg(struct spi_nor *nor,
- u8 opcode, int len, u8 optype)
+ u8 opcode, size_t len, u8 optype)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
@@ -200,7 +200,7 @@ static int hisi_spi_nor_op_reg(struct spi_nor *nor,
}
static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+ size_t len)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
@@ -215,7 +215,7 @@ static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
}
static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode,
- u8 *buf, int len)
+ const u8 *buf, size_t len)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
@@ -311,6 +311,15 @@ static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to,
return len;
}
+static const struct spi_nor_controller_ops hisi_controller_ops = {
+ .prepare = hisi_spi_nor_prep,
+ .unprepare = hisi_spi_nor_unprep,
+ .read_reg = hisi_spi_nor_read_reg,
+ .write_reg = hisi_spi_nor_write_reg,
+ .read = hisi_spi_nor_read,
+ .write = hisi_spi_nor_write,
+};
+
/**
* Get spi flash device information and register it as a mtd device.
*/
@@ -357,14 +366,8 @@ static int hisi_spi_nor_register(struct device_node *np,
}
priv->host = host;
nor->priv = priv;
+ nor->controller_ops = &hisi_controller_ops;
- nor->prepare = hisi_spi_nor_prep;
- nor->unprepare = hisi_spi_nor_unprep;
- nor->read_reg = hisi_spi_nor_read_reg;
- nor->write_reg = hisi_spi_nor_write_reg;
- nor->read = hisi_spi_nor_read;
- nor->write = hisi_spi_nor_write;
- nor->erase = NULL;
ret = spi_nor_scan(nor, NULL, &hwcaps);
if (ret)
return ret;
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index 3cda8e7a68f8..3d8987baea2a 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -20,6 +20,10 @@ static const struct intel_spi_boardinfo bxt_info = {
.type = INTEL_SPI_BXT,
};
+static const struct intel_spi_boardinfo cnl_info = {
+ .type = INTEL_SPI_CNL,
+};
+
static int intel_spi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -61,6 +65,7 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
@@ -68,6 +73,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index 43e55a2e9b27..61d2a0ad2131 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -108,6 +108,10 @@
#define BXT_FREG_NUM 12
#define BXT_PR_NUM 6
+#define CNL_PR 0x84
+#define CNL_FREG_NUM 6
+#define CNL_PR_NUM 5
+
#define LVSCC 0xc4
#define UVSCC 0xc8
#define ERASE_OPCODE_SHIFT 8
@@ -187,12 +191,16 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
readl(ispi->pregs + PR(i)));
- value = readl(ispi->sregs + SSFSTS_CTL);
- dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
- dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
- readl(ispi->sregs + PREOP_OPTYPE));
- dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", readl(ispi->sregs + OPMENU0));
- dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", readl(ispi->sregs + OPMENU1));
+ if (ispi->sregs) {
+ value = readl(ispi->sregs + SSFSTS_CTL);
+ dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
+ dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
+ readl(ispi->sregs + PREOP_OPTYPE));
+ dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
+ readl(ispi->sregs + OPMENU0));
+ dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
+ readl(ispi->sregs + OPMENU1));
+ }
if (ispi->info->type == INTEL_SPI_BYT)
dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
@@ -340,6 +348,13 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->erase_64k = true;
break;
+ case INTEL_SPI_CNL:
+ ispi->sregs = NULL;
+ ispi->pregs = ispi->base + CNL_PR;
+ ispi->nregions = CNL_FREG_NUM;
+ ispi->pr_num = CNL_PR_NUM;
+ break;
+
default:
return -EINVAL;
}
@@ -367,6 +382,11 @@ static int intel_spi_init(struct intel_spi *ispi)
!(uvscc & ERASE_64K_OPCODE_MASK))
ispi->erase_64k = false;
+ if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) {
+ dev_err(ispi->dev, "software sequencer not supported, but required\n");
+ return -EINVAL;
+ }
+
/*
* Some controllers can only do basic operations using hardware
* sequencer. All other operations are supposed to be carried out
@@ -383,7 +403,7 @@ static int intel_spi_init(struct intel_spi *ispi)
val = readl(ispi->base + HSFSTS_CTL);
ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
- if (ispi->locked) {
+ if (ispi->locked && ispi->sregs) {
/*
* BIOS programs allowed opcodes and then locks down the
* register. So read back what opcodes it decided to support.
@@ -426,7 +446,7 @@ static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
return 0;
}
-static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
+static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len)
{
u32 val, status;
int ret;
@@ -469,7 +489,7 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
return 0;
}
-static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
+static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
int optype)
{
u32 val = 0, status;
@@ -535,7 +555,8 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
return 0;
}
-static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ size_t len)
{
struct intel_spi *ispi = nor->priv;
int ret;
@@ -555,7 +576,8 @@ static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return intel_spi_read_block(ispi, buf, len);
}
-static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
struct intel_spi *ispi = nor->priv;
int ret;
@@ -864,6 +886,14 @@ static void intel_spi_fill_partition(struct intel_spi *ispi,
}
}
+static const struct spi_nor_controller_ops intel_spi_controller_ops = {
+ .read_reg = intel_spi_read_reg,
+ .write_reg = intel_spi_write_reg,
+ .read = intel_spi_read,
+ .write = intel_spi_write,
+ .erase = intel_spi_erase,
+};
+
struct intel_spi *intel_spi_probe(struct device *dev,
struct resource *mem, const struct intel_spi_boardinfo *info)
{
@@ -897,11 +927,7 @@ struct intel_spi *intel_spi_probe(struct device *dev,
ispi->nor.dev = ispi->dev;
ispi->nor.priv = ispi;
- ispi->nor.read_reg = intel_spi_read_reg;
- ispi->nor.write_reg = intel_spi_write_reg;
- ispi->nor.read = intel_spi_read;
- ispi->nor.write = intel_spi_write;
- ispi->nor.erase = intel_spi_erase;
+ ispi->nor.controller_ops = &intel_spi_controller_ops;
ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
if (ret) {
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index 34db01ab6cab..b1691680d174 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -151,9 +151,9 @@ static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval)
}
static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
- u8 *tx, int txlen, u8 *rx, int rxlen)
+ const u8 *tx, size_t txlen, u8 *rx, size_t rxlen)
{
- int len = 1 + txlen + rxlen;
+ size_t len = 1 + txlen + rxlen;
int i, ret, idx;
if (len > MTK_NOR_MAX_SHIFT)
@@ -193,7 +193,7 @@ static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
}
/* Do a WRSR (Write Status Register) command */
-static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, u8 sr)
+static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, const u8 sr)
{
writeb(sr, mtk_nor->base + MTK_NOR_PRGDATA5_REG);
writeb(8, mtk_nor->base + MTK_NOR_CNT_REG);
@@ -354,7 +354,7 @@ static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len,
return len;
}
-static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len)
{
int ret;
struct mtk_nor *mtk_nor = nor->priv;
@@ -376,8 +376,8 @@ static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return ret;
}
-static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
int ret;
struct mtk_nor *mtk_nor = nor->priv;
@@ -419,6 +419,13 @@ static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor)
return 0;
}
+static const struct spi_nor_controller_ops mtk_controller_ops = {
+ .read_reg = mtk_nor_read_reg,
+ .write_reg = mtk_nor_write_reg,
+ .read = mtk_nor_read,
+ .write = mtk_nor_write,
+};
+
static int mtk_nor_init(struct mtk_nor *mtk_nor,
struct device_node *flash_node)
{
@@ -438,12 +445,8 @@ static int mtk_nor_init(struct mtk_nor *mtk_nor,
nor->dev = mtk_nor->dev;
nor->priv = mtk_nor;
spi_nor_set_flash_node(nor, flash_node);
+ nor->controller_ops = &mtk_controller_ops;
- /* fill the hooks to spi nor */
- nor->read = mtk_nor_read;
- nor->read_reg = mtk_nor_read_reg;
- nor->write = mtk_nor_write;
- nor->write_reg = mtk_nor_write_reg;
nor->mtd.name = "mtk_nor";
/* initialized with NULL */
ret = spi_nor_scan(nor, NULL, &hwcaps);
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c
index 4a871587392b..9a5b1a7c636a 100644
--- a/drivers/mtd/spi-nor/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/nxp-spifi.c
@@ -123,7 +123,8 @@ static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi)
return ret;
}
-static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ size_t len)
{
struct nxp_spifi *spifi = nor->priv;
u32 cmd;
@@ -145,7 +146,8 @@ static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return nxp_spifi_wait_for_cmd(spifi);
}
-static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
struct nxp_spifi *spifi = nor->priv;
u32 cmd;
@@ -263,9 +265,18 @@ static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
static void nxp_spifi_dummy_id_read(struct spi_nor *nor)
{
u8 id[SPI_NOR_MAX_ID_LEN];
- nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+ nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+ SPI_NOR_MAX_ID_LEN);
}
+static const struct spi_nor_controller_ops nxp_spifi_controller_ops = {
+ .read_reg = nxp_spifi_read_reg,
+ .write_reg = nxp_spifi_write_reg,
+ .read = nxp_spifi_read,
+ .write = nxp_spifi_write,
+ .erase = nxp_spifi_erase,
+};
+
static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
struct device_node *np)
{
@@ -332,11 +343,7 @@ static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
spifi->nor.dev = spifi->dev;
spi_nor_set_flash_node(&spifi->nor, np);
spifi->nor.priv = spifi;
- spifi->nor.read = nxp_spifi_read;
- spifi->nor.write = nxp_spifi_write;
- spifi->nor.erase = nxp_spifi_erase;
- spifi->nor.read_reg = nxp_spifi_read_reg;
- spifi->nor.write_reg = nxp_spifi_write_reg;
+ spifi->nor.controller_ops = &nxp_spifi_controller_ops;
/*
* The first read on a hard reset isn't reliable so do a
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 7acf4a93b592..f4afe123e9dc 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -338,7 +338,7 @@ static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
if (nor->spimem)
return spi_nor_spimem_read_data(nor, from, len, buf);
- return nor->read(nor, from, len, buf);
+ return nor->controller_ops->read(nor, from, len, buf);
}
/**
@@ -385,239 +385,172 @@ static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
if (nor->spimem)
return spi_nor_spimem_write_data(nor, to, len, buf);
- return nor->write(nor, to, len, buf);
+ return nor->controller_ops->write(nor, to, len, buf);
}
-/*
- * Read the status register, returning its value in the location
- * Return the status register value.
- * Returns negative if error occurred.
+/**
+ * spi_nor_write_enable() - Set write enable latch with Write Enable command.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int read_sr(struct spi_nor *nor)
+static int spi_nor_write_enable(struct spi_nor *nor)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_NO_DATA);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->read_reg(nor, SPINOR_OP_RDSR, nor->bouncebuf, 1);
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
+ NULL, 0);
}
- if (ret < 0) {
- pr_err("error %d reading SR\n", (int) ret);
- return ret;
- }
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
- return nor->bouncebuf[0];
+ return ret;
}
-/*
- * Read the flag status register, returning its value in the location
- * Return the status register value.
- * Returns negative if error occurred.
+/**
+ * spi_nor_write_disable() - Send Write Disable instruction to the chip.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int read_fsr(struct spi_nor *nor)
+static int spi_nor_write_disable(struct spi_nor *nor)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_NO_DATA);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->read_reg(nor, SPINOR_OP_RDFSR, nor->bouncebuf, 1);
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
+ NULL, 0);
}
- if (ret < 0) {
- pr_err("error %d reading FSR\n", ret);
- return ret;
- }
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
- return nor->bouncebuf[0];
+ return ret;
}
-/*
- * Read configuration register, returning its value in the
- * location. Return the configuration register value.
- * Returns negative if error occurred.
+/**
+ * spi_nor_read_sr() - Read the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int read_cr(struct spi_nor *nor)
+static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_DATA_IN(1, sr, 1));
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->read_reg(nor, SPINOR_OP_RDCR, nor->bouncebuf, 1);
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
+ sr, 1);
}
- if (ret < 0) {
- dev_err(nor->dev, "error %d reading CR\n", ret);
- return ret;
- }
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR\n", ret);
- return nor->bouncebuf[0];
+ return ret;
}
-/*
- * Write status register 1 byte
- * Returns negative if error occurred.
+/**
+ * spi_nor_read_fsr() - Read the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'
+ * @fsr: pointer to a DMA-able buffer where the value of the
+ * Flag Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int write_sr(struct spi_nor *nor, u8 val)
+static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
{
- nor->bouncebuf[0] = val;
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
-
- return spi_mem_exec_op(nor->spimem, &op);
- }
-
- return nor->write_reg(nor, SPINOR_OP_WRSR, nor->bouncebuf, 1);
-}
+ int ret;
-/*
- * Set write enable latch with Write Enable command.
- * Returns negative if error occurred.
- */
-static int write_enable(struct spi_nor *nor)
-{
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
+ SPI_MEM_OP_DATA_IN(1, fsr, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
+ fsr, 1);
}
- return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading FSR\n", ret);
+
+ return ret;
}
-/*
- * Send write disable instruction to the chip.
+/**
+ * spi_nor_read_cr() - Read the Configuration Register using the
+ * SPINOR_OP_RDCR (35h) command.
+ * @nor: pointer to 'struct spi_nor'
+ * @cr: pointer to a DMA-able buffer where the value of the
+ * Configuration Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int write_disable(struct spi_nor *nor)
+static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
+ SPI_MEM_OP_DATA_IN(1, cr, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
}
- return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
-}
-
-static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
-{
- return mtd->priv;
-}
-
-
-static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
-{
- size_t i;
-
- for (i = 0; i < size; i++)
- if (table[i][0] == opcode)
- return table[i][1];
-
- /* No conversion found, keep input op code. */
- return opcode;
-}
-
-static u8 spi_nor_convert_3to4_read(u8 opcode)
-{
- static const u8 spi_nor_3to4_read[][2] = {
- { SPINOR_OP_READ, SPINOR_OP_READ_4B },
- { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
- { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
- { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
- { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
- { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
- { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
- { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
-
- { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
- { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
- { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
- ARRAY_SIZE(spi_nor_3to4_read));
-}
-
-static u8 spi_nor_convert_3to4_program(u8 opcode)
-{
- static const u8 spi_nor_3to4_program[][2] = {
- { SPINOR_OP_PP, SPINOR_OP_PP_4B },
- { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
- { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
- { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
- { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
- ARRAY_SIZE(spi_nor_3to4_program));
-}
-
-static u8 spi_nor_convert_3to4_erase(u8 opcode)
-{
- static const u8 spi_nor_3to4_erase[][2] = {
- { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
- { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
- { SPINOR_OP_SE, SPINOR_OP_SE_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
- ARRAY_SIZE(spi_nor_3to4_erase));
-}
-
-static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
-{
- nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
- nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
- nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
-
- if (!spi_nor_has_uniform_erase(nor)) {
- struct spi_nor_erase_map *map = &nor->params.erase_map;
- struct spi_nor_erase_type *erase;
- int i;
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading CR\n", ret);
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- erase = &map->erase_type[i];
- erase->opcode =
- spi_nor_convert_3to4_erase(erase->opcode);
- }
- }
+ return ret;
}
+/**
+ * macronix_set_4byte() - Set 4-byte address mode for Macronix flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int macronix_set_4byte(struct spi_nor *nor, bool enable)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
@@ -628,26 +561,55 @@ static int macronix_set_4byte(struct spi_nor *nor, bool enable)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor,
+ enable ? SPINOR_OP_EN4B :
+ SPINOR_OP_EX4B,
+ NULL, 0);
}
- return nor->write_reg(nor, enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B,
- NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
}
+/**
+ * st_micron_set_4byte() - Set 4-byte address mode for ST and Micron flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int st_micron_set_4byte(struct spi_nor *nor, bool enable)
{
int ret;
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
ret = macronix_set_4byte(nor, enable);
- write_disable(nor);
+ if (ret)
+ return ret;
- return ret;
+ return spi_nor_write_disable(nor);
}
+/**
+ * spansion_set_4byte() - Set 4-byte address mode for Spansion flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int spansion_set_4byte(struct spi_nor *nor, bool enable)
{
+ int ret;
+
nor->bouncebuf[0] = enable << 7;
if (nor->spimem) {
@@ -657,14 +619,29 @@ static int spansion_set_4byte(struct spi_nor *nor, bool enable)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
+ nor->bouncebuf, 1);
}
- return nor->write_reg(nor, SPINOR_OP_BRWR, nor->bouncebuf, 1);
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
}
+/**
+ * spi_nor_write_ear() - Write Extended Address Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @ear: value to write to the Extended Address Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
{
+ int ret;
+
nor->bouncebuf[0] = ear;
if (nor->spimem) {
@@ -674,12 +651,26 @@ static int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
+ nor->bouncebuf, 1);
}
- return nor->write_reg(nor, SPINOR_OP_WREAR, nor->bouncebuf, 1);
+ if (ret)
+ dev_dbg(nor->dev, "error %d writing EAR\n", ret);
+
+ return ret;
}
+/**
+ * winbond_set_4byte() - Set 4-byte address mode for Winbond flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int winbond_set_4byte(struct spi_nor *nor, bool enable)
{
int ret;
@@ -693,15 +684,29 @@ static int winbond_set_4byte(struct spi_nor *nor, bool enable)
* Register to be set to 1, so all 3-byte-address reads come from the
* second 16M. We must clear the register to enable normal behavior.
*/
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
ret = spi_nor_write_ear(nor, 0);
- write_disable(nor);
+ if (ret)
+ return ret;
- return ret;
+ return spi_nor_write_disable(nor);
}
+/**
+ * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
@@ -709,27 +714,44 @@ static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_IN(1, sr, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
+ sr, 1);
}
- return nor->read_reg(nor, SPINOR_OP_XRDSR, sr, 1);
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
+
+ return ret;
}
+/**
+ * s3an_sr_ready() - Query the Status Register of the S3AN flash to see if the
+ * flash is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
static int s3an_sr_ready(struct spi_nor *nor)
{
int ret;
ret = spi_nor_xread_sr(nor, nor->bouncebuf);
- if (ret < 0) {
- dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
+ if (ret)
return ret;
- }
return !!(nor->bouncebuf[0] & XSR_RDY);
}
-static int spi_nor_clear_sr(struct spi_nor *nor)
+/**
+ * spi_nor_clear_sr() - Clear the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_sr(struct spi_nor *nor)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
@@ -737,20 +759,33 @@ static int spi_nor_clear_sr(struct spi_nor *nor)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
+ NULL, 0);
}
- return nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing SR\n", ret);
}
+/**
+ * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
+ * for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
- int sr = read_sr(nor);
- if (sr < 0)
- return sr;
+ int ret = spi_nor_read_sr(nor, nor->bouncebuf);
+
+ if (ret)
+ return ret;
- if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
- if (sr & SR_E_ERR)
+ if (nor->flags & SNOR_F_USE_CLSR &&
+ nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
+ if (nor->bouncebuf[0] & SR_E_ERR)
dev_err(nor->dev, "Erase Error occurred\n");
else
dev_err(nor->dev, "Programming Error occurred\n");
@@ -759,11 +794,17 @@ static int spi_nor_sr_ready(struct spi_nor *nor)
return -EIO;
}
- return !(sr & SR_WIP);
+ return !(nor->bouncebuf[0] & SR_WIP);
}
-static int spi_nor_clear_fsr(struct spi_nor *nor)
+/**
+ * spi_nor_clear_fsr() - Clear the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_fsr(struct spi_nor *nor)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
@@ -771,25 +812,37 @@ static int spi_nor_clear_fsr(struct spi_nor *nor)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
+ NULL, 0);
}
- return nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
}
+/**
+ * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
+ * ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: non-zero if ready, 0 if not ready, -errno on errors.
+ */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
- int fsr = read_fsr(nor);
- if (fsr < 0)
- return fsr;
+ int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
- if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
- if (fsr & FSR_E_ERR)
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
+ if (nor->bouncebuf[0] & FSR_E_ERR)
dev_err(nor->dev, "Erase operation failed.\n");
else
dev_err(nor->dev, "Program operation failed.\n");
- if (fsr & FSR_PT_ERR)
+ if (nor->bouncebuf[0] & FSR_PT_ERR)
dev_err(nor->dev,
"Attempted to modify a protected sector.\n");
@@ -797,9 +850,15 @@ static int spi_nor_fsr_ready(struct spi_nor *nor)
return -EIO;
}
- return fsr & FSR_READY;
+ return nor->bouncebuf[0] & FSR_READY;
}
+/**
+ * spi_nor_ready() - Query the flash to see if it is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
static int spi_nor_ready(struct spi_nor *nor)
{
int sr, fsr;
@@ -816,9 +875,13 @@ static int spi_nor_ready(struct spi_nor *nor)
return sr && fsr;
}
-/*
- * Service routine to read status register until ready, or timeout occurs.
- * Returns non-zero if error.
+/**
+ * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
+ * Status Register until ready, or timeout occurs.
+ * @nor: pointer to "struct spi_nor".
+ * @timeout_jiffies: jiffies to wait until timeout.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
unsigned long timeout_jiffies)
@@ -841,24 +904,305 @@ static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
cond_resched();
}
- dev_err(nor->dev, "flash operation timed out\n");
+ dev_dbg(nor->dev, "flash operation timed out\n");
return -ETIMEDOUT;
}
+/**
+ * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
+ * flash to be ready, or timeout occurs.
+ * @nor: pointer to "struct spi_nor".
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int spi_nor_wait_till_ready(struct spi_nor *nor)
{
return spi_nor_wait_till_ready_with_timeout(nor,
DEFAULT_READY_WAIT_JIFFIES);
}
-/*
- * Erase the whole flash memory
+/**
+ * spi_nor_write_sr() - Write the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to DMA-able buffer to write to the Status Register.
+ * @len: number of bytes to write to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(len, sr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
+ sr, len);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
+ * ensure that the byte written matches the value read back.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+
+ nor->bouncebuf[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] != sr1) {
+ dev_dbg(nor->dev, "SR1: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
+ * Status Register 2 in one shot. Ensure that the byte written in the Status
+ * Register 1 matches the value read back, and that the 16-bit Write did not
+ * affect what was already in the Status Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register 1.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 cr_written;
+
+ /* Make sure we don't overwrite the contents of Status Register 2. */
+ if (!(nor->flags & SNOR_F_NO_READ_CR)) {
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+ } else if (nor->params.quad_enable) {
+ /*
+ * If the Status Register 2 Read command (35h) is not
+ * supported, we should at least be sure we don't
+ * change the value of the SR2 Quad Enable bit.
+ *
+ * We can safely assume that when the Quad Enable method is
+ * set, the value of the QE bit is one, as a consequence of the
+ * nor->params.quad_enable() call.
+ *
+ * We can safely assume that the Quad Enable bit is present in
+ * the Status Register 2 at BIT(1). According to the JESD216
+ * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
+ * Write Status (01h) command is available just for the cases
+ * in which the QE bit is described in SR2 at BIT(1).
+ */
+ sr_cr[1] = SR2_QUAD_EN_BIT1;
+ } else {
+ sr_cr[1] = 0;
+ }
+
+ sr_cr[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ cr_written = sr_cr[1];
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr_written != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
+ * Configuration Register in one shot. Ensure that the byte written in the
+ * Configuration Register matches the value read back, and that the 16-bit Write
+ * did not affect what was already in the Status Register 1.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @cr: byte value to be written to the Configuration Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 sr_written;
+
+ /* Keep the current value of the Status Register 1. */
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ sr_cr[1] = cr;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ sr_written = sr_cr[0];
+
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ if (sr_written != sr_cr[0]) {
+ dev_dbg(nor->dev, "SR: Read back test failed\n");
+ return -EIO;
+ }
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
+ * the byte written matches the value read back, without affecting other bits
+ * in Status Registers 1 and 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ if (nor->flags & SNOR_F_HAS_16BIT_SR)
+ return spi_nor_write_16bit_sr_and_check(nor, sr1);
+
+ return spi_nor_write_sr1_and_check(nor, sr1);
+}
+
+/**
+ * spi_nor_write_sr2() - Write the Status Register 2 using the
+ * SPINOR_OP_WRSR2 (3eh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, sr2, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
+ sr2, 1);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR2\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_read_sr2() - Read the Status Register 2 using the
+ * SPINOR_OP_RDSR2 (3fh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer where the value of the
+ * Status Register 2 will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, sr2, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
+ sr2, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR2\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_erase_chip() - Erase the entire flash memory.
+ * @nor: pointer to 'struct spi_nor'.
*
- * Returns 0 if successful, non-zero otherwise.
+ * Return: 0 on success, -errno otherwise.
*/
-static int erase_chip(struct spi_nor *nor)
+static int spi_nor_erase_chip(struct spi_nor *nor)
{
+ int ret;
+
dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
if (nor->spimem) {
@@ -868,10 +1212,99 @@ static int erase_chip(struct spi_nor *nor)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
+ NULL, 0);
}
- return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d erasing chip\n", ret);
+
+ return ret;
+}
+
+static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+ return mtd->priv;
+}
+
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == opcode)
+ return table[i][1];
+
+ /* No conversion found, keep input op code. */
+ return opcode;
+}
+
+static u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+ static const u8 spi_nor_3to4_read[][2] = {
+ { SPINOR_OP_READ, SPINOR_OP_READ_4B },
+ { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
+ { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
+ { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
+ { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
+ { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+ { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
+ { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
+
+ { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
+ { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
+ { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+ ARRAY_SIZE(spi_nor_3to4_read));
+}
+
+static u8 spi_nor_convert_3to4_program(u8 opcode)
+{
+ static const u8 spi_nor_3to4_program[][2] = {
+ { SPINOR_OP_PP, SPINOR_OP_PP_4B },
+ { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
+ { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
+ { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
+ { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
+ ARRAY_SIZE(spi_nor_3to4_program));
+}
+
+static u8 spi_nor_convert_3to4_erase(u8 opcode)
+{
+ static const u8 spi_nor_3to4_erase[][2] = {
+ { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
+ { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
+ { SPINOR_OP_SE, SPINOR_OP_SE_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
+ ARRAY_SIZE(spi_nor_3to4_erase));
+}
+
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
+{
+ nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+ nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
+ nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+
+ if (!spi_nor_has_uniform_erase(nor)) {
+ struct spi_nor_erase_map *map = &nor->params.erase_map;
+ struct spi_nor_erase_type *erase;
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ erase = &map->erase_type[i];
+ erase->opcode =
+ spi_nor_convert_3to4_erase(erase->opcode);
+ }
+ }
}
static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
@@ -880,10 +1313,9 @@ static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
mutex_lock(&nor->lock);
- if (nor->prepare) {
- ret = nor->prepare(nor, ops);
+ if (nor->controller_ops && nor->controller_ops->prepare) {
+ ret = nor->controller_ops->prepare(nor, ops);
if (ret) {
- dev_err(nor->dev, "failed in the preparation.\n");
mutex_unlock(&nor->lock);
return ret;
}
@@ -893,8 +1325,8 @@ static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
- if (nor->unprepare)
- nor->unprepare(nor, ops);
+ if (nor->controller_ops && nor->controller_ops->unprepare)
+ nor->controller_ops->unprepare(nor, ops);
mutex_unlock(&nor->lock);
}
@@ -935,9 +1367,6 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
addr = spi_nor_convert_addr(nor, addr);
- if (nor->erase)
- return nor->erase(nor, addr);
-
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
@@ -946,6 +1375,8 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
SPI_MEM_OP_NO_DATA);
return spi_mem_exec_op(nor->spimem, &op);
+ } else if (nor->controller_ops->erase) {
+ return nor->controller_ops->erase(nor, addr);
}
/*
@@ -957,8 +1388,8 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
addr >>= 8;
}
- return nor->write_reg(nor, nor->erase_opcode, nor->bouncebuf,
- nor->addr_width);
+ return nor->controller_ops->write_reg(nor, nor->erase_opcode,
+ nor->bouncebuf, nor->addr_width);
}
/**
@@ -1208,7 +1639,9 @@ static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
list_for_each_entry_safe(cmd, next, &erase_list, list) {
nor->erase_opcode = cmd->opcode;
while (cmd->count) {
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
ret = spi_nor_erase_sector(nor, addr);
if (ret)
@@ -1263,12 +1696,13 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
unsigned long timeout;
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
- if (erase_chip(nor)) {
- ret = -EIO;
+ ret = spi_nor_erase_chip(nor);
+ if (ret)
goto erase_err;
- }
/*
* Scale the timeout linearly with the size of the flash, with
@@ -1291,7 +1725,9 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
/* "sector"-at-a-time erase */
} else if (spi_nor_has_uniform_erase(nor)) {
while (len) {
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
ret = spi_nor_erase_sector(nor, addr);
if (ret)
@@ -1312,7 +1748,7 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
goto erase_err;
}
- write_disable(nor);
+ ret = spi_nor_write_disable(nor);
erase_err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
@@ -1320,27 +1756,6 @@ erase_err:
return ret;
}
-/* Write status register and ensure bits in mask match written values */
-static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
-{
- int ret;
-
- write_enable(nor);
- ret = write_sr(nor, status_new);
- if (ret)
- return ret;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- return ret;
-
- ret = read_sr(nor);
- if (ret < 0)
- return ret;
-
- return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
-}
-
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
uint64_t *len)
{
@@ -1433,16 +1848,18 @@ static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
struct mtd_info *mtd = &nor->mtd;
- int status_old, status_new;
+ int ret, status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
u8 shift = ffs(mask) - 1, pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- status_old = read_sr(nor);
- if (status_old < 0)
- return status_old;
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
/* If nothing in our range is unlocked, we don't need to do anything */
if (stm_is_locked_sr(nor, ofs, len, status_old))
@@ -1502,7 +1919,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) < (status_old & mask))
return -EINVAL;
- return write_sr_and_check(nor, status_new, mask);
+ return spi_nor_write_sr_and_check(nor, status_new);
}
/*
@@ -1513,16 +1930,18 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
struct mtd_info *mtd = &nor->mtd;
- int status_old, status_new;
+ int ret, status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
u8 shift = ffs(mask) - 1, pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- status_old = read_sr(nor);
- if (status_old < 0)
- return status_old;
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
/* If nothing in our range is locked, we don't need to do anything */
if (stm_is_unlocked_sr(nor, ofs, len, status_old))
@@ -1585,7 +2004,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) > (status_old & mask))
return -EINVAL;
- return write_sr_and_check(nor, status_new, mask);
+ return spi_nor_write_sr_and_check(nor, status_new);
}
/*
@@ -1597,13 +2016,13 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
*/
static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
- int status;
+ int ret;
- status = read_sr(nor);
- if (status < 0)
- return status;
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
- return stm_is_locked_sr(nor, ofs, len, status);
+ return stm_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
}
static const struct spi_nor_locking_ops stm_locking_ops = {
@@ -1657,242 +2076,59 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
-/*
- * Write status Register and configuration register with 2 bytes
- * The first byte will be written to the status register, while the
- * second byte will be written to the configuration register.
- * Return negative if error occurred.
- */
-static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
-{
- int ret;
-
- write_enable(nor);
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(2, sr_cr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
- }
-
- if (ret < 0) {
- dev_err(nor->dev,
- "error while writing configuration register\n");
- return -EINVAL;
- }
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret) {
- dev_err(nor->dev,
- "timeout while writing configuration register\n");
- return ret;
- }
-
- return 0;
-}
-
-/**
- * macronix_quad_enable() - set QE bit in Status Register.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Status Register.
- *
- * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int macronix_quad_enable(struct spi_nor *nor)
-{
- int ret, val;
-
- val = read_sr(nor);
- if (val < 0)
- return val;
- if (val & SR_QUAD_EN_MX)
- return 0;
-
- write_enable(nor);
-
- write_sr(nor, val | SR_QUAD_EN_MX);
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- return ret;
-
- ret = read_sr(nor);
- if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
- dev_err(nor->dev, "Macronix Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
/**
- * spansion_quad_enable() - set QE bit in Configuraiton Register.
+ * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
+ * Register 1.
* @nor: pointer to a 'struct spi_nor'
*
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function is kept for legacy purpose because it has been used for a
- * long time without anybody complaining but it should be considered as
- * deprecated and maybe buggy.
- * First, this function doesn't care about the previous values of the Status
- * and Configuration Registers when it sets the QE bit (bit 1) in the
- * Configuration Register: all other bits are cleared, which may have unwanted
- * side effects like removing some block protections.
- * Secondly, it uses the Read Configuration Register (35h) instruction though
- * some very old and few memories don't support this instruction. If a pull-up
- * resistor is present on the MISO/IO1 line, we might still be able to pass the
- * "read back" test because the QSPI memory doesn't recognize the command,
- * so leaves the MISO/IO1 line state unchanged, hence read_cr() returns 0xFF.
- *
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
+ * Bit 6 of the Status Register 1 is the QE bit for Macronix-like QSPI memories.
*
* Return: 0 on success, -errno otherwise.
*/
-static int spansion_quad_enable(struct spi_nor *nor)
+static int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
{
- u8 *sr_cr = nor->bouncebuf;
int ret;
- sr_cr[0] = 0;
- sr_cr[1] = CR_QUAD_EN_SPAN;
- ret = write_sr_cr(nor, sr_cr);
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
- /* read back and check it */
- ret = read_cr(nor);
- if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
- dev_err(nor->dev, "Spansion Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function should be used with QSPI memories not supporting the Read
- * Configuration Register (35h) instruction.
- *
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
-{
- u8 *sr_cr = nor->bouncebuf;
- int ret;
+ if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
+ return 0;
- /* Keep the current value of the Status Register. */
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(nor->dev, "error while reading status register\n");
- return -EINVAL;
- }
- sr_cr[0] = ret;
- sr_cr[1] = CR_QUAD_EN_SPAN;
+ nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
- return write_sr_cr(nor, sr_cr);
+ return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
}
/**
- * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function should be used with QSPI memories supporting the Read
- * Configuration Register (35h) instruction.
+ * spi_nor_sr2_bit1_quad_enable() - Set the Quad Enable BIT(1) in the Status
+ * Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
*
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
+ * Bit 1 of the Status Register 2 is the QE bit for Spansion-like QSPI memories.
*
* Return: 0 on success, -errno otherwise.
*/
-static int spansion_read_cr_quad_enable(struct spi_nor *nor)
+static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
- struct device *dev = nor->dev;
- u8 *sr_cr = nor->bouncebuf;
int ret;
- /* Check current Quad Enable bit value. */
- ret = read_cr(nor);
- if (ret < 0) {
- dev_err(dev, "error while reading configuration register\n");
- return -EINVAL;
- }
-
- if (ret & CR_QUAD_EN_SPAN)
- return 0;
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
- sr_cr[1] = ret | CR_QUAD_EN_SPAN;
-
- /* Keep the current value of the Status Register. */
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(dev, "error while reading status register\n");
- return -EINVAL;
- }
- sr_cr[0] = ret;
-
- ret = write_sr_cr(nor, sr_cr);
+ ret = spi_nor_read_cr(nor, nor->bouncebuf);
if (ret)
return ret;
- /* Read back and check it. */
- ret = read_cr(nor);
- if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
- dev_err(nor->dev, "Spansion Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int spi_nor_write_sr2(struct spi_nor *nor, u8 *sr2)
-{
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, sr2, 1));
-
- return spi_mem_exec_op(nor->spimem, &op);
- }
-
- return nor->write_reg(nor, SPINOR_OP_WRSR2, sr2, 1);
-}
-
-static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
-{
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr2, 1));
-
- return spi_mem_exec_op(nor->spimem, &op);
- }
+ if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
+ return 0;
- return nor->read_reg(nor, SPINOR_OP_RDSR2, sr2, 1);
+ return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}
/**
- * sr2_bit7_quad_enable() - set QE bit in Status Register 2.
+ * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
* @nor: pointer to a 'struct spi_nor'
*
* Set the Quad Enable (QE) bit in the Status Register 2.
@@ -1903,10 +2139,11 @@ static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
*
* Return: 0 on success, -errno otherwise.
*/
-static int sr2_bit7_quad_enable(struct spi_nor *nor)
+static int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
u8 *sr2 = nor->bouncebuf;
int ret;
+ u8 sr2_written;
/* Check current Quad Enable bit value. */
ret = spi_nor_read_sr2(nor, sr2);
@@ -1918,117 +2155,23 @@ static int sr2_bit7_quad_enable(struct spi_nor *nor)
/* Update the Quad Enable bit. */
*sr2 |= SR2_QUAD_EN_BIT7;
- write_enable(nor);
-
ret = spi_nor_write_sr2(nor, sr2);
- if (ret < 0) {
- dev_err(nor->dev, "error while writing status register 2\n");
- return -EINVAL;
- }
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret < 0) {
- dev_err(nor->dev, "timeout while writing status register 2\n");
+ if (ret)
return ret;
- }
+
+ sr2_written = *sr2;
/* Read back and check it. */
ret = spi_nor_read_sr2(nor, sr2);
- if (!(ret > 0 && (*sr2 & SR2_QUAD_EN_BIT7))) {
- dev_err(nor->dev, "SR2 Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_clear_sr_bp() - clear the Status Register Block Protection bits.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Read-modify-write function that clears the Block Protection bits from the
- * Status Register without affecting other bits.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_clear_sr_bp(struct spi_nor *nor)
-{
- int ret;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
-
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(nor->dev, "error while reading status register\n");
- return ret;
- }
-
- write_enable(nor);
-
- ret = write_sr(nor, ret & ~mask);
- if (ret) {
- dev_err(nor->dev, "write to status register failed\n");
- return ret;
- }
-
- ret = spi_nor_wait_till_ready(nor);
if (ret)
- dev_err(nor->dev, "timeout while writing status register\n");
- return ret;
-}
-
-/**
- * spi_nor_spansion_clear_sr_bp() - clear the Status Register Block Protection
- * bits on spansion flashes.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Read-modify-write function that clears the Block Protection bits from the
- * Status Register without affecting other bits. The function is tightly
- * coupled with the spansion_quad_enable() function. Both assume that the Write
- * Register with 16 bits, together with the Read Configuration Register (35h)
- * instructions are supported.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor)
-{
- int ret;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
- u8 *sr_cr = nor->bouncebuf;
-
- /* Check current Quad Enable bit value. */
- ret = read_cr(nor);
- if (ret < 0) {
- dev_err(nor->dev,
- "error while reading configuration register\n");
return ret;
- }
-
- /*
- * When the configuration register Quad Enable bit is one, only the
- * Write Status (01h) command with two data bytes may be used.
- */
- if (ret & CR_QUAD_EN_SPAN) {
- sr_cr[1] = ret;
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(nor->dev,
- "error while reading status register\n");
- return ret;
- }
- sr_cr[0] = ret & ~mask;
-
- ret = write_sr_cr(nor, sr_cr);
- if (ret)
- dev_err(nor->dev, "16-bit write register failed\n");
- return ret;
+ if (*sr2 != sr2_written) {
+ dev_dbg(nor->dev, "SR2: Read back test failed\n");
+ return -EIO;
}
- /*
- * If the Quad Enable bit is zero, use the Write Status (01h) command
- * with one data byte.
- */
- return spi_nor_clear_sr_bp(nor);
+ return 0;
}
/* Used when the "_ext_id" is two bytes at most */
@@ -2136,7 +2279,7 @@ static void gd25q256_default_init(struct spi_nor *nor)
* indicate the quad_enable method for this case, we need
* to set it in the default_init fixup hook.
*/
- nor->params.quad_enable = macronix_quad_enable;
+ nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
}
static struct spi_nor_fixups gd25q256_fixups = {
@@ -2179,6 +2322,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
{ "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
{ "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ) },
@@ -2267,6 +2412,10 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES)
+ .fixups = &is25lp256_fixups },
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
@@ -2482,6 +2631,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
@@ -2520,11 +2671,11 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
tmp = spi_mem_exec_op(nor->spimem, &op);
} else {
- tmp = nor->read_reg(nor, SPINOR_OP_RDID, id,
- SPI_NOR_MAX_ID_LEN);
+ tmp = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+ SPI_NOR_MAX_ID_LEN);
}
- if (tmp < 0) {
- dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp);
+ if (tmp) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
return ERR_PTR(tmp);
}
@@ -2544,7 +2695,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
+ ssize_t ret;
dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
@@ -2583,7 +2734,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- size_t actual;
+ size_t actual = 0;
int ret;
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
@@ -2592,26 +2743,28 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
return ret;
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
nor->sst_write_second = false;
- actual = to % 2;
/* Start write from odd address. */
- if (actual) {
+ if (to % 2) {
nor->program_opcode = SPINOR_OP_BP;
/* write one byte. */
ret = spi_nor_write_data(nor, to, 1, buf);
if (ret < 0)
- goto sst_write_err;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n",
- (int)ret);
+ goto out;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto sst_write_err;
+ goto out;
+
+ to++;
+ actual++;
}
- to += actual;
/* Write out most of the data here. */
for (; actual < len - 1; actual += 2) {
@@ -2620,39 +2773,44 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
/* write two bytes. */
ret = spi_nor_write_data(nor, to, 2, buf + actual);
if (ret < 0)
- goto sst_write_err;
- WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
- (int)ret);
+ goto out;
+ WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto sst_write_err;
+ goto out;
to += 2;
nor->sst_write_second = true;
}
nor->sst_write_second = false;
- write_disable(nor);
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ goto out;
+
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto sst_write_err;
+ goto out;
/* Write out trailing byte if it exists. */
if (actual != len) {
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
nor->program_opcode = SPINOR_OP_BP;
ret = spi_nor_write_data(nor, to, 1, buf + actual);
if (ret < 0)
- goto sst_write_err;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n",
- (int)ret);
+ goto out;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto sst_write_err;
- write_disable(nor);
+ goto out;
+
actual += 1;
+
+ ret = spi_nor_write_disable(nor);
}
-sst_write_err:
+out:
*retlen += actual;
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
return ret;
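As an informational note (not part of the patch), the reworked SST flow above is easier to follow with a concrete trace; the address and buffer indices below are made up for illustration, and the wait-till-ready that follows each program cycle is omitted for brevity.

/*
 * Hypothetical trace of sst_write(mtd, 0x1001, 4, &retlen, buf):
 *
 *   WREN                                   write enable once up front
 *   BP     @ 0x1001 <- buf[0]              single byte to reach even alignment
 *   AAI-WP @ 0x1002 <- buf[1], buf[2]      word program for the bulk of the data
 *   WRDI, wait till ready                  leave auto-increment word programming
 *   WREN, BP @ 0x1004 <- buf[3]            trailing odd byte, if any
 *   wait till ready, WRDI
 */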
@@ -2701,7 +2859,10 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
addr = spi_nor_convert_addr(nor, addr);
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto write_err;
+
ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
if (ret < 0)
goto write_err;
@@ -2722,13 +2883,21 @@ write_err:
static int spi_nor_check(struct spi_nor *nor)
{
if (!nor->dev ||
- (!nor->spimem &&
- (!nor->read || !nor->write || !nor->read_reg ||
- !nor->write_reg))) {
+ (!nor->spimem && !nor->controller_ops) ||
+ (!nor->spimem && nor->controller_ops &&
+ (!nor->controller_ops->read ||
+ !nor->controller_ops->write ||
+ !nor->controller_ops->read_reg ||
+ !nor->controller_ops->write_reg))) {
pr_err("spi-nor: please fill all the necessary fields!\n");
return -EINVAL;
}
+ if (nor->spimem && nor->controller_ops) {
+ dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -2738,10 +2907,8 @@ static int s3an_nor_setup(struct spi_nor *nor,
int ret;
ret = spi_nor_xread_sr(nor, nor->bouncebuf);
- if (ret < 0) {
- dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
+ if (ret)
return ret;
- }
nor->erase_opcode = SPINOR_OP_XSE;
nor->program_opcode = SPINOR_OP_XPP;
@@ -2865,7 +3032,7 @@ static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
*/
static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
{
- int ret;
+ ssize_t ret;
while (len) {
ret = spi_nor_read_data(nor, addr, len, buf);
@@ -3489,20 +3656,39 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
break;
case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
+ /*
+ * Writing only one byte to the Status Register has the
+ * side-effect of clearing Status Register 2.
+ */
case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
- params->quad_enable = spansion_no_read_cr_quad_enable;
+ /*
+ * Read Configuration Register (35h) instruction is not
+ * supported.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
break;
case BFPT_DWORD15_QER_SR1_BIT6:
- params->quad_enable = macronix_quad_enable;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr1_bit6_quad_enable;
break;
case BFPT_DWORD15_QER_SR2_BIT7:
- params->quad_enable = sr2_bit7_quad_enable;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr2_bit7_quad_enable;
break;
case BFPT_DWORD15_QER_SR2_BIT1:
- params->quad_enable = spansion_read_cr_quad_enable;
+ /*
+ * JESD216 rev B or later does not specify if writing only one
+		 * byte to the Status Register clears Status Register 2 or
+		 * not, so let's be cautious and keep the default
+ * assumption of a 16-bit Write Status (01h) command.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
break;
default:
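Informational summary (not part of the patch) of how the cases visible in this hunk pair the SNOR_F_HAS_16BIT_SR / SNOR_F_NO_READ_CR flags with a quad_enable() hook; the BFPT_DWORD15_QER_* constants are abbreviated to QER_*:

/*
 *   QER_SR2_BIT1_BUGGY,
 *   QER_SR2_BIT1_NO_RD : sets HAS_16BIT_SR and NO_READ_CR -> spi_nor_sr2_bit1_quad_enable
 *   QER_SR1_BIT6       : clears HAS_16BIT_SR              -> spi_nor_sr1_bit6_quad_enable
 *   QER_SR2_BIT7       : clears HAS_16BIT_SR              -> spi_nor_sr2_bit7_quad_enable
 *   QER_SR2_BIT1       : sets HAS_16BIT_SR                -> spi_nor_sr2_bit1_quad_enable
 */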
@@ -4101,7 +4287,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
err = spi_nor_read_sfdp(nor, sizeof(header),
psize, param_headers);
if (err < 0) {
- dev_err(dev, "failed to read SFDP parameter headers\n");
+ dev_dbg(dev, "failed to read SFDP parameter headers\n");
goto exit;
}
}
@@ -4348,7 +4534,7 @@ static int spi_nor_default_setup(struct spi_nor *nor,
/* Select the (Fast) Read command. */
err = spi_nor_select_read(nor, shared_mask);
if (err) {
- dev_err(nor->dev,
+ dev_dbg(nor->dev,
"can't select read settings supported by both the SPI controller and memory.\n");
return err;
}
@@ -4356,7 +4542,7 @@ static int spi_nor_default_setup(struct spi_nor *nor,
/* Select the Page Program command. */
err = spi_nor_select_pp(nor, shared_mask);
if (err) {
- dev_err(nor->dev,
+ dev_dbg(nor->dev,
"can't select write settings supported by both the SPI controller and memory.\n");
return err;
}
@@ -4364,7 +4550,7 @@ static int spi_nor_default_setup(struct spi_nor *nor,
/* Select the Sector Erase command. */
err = spi_nor_select_erase(nor);
if (err) {
- dev_err(nor->dev,
+ dev_dbg(nor->dev,
"can't select erase settings supported by both the SPI controller and memory.\n");
return err;
}
@@ -4381,12 +4567,32 @@ static int spi_nor_setup(struct spi_nor *nor,
return nor->params.setup(nor, hwcaps);
}
+static void atmel_set_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static void intel_set_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static void issi_set_default_init(struct spi_nor *nor)
+{
+ nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
+}
+
static void macronix_set_default_init(struct spi_nor *nor)
{
- nor->params.quad_enable = macronix_quad_enable;
+ nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
nor->params.set_4byte = macronix_set_4byte;
}
+static void sst_set_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
static void st_micron_set_default_init(struct spi_nor *nor)
{
nor->flags |= SNOR_F_HAS_LOCK;
@@ -4408,6 +4614,18 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
{
/* Init flash parameters based on MFR */
switch (JEDEC_MFR(nor->info)) {
+ case SNOR_MFR_ATMEL:
+ atmel_set_default_init(nor);
+ break;
+
+ case SNOR_MFR_INTEL:
+ intel_set_default_init(nor);
+ break;
+
+ case SNOR_MFR_ISSI:
+ issi_set_default_init(nor);
+ break;
+
case SNOR_MFR_MACRONIX:
macronix_set_default_init(nor);
break;
@@ -4417,6 +4635,10 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
st_micron_set_default_init(nor);
break;
+ case SNOR_MFR_SST:
+ sst_set_default_init(nor);
+ break;
+
case SNOR_MFR_WINBOND:
winbond_set_default_init(nor);
break;
@@ -4465,9 +4687,11 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
u8 i, erase_mask;
/* Initialize legacy flash parameters and settings. */
- params->quad_enable = spansion_quad_enable;
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
params->set_4byte = spansion_set_4byte;
params->setup = spi_nor_default_setup;
+ /* Default to 16-bit Write Status (01h) Command */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
/* Set SPI NOR sizes. */
params->size = (u64)info->sector_size * info->n_sectors;
@@ -4675,25 +4899,36 @@ static int spi_nor_quad_enable(struct spi_nor *nor)
return nor->params.quad_enable(nor);
}
+/**
+ * spi_nor_unlock_all() - Unlocks the entire flash memory array.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * Some SPI NOR flashes are write protected by default after a power-on reset
+ * cycle, in order to avoid inadvertent writes during power-up. Backward
+ * compatibility imposes unlocking the entire flash memory array at power-up
+ * by default.
+ */
+static int spi_nor_unlock_all(struct spi_nor *nor)
+{
+ if (nor->flags & SNOR_F_HAS_LOCK)
+ return spi_nor_unlock(&nor->mtd, 0, nor->params.size);
+
+ return 0;
+}
+
static int spi_nor_init(struct spi_nor *nor)
{
int err;
- if (nor->clear_sr_bp) {
- if (nor->params.quad_enable == spansion_quad_enable)
- nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
-
- err = nor->clear_sr_bp(nor);
- if (err) {
- dev_err(nor->dev,
- "fail to clear block protection bits\n");
- return err;
- }
+ err = spi_nor_quad_enable(nor);
+ if (err) {
+ dev_dbg(nor->dev, "quad mode not supported\n");
+ return err;
}
- err = spi_nor_quad_enable(nor);
+ err = spi_nor_unlock_all(nor);
if (err) {
- dev_err(nor->dev, "quad mode not supported\n");
+ dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
return err;
}
@@ -4761,7 +4996,7 @@ static int spi_nor_set_addr_width(struct spi_nor *nor)
}
if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
- dev_err(nor->dev, "address width is too large: %u\n",
+ dev_dbg(nor->dev, "address width is too large: %u\n",
nor->addr_width);
return -EINVAL;
}
@@ -4879,16 +5114,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (info->flags & SPI_NOR_HAS_LOCK)
nor->flags |= SNOR_F_HAS_LOCK;
- /*
- * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
- * with the software protection bits set.
- */
- if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
- JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
- JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
- nor->info->flags & SPI_NOR_HAS_LOCK)
- nor->clear_sr_bp = spi_nor_clear_sr_bp;
-
/* Init flash parameters based on flash_info struct and SFDP */
spi_nor_init_params(nor);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index a1dff92ceedf..0f847d510950 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -509,11 +509,9 @@ static const struct file_operations eraseblk_count_fops = {
*/
int ubi_debugfs_init_dev(struct ubi_device *ubi)
{
- int err, n;
unsigned long ubi_num = ubi->ubi_num;
- const char *fname;
- struct dentry *dent;
struct ubi_debug_info *d = &ubi->dbg;
+ int n;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
@@ -522,95 +520,52 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
ubi->ubi_num);
if (n == UBI_DFS_DIR_LEN) {
/* The array size is too small */
- fname = UBI_DFS_DIR_NAME;
- dent = ERR_PTR(-EINVAL);
- goto out;
+ return -EINVAL;
}
- fname = d->dfs_dir_name;
- dent = debugfs_create_dir(fname, dfs_rootdir);
- if (IS_ERR_OR_NULL(dent))
- goto out;
- d->dfs_dir = dent;
-
- fname = "chk_gen";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_chk_gen = dent;
-
- fname = "chk_io";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_chk_io = dent;
-
- fname = "chk_fastmap";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_chk_fastmap = dent;
-
- fname = "tst_disable_bgt";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_disable_bgt = dent;
-
- fname = "tst_emulate_bitflips";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_emulate_bitflips = dent;
-
- fname = "tst_emulate_io_failures";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_emulate_io_failures = dent;
-
- fname = "tst_emulate_power_cut";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_emulate_power_cut = dent;
-
- fname = "tst_emulate_power_cut_min";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_power_cut_min = dent;
-
- fname = "tst_emulate_power_cut_max";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_power_cut_max = dent;
-
- fname = "detailed_erase_block_info";
- dent = debugfs_create_file(fname, S_IRUSR, d->dfs_dir, (void *)ubi_num,
- &eraseblk_count_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
+ d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
- return 0;
+ d->dfs_chk_gen = debugfs_create_file("chk_gen", S_IWUSR, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
-out_remove:
- debugfs_remove_recursive(d->dfs_dir);
-out:
- err = dent ? PTR_ERR(dent) : -ENODEV;
- ubi_err(ubi, "cannot create \"%s\" debugfs file or directory, error %d\n",
- fname, err);
- return err;
+ d->dfs_chk_io = debugfs_create_file("chk_io", S_IWUSR, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
+
+ d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", S_IWUSR,
+ d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", S_IWUSR,
+ d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
+
+ d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
+
+ debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
+ (void *)ubi_num, &eraseblk_count_fops);
+
+ return 0;
}
/**
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 6ac4c35f247a..38ea5e600fb8 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -107,7 +107,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class->is_peripheral = false;
- platform_set_drvdata(pdev, mcan_class->dev);
+ platform_set_drvdata(pdev, mcan_class->net);
m_can_init_ram(mcan_class);
@@ -166,8 +166,6 @@ static int __maybe_unused m_can_runtime_resume(struct device *dev)
if (err)
clk_disable_unprepare(mcan_class->hclk);
- m_can_class_resume(dev);
-
return err;
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a9c386b63581..4bb30761abfc 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3636,6 +3636,11 @@ fec_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
cancel_work_sync(&fep->tx_timeout_work);
fec_ptp_stop(pdev);
@@ -3643,15 +3648,17 @@ fec_drv_remove(struct platform_device *pdev)
fec_enet_mii_remove(fep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(fep->clk_ahb);
- clk_disable_unprepare(fep->clk_ipg);
+
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
free_netdev(ndev);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 0a9a7ee2a866..f4889431f9b7 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -393,12 +393,13 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
u64 iov_offset, u64 iov_len)
{
+ u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
+ u64 first_page = iov_offset / PAGE_SIZE;
dma_addr_t dma;
- u64 addr;
+ u64 page;
- for (addr = iov_offset; addr < iov_offset + iov_len;
- addr += PAGE_SIZE) {
- dma = page_buses[addr / PAGE_SIZE];
+ for (page = first_page; page <= last_page; page++) {
+ dma = page_buses[page];
dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
}
}
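Note on the gve hunk above: the old loop stepped through the buffer in PAGE_SIZE increments starting at iov_offset, which can skip the last page of an unaligned buffer; the new code computes the first and last page indices and syncs every page in that range. A minimal userspace sketch of the arithmetic, assuming a 4096-byte page (values are illustrative, not taken from the driver):

/*
 * Shows why stepping by PAGE_SIZE from an unaligned offset can miss
 * the final page, while the first/last-page range covers it.
 */
#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

static uint64_t pages_stepping(uint64_t off, uint64_t len)
{
        uint64_t n = 0;

        /* old scheme: one iteration per PAGE_SIZE starting at off */
        for (uint64_t addr = off; addr < off + len; addr += PAGE_SIZE)
                n++;
        return n;
}

static uint64_t pages_by_range(uint64_t off, uint64_t len)
{
        uint64_t first = off / PAGE_SIZE;
        uint64_t last = (off + len - 1) / PAGE_SIZE;

        return last - first + 1;
}

int main(void)
{
        /* 200 bytes starting near the end of page 0 span two pages */
        assert(pages_stepping(4000, 200) == 1);   /* misses page 1 */
        assert(pages_by_range(4000, 200) == 2);   /* covers both pages */
        return 0;
}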
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 59b824347ba4..615cde1cbf0b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -166,7 +166,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
#define HCLGE_IMP_RESET_BIT 2
-#define HCLGE_RESET_INT_M GENMASK(2, 0)
+#define HCLGE_RESET_INT_M GENMASK(7, 5)
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index f59d9a8e35e2..0686ded7ad3a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2878,10 +2878,15 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
if (test_bit(0, &adapter->resetting) &&
adapter->reset_reason == VNIC_RESET_MOBILITY) {
- struct irq_desc *desc = irq_to_desc(scrq->irq);
- struct irq_chip *chip = irq_desc_get_chip(desc);
+ u64 val = (0xff000000) | scrq->hw_irq;
- chip->irq_eoi(&desc->irq_data);
+ rc = plpar_hcall_norets(H_EOI, val);
+ /* H_EOI would fail with rc = H_FUNCTION when running
+ * in XIVE mode which is expected, but not an error.
+ */
+ if (rc && (rc != H_FUNCTION))
+ dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+ val, rc);
}
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index d8313e2ee600..a1202e53710c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1745,6 +1745,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
break;
case ETHTOOL_GRXCLSRLALL:
+ cmd->data = MAX_NUM_OF_FS_RULES;
while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
err = mlx4_en_get_flow(dev, cmd, i);
if (!err)
@@ -1811,6 +1812,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
+ int total_tx_count;
int port_up = 0;
int xdp_count;
int err = 0;
@@ -1825,13 +1827,12 @@ static int mlx4_en_set_channels(struct net_device *dev,
mutex_lock(&mdev->state_lock);
xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
- if (channel->tx_count * priv->prof->num_up + xdp_count >
- priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) {
+ total_tx_count = channel->tx_count * priv->prof->num_up + xdp_count;
+ if (total_tx_count > MAX_TX_RINGS) {
err = -EINVAL;
en_err(priv,
"Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
- channel->tx_count * priv->prof->num_up + xdp_count,
- MAX_TX_RINGS);
+ total_tx_count, MAX_TX_RINGS);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 40ec5acf79c0..70fd246840e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -91,6 +91,7 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
+ int total_count;
int port_up = 0;
int err = 0;
@@ -104,6 +105,14 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
MLX4_EN_NUM_UP_HIGH;
new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
new_prof.num_up;
+ total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP];
+ if (total_count > MAX_TX_RINGS) {
+ err = -EINVAL;
+ en_err(priv,
+ "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
+ total_count, MAX_TX_RINGS);
+ goto out;
+ }
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 13af72556987..745ab6cd7c30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -239,12 +239,15 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
if (max_encap_size < ipv4_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
ipv4_encap_size, max_encap_size);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
}
encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
- if (!encap_header)
- return -ENOMEM;
+ if (!encap_header) {
+ err = -ENOMEM;
+ goto out;
+ }
/* used by mlx5e_detach_encap to lookup a neigh hash table
* entry in the neigh hash table when a user deletes a rule
@@ -355,12 +358,15 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
ipv6_encap_size, max_encap_size);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
}
encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
- if (!encap_header)
- return -ENOMEM;
+ if (!encap_header) {
+ err = -ENOMEM;
+ goto out;
+ }
/* used by mlx5e_detach_encap to lookup a neigh hash table
* entry in the neigh hash table when a user deletes a rule
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 327c93a7bd55..95601269fa2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -708,9 +708,9 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
u32 eth_proto_cap,
- u8 connector_type)
+ u8 connector_type, bool ext)
{
- if (!connector_type || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+ if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
| MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
@@ -842,9 +842,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
[MLX5E_PORT_OTHER] = PORT_OTHER,
};
-static u8 get_connector_port(u32 eth_proto, u8 connector_type)
+static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
{
- if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+ if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
return ptys2connector_type[connector_type];
if (eth_proto &
@@ -945,9 +945,9 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
link_ksettings->base.port = get_connector_port(eth_proto_oper,
- connector_type);
+ connector_type, ext);
ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
- connector_type);
+ connector_type, ext);
get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
if (an_status == MLX5_AN_COMPLETE)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 772bfdbdeb9c..2a56e66f58d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4252,9 +4252,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
switch (proto) {
case IPPROTO_GRE:
+ return features;
case IPPROTO_IPIP:
case IPPROTO_IPV6:
- return features;
+ if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))
+ return features;
+ break;
case IPPROTO_UDP:
udph = udp_hdr(skb);
port = be16_to_cpu(udph->dest);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fda0b37075e8..f90a9f8e0fc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -3268,7 +3268,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
+ if (encap) {
+ parse_attr->mirred_ifindex[attr->out_count] =
+ out_dev->ifindex;
+ parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
+ if (!parse_attr->tun_info[attr->out_count])
+ return -ENOMEM;
+ encap = false;
+ attr->dests[attr->out_count].flags |=
+ MLX5_ESW_DEST_ENCAP;
+ attr->out_count++;
+ /* attr->dests[].rep is resolved when we
+ * handle encap
+ */
+ } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
struct net_device *uplink_upper;
@@ -3310,19 +3323,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->dests[attr->out_count].rep = rpriv->rep;
attr->dests[attr->out_count].mdev = out_priv->mdev;
attr->out_count++;
- } else if (encap) {
- parse_attr->mirred_ifindex[attr->out_count] =
- out_dev->ifindex;
- parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
- if (!parse_attr->tun_info[attr->out_count])
- return -ENOMEM;
- encap = false;
- attr->dests[attr->out_count].flags |=
- MLX5_ESW_DEST_ENCAP;
- attr->out_count++;
- /* attr->dests[].rep is resolved when we
- * handle encap
- */
} else if (parse_attr->filter_dev != priv->netdev) {
/* All mlx5 devices are called to configure
* high level device filters. Therefore, the
@@ -4000,9 +4000,8 @@ int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
{
struct netlink_ext_ack *extack = ma->common.extack;
- int prio = TC_H_MAJ(ma->common.prio) >> 16;
- if (prio != 1) {
+ if (ma->common.prio != 1) {
NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 30aae76b6a1d..60fddf8afc99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2117,7 +2117,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
unlock:
mutex_unlock(&esw->state_lock);
- return 0;
+ return err;
}
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 3bbb49354829..791e14ac26f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -579,7 +579,7 @@ static void del_sw_flow_group(struct fs_node *node)
rhashtable_destroy(&fg->ftes_hash);
ida_destroy(&fg->fte_allocator);
- if (ft->autogroup.active)
+ if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
ft->autogroup.num_groups--;
err = rhltable_remove(&ft->fgs_hash,
&fg->hash,
@@ -1126,6 +1126,8 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
ft->autogroup.active = true;
ft->autogroup.required_groups = max_num_groups;
+ /* We save place for flow groups in addition to max types */
+ ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
return ft;
}
@@ -1328,8 +1330,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
return ERR_PTR(-ENOENT);
if (ft->autogroup.num_groups < ft->autogroup.required_groups)
- /* We save place for flow groups in addition to max types */
- group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
+ group_size = ft->autogroup.group_size;
/* ft->max_fte == ft->autogroup.max_types */
if (group_size == 0)
@@ -1356,7 +1357,8 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
if (IS_ERR(fg))
goto out;
- ft->autogroup.num_groups++;
+ if (group_size == ft->autogroup.group_size)
+ ft->autogroup.num_groups++;
out:
return fg;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 00717eba2256..c2621b911563 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -162,6 +162,7 @@ struct mlx5_flow_table {
struct {
bool active;
unsigned int required_groups;
+ unsigned int group_size;
unsigned int num_groups;
} autogroup;
/* Protect fwd_rules */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index e47dd7c1b909..50ab88d80033 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1566,6 +1566,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */
{ PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
+ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 5dcb8baf491a..bd1699e62142 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -595,6 +595,18 @@ static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
}
}
+static u16 dr_get_bits_per_mask(u16 byte_mask)
+{
+ u16 bits = 0;
+
+ while (byte_mask) {
+ byte_mask = byte_mask & (byte_mask - 1);
+ bits++;
+ }
+
+ return bits;
+}
+
static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
struct mlx5dr_domain *dmn,
struct mlx5dr_domain_rx_tx *nic_dmn)
@@ -607,6 +619,9 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
if (!ctrl->may_grow)
return false;
+ if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
+ return false;
+
if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
(ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
return true;
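The dr_get_bits_per_mask() helper added above (a copy of the one removed from dr_ste.c further down) is Kernighan's popcount: each iteration clears the lowest set bit, so the loop runs once per set bit; in-kernel, hweight16() computes the same count. A standalone sketch of the loop with a few spot checks:

#include <assert.h>
#include <stdint.h>

static uint16_t bits_per_mask(uint16_t byte_mask)
{
        uint16_t bits = 0;

        while (byte_mask) {
                byte_mask &= byte_mask - 1;     /* clear lowest set bit */
                bits++;
        }
        return bits;
}

int main(void)
{
        assert(bits_per_mask(0x0000) == 0);
        assert(bits_per_mask(0x0001) == 1);
        assert(bits_per_mask(0x00f0) == 4);
        assert(bits_per_mask(0xffff) == 16);
        return 0;
}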
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 5df8436b2ae3..51803eef13dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -700,6 +700,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
unsigned int irqn;
void *cqc, *in;
__be64 *pas;
+ int vector;
u32 i;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -728,7 +729,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
- err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+ vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+ err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
if (err) {
kvfree(in);
goto err_cqwq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 4efe1b0be4a8..3cbf74b44d1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -560,18 +560,6 @@ bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
return !refcount_read(&ste->refcount);
}
-static u16 get_bits_per_mask(u16 byte_mask)
-{
- u16 bits = 0;
-
- while (byte_mask) {
- byte_mask = byte_mask & (byte_mask - 1);
- bits++;
- }
-
- return bits;
-}
-
/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
@@ -620,20 +608,12 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste_htbl *next_htbl;
if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
- u32 bits_in_mask;
u8 next_lu_type;
u16 byte_mask;
next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
- /* Don't allocate table more than required,
- * the size of the table defined via the byte_mask, so no need
- * to allocate more than that.
- */
- bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE;
- log_table_size = min(log_table_size, bits_in_mask);
-
next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
log_table_size,
next_lu_type,
@@ -671,7 +651,7 @@ static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
htbl->ctrl.may_grow = true;
- if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1)
+ if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
htbl->ctrl.may_grow = false;
/* Threshold is 50%, one is added to table of size 1 */
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 67990406cba2..29e95d0a6ad1 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -66,6 +66,8 @@ retry:
return err;
if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
+ fsm_state_err = min_t(enum mlxfw_fsm_state_err,
+ fsm_state_err, MLXFW_FSM_STATE_ERR_MAX);
pr_err("Firmware flash failed: %s\n",
mlxfw_fsm_state_err_str[fsm_state_err]);
NL_SET_ERR_MSG_MOD(extack, "Firmware flash failed");
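The mlxfw change above clamps a firmware-reported error code with min_t() before it is used to index the error-string table. A hedged sketch of that pattern; the table contents and the _MAX sentinel below are illustrative, not the driver's real strings:

#include <stdio.h>

enum fw_err { FW_ERR_OK, FW_ERR_VERIFY, FW_ERR_MAX };

static const char * const fw_err_str[] = {
        [FW_ERR_OK]     = "ok",
        [FW_ERR_VERIFY] = "image verification failed",
        [FW_ERR_MAX]    = "unknown error",
};

static const char *fw_strerror(unsigned int err)
{
        if (err > FW_ERR_MAX)           /* same effect as the min_t() above */
                err = FW_ERR_MAX;
        return fw_err_str[err];
}

int main(void)
{
        printf("%s\n", fw_strerror(1));         /* known code */
        printf("%s\n", fw_strerror(200));       /* out-of-range code from firmware */
        return 0;
}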
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index a330b369e899..39d600c8b92d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -994,7 +994,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
if (d)
return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
else
- return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
+ return RT_TABLE_MAIN;
}
static struct mlxsw_sp_rif *
@@ -1598,27 +1598,10 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_ipip_entry *ipip_entry =
mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
- enum mlxsw_sp_l3proto ul_proto;
- union mlxsw_sp_l3addr saddr;
- u32 ul_tb_id;
if (!ipip_entry)
return 0;
- /* For flat configuration cases, moving overlay to a different VRF might
- * cause local address conflict, and the conflicting tunnels need to be
- * demoted.
- */
- ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
- ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
- saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
- if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
- saddr, ul_tb_id,
- ipip_entry)) {
- mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
- return 0;
- }
-
return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
true, false, false, extack);
}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5ecf61df78bd..baac016f3ec0 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -363,7 +363,7 @@ qcaspi_receive(struct qcaspi *qca)
netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
available);
- if (available > QCASPI_HW_BUF_LEN) {
+ if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) {
/* This could only happen by interferences on the SPI line.
* So retry later ...
*/
@@ -496,7 +496,6 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
u16 signature = 0;
u16 spi_config;
u16 wrbuf_space = 0;
- static u16 reset_count;
if (event == QCASPI_EVENT_CPUON) {
/* Read signature twice, if not valid
@@ -549,13 +548,13 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
qca->sync = QCASPI_SYNC_RESET;
qca->stats.trig_reset++;
- reset_count = 0;
+ qca->reset_count = 0;
break;
case QCASPI_SYNC_RESET:
- reset_count++;
+ qca->reset_count++;
netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n",
- reset_count);
- if (reset_count >= QCASPI_RESET_TIMEOUT) {
+ qca->reset_count);
+ if (qca->reset_count >= QCASPI_RESET_TIMEOUT) {
/* reset did not seem to take place, try again */
qca->sync = QCASPI_SYNC_UNKNOWN;
qca->stats.reset_timeout++;
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index eb9af45fcc5e..d13a67e20d65 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -94,6 +94,7 @@ struct qcaspi {
unsigned int intr_req;
unsigned int intr_svc;
+ u16 reset_count;
#ifdef CONFIG_DEBUG_FS
struct dentry *device_root;
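The qca_spi change above moves the reset counter from a function-static variable into struct qcaspi. A sketch of why that matters: a 'static' local is shared by every instance of the driver, so two attached devices would bump the same counter. Struct and function names below are illustrative only:

#include <assert.h>

struct dev_state {
        unsigned int reset_count;       /* per-device, as in the fix */
};

static unsigned int shared_reset_count;        /* what the old code effectively had */

static void sync_reset_old(void)
{
        shared_reset_count++;
}

static void sync_reset_new(struct dev_state *dev)
{
        dev->reset_count++;
}

int main(void)
{
        struct dev_state a = { 0 }, b = { 0 };

        sync_reset_old();               /* "device A" resets */
        sync_reset_old();               /* "device B" bumps the same counter */
        assert(shared_reset_count == 2);

        sync_reset_new(&a);
        sync_reset_new(&b);
        assert(a.reset_count == 1 && b.reset_count == 1);
        return 0;
}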
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index c4e961ea44d5..c33c438850cc 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -7179,8 +7179,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
}
- /* RTL8168e-vl has a HW issue with TSO */
- if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ /* RTL8168e-vl and one RTL8168c variant are known to have a
+ * HW issue with TSO.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_22) {
dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 02ed6d1b716c..af15a737c675 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1531,7 +1531,8 @@ void efx_ptp_remove(struct efx_nic *efx)
(void)efx_ptp_disable(efx);
cancel_work_sync(&efx->ptp_data->work);
- cancel_work_sync(&efx->ptp_data->pps_work);
+ if (efx->ptp_data->pps_workwq)
+ cancel_work_sync(&efx->ptp_data->pps_work);
skb_queue_purge(&efx->ptp_data->rxq);
skb_queue_purge(&efx->ptp_data->txq);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 670ef682f268..fb547f37af1e 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -609,7 +609,8 @@ struct nvsp_5_send_indirect_table {
/* The number of entries in the send indirection table */
u32 count;
- /* The offset of the send indirection table from top of this struct.
+ /* The offset of the send indirection table from the beginning of
+ * struct nvsp_message.
* The send indirection table tells which channel to put the send
* traffic on. Each entry is a channel number.
*/
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d22a36fc7a7c..eab83e71567a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1178,20 +1178,39 @@ static int netvsc_receive(struct net_device *ndev,
}
static void netvsc_send_table(struct net_device *ndev,
- const struct nvsp_message *nvmsg)
+ struct netvsc_device *nvscdev,
+ const struct nvsp_message *nvmsg,
+ u32 msglen)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- u32 count, *tab;
+ u32 count, offset, *tab;
int i;
count = nvmsg->msg.v5_msg.send_table.count;
+ offset = nvmsg->msg.v5_msg.send_table.offset;
+
if (count != VRSS_SEND_TAB_SIZE) {
netdev_err(ndev, "Received wrong send-table size:%u\n", count);
return;
}
- tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
- nvmsg->msg.v5_msg.send_table.offset);
+ /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
+ * wrong due to a host bug. So fix the offset here.
+ */
+ if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
+ msglen >= sizeof(struct nvsp_message_header) +
+ sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
+ offset = sizeof(struct nvsp_message_header) +
+ sizeof(union nvsp_6_message_uber);
+
+ /* Boundary check for all versions */
+ if (offset > msglen - count * sizeof(u32)) {
+ netdev_err(ndev, "Received send-table offset too big:%u\n",
+ offset);
+ return;
+ }
+
+ tab = (void *)nvmsg + offset;
for (i = 0; i < count; i++)
net_device_ctx->tx_table[i] = tab[i];
@@ -1209,12 +1228,14 @@ static void netvsc_send_vf(struct net_device *ndev,
net_device_ctx->vf_alloc ? "added" : "removed");
}
-static void netvsc_receive_inband(struct net_device *ndev,
- const struct nvsp_message *nvmsg)
+static void netvsc_receive_inband(struct net_device *ndev,
+ struct netvsc_device *nvscdev,
+ const struct nvsp_message *nvmsg,
+ u32 msglen)
{
switch (nvmsg->hdr.msg_type) {
case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
- netvsc_send_table(ndev, nvmsg);
+ netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
break;
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
@@ -1232,6 +1253,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
{
struct vmbus_channel *channel = nvchan->channel;
const struct nvsp_message *nvmsg = hv_pkt_data(desc);
+ u32 msglen = hv_pkt_datalen(desc);
trace_nvsp_recv(ndev, channel, nvmsg);
@@ -1247,7 +1269,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
break;
case VM_PKT_DATA_INBAND:
- netvsc_receive_inband(ndev, nvmsg);
+ netvsc_receive_inband(ndev, net_device, nvmsg, msglen);
break;
default:
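The netvsc change above validates the host-supplied send-table offset and entry count against the actual message length before indexing into the buffer. A simplified userspace sketch of that validation step, with illustrative sizes (it checks the same property, not the driver's exact expression):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool table_fits(size_t msglen, size_t offset, size_t count)
{
        if (count > (SIZE_MAX - offset) / sizeof(uint32_t))
                return false;           /* count * 4 would overflow */
        return offset <= msglen &&
               count * sizeof(uint32_t) <= msglen - offset;
}

int main(void)
{
        /* a 16-entry u32 table at offset 0 exactly fills a 64-byte message */
        assert(table_fits(64, 0, 16));
        /* the same table at offset 4 would run past the end */
        assert(!table_fits(64, 4, 16));
        /* an offset beyond the message is rejected outright */
        assert(!table_fits(64, 128, 1));
        return 0;
}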
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 58d6504495e0..f798de3276dc 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -145,8 +145,11 @@ err_out_free_mdiobus:
static int sun4i_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
+ struct sun4i_mdio_data *data = bus->priv;
mdiobus_unregister(bus);
+ if (data->regulator)
+ regulator_disable(data->regulator);
mdiobus_free(bus);
return 0;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 35876562e32a..dbacb0031877 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -65,7 +65,7 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev)
reset = devm_reset_control_get_exclusive(&mdiodev->dev,
"phy");
if (IS_ERR(reset)) {
- if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOSYS)
+ if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP)
reset = NULL;
else
return PTR_ERR(reset);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index a578f7ebf715..536236fdb232 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -601,6 +601,8 @@ static int phylink_register_sfp(struct phylink *pl,
* Create a new phylink instance, and parse the link parameters found in @np.
* This will parse in-band modes, fixed-link or SFP configuration.
*
+ * Note: the rtnl lock must not be held when calling this function.
+ *
* Returns a pointer to a &struct phylink, or an error-pointer value. Users
* must use IS_ERR() to check for errors from this function.
*/
@@ -678,6 +680,8 @@ EXPORT_SYMBOL_GPL(phylink_create);
*
* Destroy a phylink instance. Any PHY that has been attached must have been
* cleaned up via phylink_disconnect_phy() prior to calling this function.
+ *
+ * Note: the rtnl lock must not be held when calling this function.
*/
void phylink_destroy(struct phylink *pl)
{
@@ -1254,7 +1258,13 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
pl->link_config.duplex = our_kset.base.duplex;
pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
- if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
+ /* If we have a PHY, phylib will call our link state function if the
+ * mode has changed, which will trigger a resolve and update the MAC
+ * configuration. For a fixed link, this isn't able to change any
+ * parameters, which just leaves inband mode.
+ */
+ if (pl->link_an_mode == MLO_AN_INBAND &&
+ !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
phylink_mac_config(pl, &pl->link_config);
phylink_mac_an_restart(pl);
}
@@ -1334,15 +1344,16 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
if (pause->tx_pause)
config->pause |= MLO_PAUSE_TX;
- if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
+ /* If we have a PHY, phylib will call our link state function if the
+ * mode has changed, which will trigger a resolve and update the MAC
+ * configuration.
+ */
+ if (pl->phydev) {
+ phy_set_asym_pause(pl->phydev, pause->rx_pause,
+ pause->tx_pause);
+ } else if (!test_bit(PHYLINK_DISABLE_STOPPED,
+ &pl->phylink_disable_state)) {
switch (pl->link_an_mode) {
- case MLO_AN_PHY:
- /* Silently mark the carrier down, and then trigger a resolve */
- if (pl->netdev)
- netif_carrier_off(pl->netdev);
- phylink_run_resolve(pl);
- break;
-
case MLO_AN_FIXED:
/* Should we allow fixed links to change against the config? */
phylink_resolve_flow(pl, config);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d4a95b50bda6..b2507c59ba8b 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -4283,10 +4283,10 @@ static int rtl8152_close(struct net_device *netdev)
unregister_pm_notifier(&tp->pm_notifier);
#endif
tasklet_disable(&tp->tx_tl);
- napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
+ napi_disable(&tp->napi);
netif_stop_queue(netdev);
res = usb_autopm_get_interface(tp->intf);
@@ -4552,10 +4552,10 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
netif_stop_queue(netdev);
tasklet_disable(&tp->tx_tl);
- napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
+ napi_disable(&tp->napi);
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
tp->rtl_ops.disable(tp);
@@ -4673,7 +4673,7 @@ static int rtl8152_system_resume(struct r8152 *tp)
netif_device_attach(netdev);
- if (netif_running(netdev) && netdev->flags & IFF_UP) {
+ if (netif_running(netdev) && (netdev->flags & IFF_UP)) {
tp->rtl_ops.up(tp);
netif_carrier_off(netdev);
set_bit(WORK_ENABLE, &tp->flags);
@@ -5244,9 +5244,15 @@ static int rtl8152_set_tunable(struct net_device *netdev,
}
if (tp->rx_copybreak != val) {
- napi_disable(&tp->napi);
- tp->rx_copybreak = val;
- napi_enable(&tp->napi);
+ if (netdev->flags & IFF_UP) {
+ mutex_lock(&tp->control);
+ napi_disable(&tp->napi);
+ tp->rx_copybreak = val;
+ napi_enable(&tp->napi);
+ mutex_unlock(&tp->control);
+ } else {
+ tp->rx_copybreak = val;
+ }
}
break;
default:
@@ -5274,9 +5280,15 @@ static int rtl8152_set_ringparam(struct net_device *netdev,
return -EINVAL;
if (tp->rx_pending != ring->rx_pending) {
- napi_disable(&tp->napi);
- tp->rx_pending = ring->rx_pending;
- napi_enable(&tp->napi);
+ if (netdev->flags & IFF_UP) {
+ mutex_lock(&tp->control);
+ napi_disable(&tp->napi);
+ tp->rx_pending = ring->rx_pending;
+ napi_enable(&tp->napi);
+ mutex_unlock(&tp->control);
+ } else {
+ tp->rx_pending = ring->rx_pending;
+ }
}
return 0;
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 145ddf3f0a45..604dba4f18af 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -783,7 +783,7 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
if (rc)
- usb_unlink_urb(dev->out_urb);
+ usb_kill_urb(dev->out_urb);
exit:
mutex_unlock(&dev->out_urb_lock);
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 2b36f052bfb9..c6439638a419 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -23,6 +23,16 @@ config NVME_MULTIPATH
/dev/nvmeXnY device will show up for each NVMe namespaces,
even if it is accessible through multiple controllers.
+config NVME_HWMON
+ bool "NVMe hardware monitoring"
+ depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
+ help
+ This provides support for NVMe hardware monitoring. If enabled,
+ a hardware monitoring device will be created for each NVMe drive
+ in the system.
+
+ If unsure, say N.
+
config NVME_FABRICS
tristate
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 8a4b671c5f0c..fc7b26be692d 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -14,6 +14,7 @@ nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
+nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
nvme-y += pci.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fa7ba09dca77..8e8527408db3 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -283,6 +283,8 @@ void nvme_complete_rq(struct request *req)
trace_nvme_complete_rq(req);
+ nvme_cleanup_cmd(req);
+
if (nvme_req(req)->ctrl->kas)
nvme_req(req)->ctrl->comp_seen = true;
@@ -313,7 +315,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
if (blk_mq_request_completed(req))
return true;
- nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+ nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
blk_mq_complete_request(req);
return true;
}
@@ -611,8 +613,14 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
struct nvme_dsm_range *range;
struct bio *bio;
- range = kmalloc_array(segments, sizeof(*range),
- GFP_ATOMIC | __GFP_NOWARN);
+ /*
+ * Some devices do not consider the DSM 'Number of Ranges' field when
+ * determining how much data to DMA. Always allocate memory for maximum
+ * number of segments to prevent device reading beyond end of buffer.
+ */
+ static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
+
+ range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
if (!range) {
/*
* If we fail allocation our range, fallback to the controller
@@ -626,7 +634,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
}
__rq_for_each_bio(bio, req) {
- u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
+ u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
if (n < segments) {
@@ -652,7 +660,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
req->special_vec.bv_page = virt_to_page(range);
req->special_vec.bv_offset = offset_in_page(range);
- req->special_vec.bv_len = sizeof(*range) * segments;
+ req->special_vec.bv_len = alloc_size;
req->rq_flags |= RQF_SPECIAL_PAYLOAD;
return BLK_STS_OK;
@@ -667,7 +675,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->write_zeroes.slba =
- cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->write_zeroes.length =
cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
cmnd->write_zeroes.control = 0;
@@ -691,7 +699,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
@@ -1647,7 +1655,7 @@ static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
static void nvme_set_chunk_size(struct nvme_ns *ns)
{
- u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
+ u32 chunk_size = nvme_lba_to_sect(ns, ns->noiob);
blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}
@@ -1684,8 +1692,7 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
{
- u32 max_sectors;
- unsigned short bs = 1 << ns->lba_shift;
+ u64 max_blocks;
if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
(ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
@@ -1701,11 +1708,12 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
* nvme_init_identify() if available.
*/
if (ns->ctrl->max_hw_sectors == UINT_MAX)
- max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9;
+ max_blocks = (u64)USHRT_MAX + 1;
else
- max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9;
+ max_blocks = ns->ctrl->max_hw_sectors + 1;
- blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors);
+ blk_queue_max_write_zeroes_sectors(disk->queue,
+ nvme_lba_to_sect(ns, max_blocks));
}
static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
@@ -1748,7 +1756,7 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
static void nvme_update_disk_info(struct gendisk *disk,
struct nvme_ns *ns, struct nvme_id_ns *id)
{
- sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
+ sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
unsigned short bs = 1 << ns->lba_shift;
u32 atomic_bs, phys_bs, io_opt;
@@ -2796,6 +2804,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->oncs = le16_to_cpu(id->oncs);
ctrl->mtfa = le16_to_cpu(id->mtfa);
ctrl->oaes = le32_to_cpu(id->oaes);
+ ctrl->wctemp = le16_to_cpu(id->wctemp);
+ ctrl->cctemp = le16_to_cpu(id->cctemp);
+
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
if (id->mdts)
@@ -2895,6 +2906,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ret < 0)
return ret;
+ if (!ctrl->identified)
+ nvme_hwmon_init(ctrl);
+
ctrl->identified = true;
return 0;
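The discard-path change in core.c above always allocates the worst-case DSM range buffer and zeroes it, because some devices DMA the full buffer regardless of the 'Number of Ranges' field. A userspace sketch of that strategy, using the NVMe DSM layout (16-byte range descriptors, at most 256 of them, so a 4 KiB buffer):

#include <stdint.h>
#include <stdlib.h>

struct dsm_range {                      /* 16 bytes, as in the NVMe spec */
        uint32_t cattr;
        uint32_t nlb;
        uint64_t slba;
};

#define DSM_MAX_RANGES 256

static struct dsm_range *alloc_dsm_ranges(void)
{
        /* worst case: 256 * 16 = 4096 bytes, zero-filled */
        return calloc(DSM_MAX_RANGES, sizeof(struct dsm_range));
}

int main(void)
{
        struct dsm_range *r = alloc_dsm_ranges();

        if (!r)
                return 1;
        r[0].slba = 2048;               /* fill only the first range ... */
        r[0].nlb = 8;
        /* ... entries 1..255 stay zero and are harmless if the device DMAs them */
        free(r);
        return 0;
}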
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 265f89e11d8b..679a721ae229 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1224,7 +1224,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
lsreq->rqstlen = sizeof(*assoc_rqst);
lsreq->rspaddr = assoc_acc;
lsreq->rsplen = sizeof(*assoc_acc);
- lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
if (ret)
@@ -1264,7 +1264,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
if (fcret) {
ret = -EBADF;
dev_err(ctrl->dev,
- "q %d connect failed: %s\n",
+ "q %d Create Association LS failed: %s\n",
queue->qnum, validation_errors[fcret]);
} else {
ctrl->association_id =
@@ -1332,7 +1332,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
lsreq->rqstlen = sizeof(*conn_rqst);
lsreq->rspaddr = conn_acc;
lsreq->rsplen = sizeof(*conn_acc);
- lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
if (ret)
@@ -1363,7 +1363,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
if (fcret) {
ret = -EBADF;
dev_err(ctrl->dev,
- "q %d connect failed: %s\n",
+ "q %d Create I/O Connection LS failed: %s\n",
queue->qnum, validation_errors[fcret]);
} else {
queue->connection_id =
@@ -1376,7 +1376,7 @@ out_free_buffer:
out_no_memory:
if (ret)
dev_err(ctrl->dev,
- "queue %d connect command failed (%d).\n",
+ "queue %d connect I/O queue failed (%d).\n",
queue->qnum, ret);
return ret;
}
@@ -1413,8 +1413,8 @@ nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
- struct fcnvme_ls_disconnect_rqst *discon_rqst;
- struct fcnvme_ls_disconnect_acc *discon_acc;
+ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
+ struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
struct nvmefc_ls_req_op *lsop;
struct nvmefc_ls_req *lsreq;
int ret;
@@ -1430,11 +1430,11 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
lsreq = &lsop->ls_req;
lsreq->private = (void *)&lsop[1];
- discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
+ discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)
(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
- discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
+ discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
- discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
+ discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC;
discon_rqst->desc_list_len = cpu_to_be32(
sizeof(struct fcnvme_lsdesc_assoc_id) +
sizeof(struct fcnvme_lsdesc_disconn_cmd));
@@ -1451,22 +1451,17 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
discon_rqst->discon_cmd.desc_len =
fcnvme_lsdesc_len(
sizeof(struct fcnvme_lsdesc_disconn_cmd));
- discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
- discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
lsreq->rqstaddr = discon_rqst;
lsreq->rqstlen = sizeof(*discon_rqst);
lsreq->rspaddr = discon_acc;
lsreq->rsplen = sizeof(*discon_acc);
- lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
nvme_fc_disconnect_assoc_done);
if (ret)
kfree(lsop);
-
- /* only meaningful part to terminating the association */
- ctrl->association_id = 0;
}
@@ -1662,7 +1657,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
(freq->rcv_rsplen / 4) ||
be32_to_cpu(op->rsp_iu.xfrd_len) !=
freq->transferred_length ||
- op->rsp_iu.status_code ||
+ op->rsp_iu.ersp_result ||
sqe->common.command_id != cqe->command_id)) {
status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
dev_info(ctrl->ctrl.device,
@@ -1672,7 +1667,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
be32_to_cpu(op->rsp_iu.xfrd_len),
freq->transferred_length,
- op->rsp_iu.status_code,
+ op->rsp_iu.ersp_result,
sqe->common.command_id,
cqe->command_id);
goto done;
@@ -1731,9 +1726,14 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
op->rq = rq;
op->rqno = rqno;
- cmdiu->scsi_id = NVME_CMD_SCSI_ID;
+ cmdiu->format_id = NVME_CMD_FORMAT_ID;
cmdiu->fc_id = NVME_CMD_FC_ID;
cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
+ if (queue->qnum)
+ cmdiu->rsv_cat = fccmnd_set_cat_css(0,
+ (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
+ else
+ cmdiu->rsv_cat = fccmnd_set_cat_admin(0);
op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
@@ -2173,8 +2173,6 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
rq_dma_dir(rq));
- nvme_cleanup_cmd(rq);
-
sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
freq->sg_cnt = 0;
@@ -2305,6 +2303,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
if (!(op->flags & FCOP_FLAGS_AEN))
nvme_fc_unmap_data(ctrl, op->rq, op);
+ nvme_cleanup_cmd(op->rq);
nvme_fc_ctrl_put(ctrl);
if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
@@ -2695,7 +2694,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
/* warn if maxcmd is lower than queue_size */
dev_warn(ctrl->ctrl.device,
"queue_size %zu > ctrl maxcmd %u, reducing "
- "to queue_size\n",
+ "to maxcmd\n",
opts->queue_size, ctrl->ctrl.maxcmd);
opts->queue_size = ctrl->ctrl.maxcmd;
}
@@ -2703,7 +2702,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
/* warn if sqsize is lower than queue_size */
dev_warn(ctrl->ctrl.device,
- "queue_size %zu > ctrl sqsize %u, clamping down\n",
+ "queue_size %zu > ctrl sqsize %u, reducing "
+ "to sqsize\n",
opts->queue_size, ctrl->ctrl.sqsize + 1);
opts->queue_size = ctrl->ctrl.sqsize + 1;
}
@@ -2739,6 +2739,7 @@ out_term_aen_ops:
out_disconnect_admin_queue:
/* send a Disconnect(association) LS to fc-nvme target */
nvme_fc_xmt_disconnect_assoc(ctrl);
+ ctrl->association_id = 0;
out_delete_hw_queue:
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
@@ -2830,6 +2831,8 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
if (ctrl->association_id)
nvme_fc_xmt_disconnect_assoc(ctrl);
+ ctrl->association_id = 0;
+
if (ctrl->ctrl.tagset) {
nvme_fc_delete_hw_io_queues(ctrl);
nvme_fc_free_io_queues(ctrl);
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
new file mode 100644
index 000000000000..a5af21f5d370
--- /dev/null
+++ b/drivers/nvme/host/hwmon.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express hardware monitoring support
+ * Copyright (c) 2019, Guenter Roeck
+ */
+
+#include <linux/hwmon.h>
+#include <asm/unaligned.h>
+
+#include "nvme.h"
+
+/* These macros should be moved to linux/temperature.h */
+#define MILLICELSIUS_TO_KELVIN(t) DIV_ROUND_CLOSEST((t) + 273150, 1000)
+#define KELVIN_TO_MILLICELSIUS(t) ((t) * 1000L - 273150)
+
+struct nvme_hwmon_data {
+ struct nvme_ctrl *ctrl;
+ struct nvme_smart_log log;
+ struct mutex read_lock;
+};
+
+static int nvme_get_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
+ long *temp)
+{
+ unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT;
+ u32 status;
+ int ret;
+
+ if (under)
+ threshold |= NVME_TEMP_THRESH_TYPE_UNDER;
+
+ ret = nvme_get_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0,
+ &status);
+ if (ret > 0)
+ return -EIO;
+ if (ret < 0)
+ return ret;
+ *temp = KELVIN_TO_MILLICELSIUS(status & NVME_TEMP_THRESH_MASK);
+
+ return 0;
+}
+
+static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
+ long temp)
+{
+ unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT;
+ int ret;
+
+ temp = MILLICELSIUS_TO_KELVIN(temp);
+ threshold |= clamp_val(temp, 0, NVME_TEMP_THRESH_MASK);
+
+ if (under)
+ threshold |= NVME_TEMP_THRESH_TYPE_UNDER;
+
+ ret = nvme_set_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0,
+ NULL);
+ if (ret > 0)
+ return -EIO;
+
+ return ret;
+}
+
+static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
+{
+ int ret;
+
+ ret = nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
+ &data->log, sizeof(data->log), 0);
+
+ return ret <= 0 ? ret : -EIO;
+}
+
+static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct nvme_hwmon_data *data = dev_get_drvdata(dev);
+ struct nvme_smart_log *log = &data->log;
+ int temp;
+ int err;
+
+ /*
+ * First handle attributes which don't require us to read
+ * the smart log.
+ */
+ switch (attr) {
+ case hwmon_temp_max:
+ return nvme_get_temp_thresh(data->ctrl, channel, false, val);
+ case hwmon_temp_min:
+ return nvme_get_temp_thresh(data->ctrl, channel, true, val);
+ case hwmon_temp_crit:
+ *val = KELVIN_TO_MILLICELSIUS(data->ctrl->cctemp);
+ return 0;
+ default:
+ break;
+ }
+
+ mutex_lock(&data->read_lock);
+ err = nvme_hwmon_get_smart_log(data);
+ if (err)
+ goto unlock;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (!channel)
+ temp = get_unaligned_le16(log->temperature);
+ else
+ temp = le16_to_cpu(log->temp_sensor[channel - 1]);
+ *val = KELVIN_TO_MILLICELSIUS(temp);
+ break;
+ case hwmon_temp_alarm:
+ *val = !!(log->critical_warning & NVME_SMART_CRIT_TEMPERATURE);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+unlock:
+ mutex_unlock(&data->read_lock);
+ return err;
+}
+
+static int nvme_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct nvme_hwmon_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_max:
+ return nvme_set_temp_thresh(data->ctrl, channel, false, val);
+ case hwmon_temp_min:
+ return nvme_set_temp_thresh(data->ctrl, channel, true, val);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const char * const nvme_hwmon_sensor_names[] = {
+ "Composite",
+ "Sensor 1",
+ "Sensor 2",
+ "Sensor 3",
+ "Sensor 4",
+ "Sensor 5",
+ "Sensor 6",
+ "Sensor 7",
+ "Sensor 8",
+};
+
+static int nvme_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ *str = nvme_hwmon_sensor_names[channel];
+ return 0;
+}
+
+static umode_t nvme_hwmon_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct nvme_hwmon_data *data = _data;
+
+ switch (attr) {
+ case hwmon_temp_crit:
+ if (!channel && data->ctrl->cctemp)
+ return 0444;
+ break;
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ if ((!channel && data->ctrl->wctemp) ||
+ (channel && data->log.temp_sensor[channel - 1])) {
+ if (data->ctrl->quirks &
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
+ return 0444;
+ return 0644;
+ }
+ break;
+ case hwmon_temp_alarm:
+ if (!channel)
+ return 0444;
+ break;
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ if (!channel || data->log.temp_sensor[channel - 1])
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *nvme_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_CRIT | HWMON_T_LABEL | HWMON_T_ALARM,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops nvme_hwmon_ops = {
+ .is_visible = nvme_hwmon_is_visible,
+ .read = nvme_hwmon_read,
+ .read_string = nvme_hwmon_read_string,
+ .write = nvme_hwmon_write,
+};
+
+static const struct hwmon_chip_info nvme_hwmon_chip_info = {
+ .ops = &nvme_hwmon_ops,
+ .info = nvme_hwmon_info,
+};
+
+void nvme_hwmon_init(struct nvme_ctrl *ctrl)
+{
+ struct device *dev = ctrl->dev;
+ struct nvme_hwmon_data *data;
+ struct device *hwmon;
+ int err;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ data->ctrl = ctrl;
+ mutex_init(&data->read_lock);
+
+ err = nvme_hwmon_get_smart_log(data);
+ if (err) {
+ dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+ devm_kfree(dev, data);
+ return;
+ }
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
+ &nvme_hwmon_chip_info,
+ NULL);
+ if (IS_ERR(hwmon)) {
+ dev_warn(dev, "Failed to instantiate hwmon device\n");
+ devm_kfree(dev, data);
+ }
+}
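The two macros at the top of the new hwmon.c convert between the kelvins reported by NVMe and the millidegrees Celsius expected by hwmon sysfs. A quick userspace check of the arithmetic; DIV_ROUND_CLOSEST is expanded by hand for the nonnegative values used here:

#include <assert.h>

#define MILLICELSIUS_TO_KELVIN(t)  (((t) + 273150 + 500) / 1000)
#define KELVIN_TO_MILLICELSIUS(t)  ((t) * 1000L - 273150)

int main(void)
{
        /* 300 K is 26.85 degC, i.e. 26850 millidegrees */
        assert(KELVIN_TO_MILLICELSIUS(300) == 26850);
        assert(MILLICELSIUS_TO_KELVIN(26850) == 300);
        /* rounding: 26.4 degC is 299.55 K, which rounds to 300 K */
        assert(MILLICELSIUS_TO_KELVIN(26400) == 300);
        return 0;
}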
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index e0f064dcbd02..797c18337d96 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -95,6 +95,7 @@ void nvme_failover_req(struct request *req)
}
break;
case NVME_SC_HOST_PATH_ERROR:
+ case NVME_SC_HOST_ABORTED_CMD:
/*
* Temporary transport disruption in talking to the controller.
* Try to send on a new path.
@@ -446,8 +447,14 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
struct nvme_ana_group_desc *desc = base + offset;
- u32 nr_nsids = le32_to_cpu(desc->nnsids);
- size_t nsid_buf_size = nr_nsids * sizeof(__le32);
+ u32 nr_nsids;
+ size_t nsid_buf_size;
+
+ if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
+ return -EINVAL;
+
+ nr_nsids = le32_to_cpu(desc->nnsids);
+ nsid_buf_size = nr_nsids * sizeof(__le32);
if (WARN_ON_ONCE(desc->grpid == 0))
return -EINVAL;
@@ -467,8 +474,6 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
return error;
offset += nsid_buf_size;
- if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
- return -EINVAL;
}
return 0;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 22e8401352c2..3b9cbe0668fa 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -115,6 +115,11 @@ enum nvme_quirks {
* Prevent tag overlap between queues
*/
NVME_QUIRK_SHARED_TAGS = (1 << 13),
+
+ /*
+ * Don't change the value of the temperature threshold feature
+ */
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),
};
/*
@@ -231,6 +236,8 @@ struct nvme_ctrl {
u16 kas;
u8 npss;
u8 apsta;
+ u16 wctemp;
+ u16 cctemp;
u32 oaes;
u32 aen_result;
u32 ctratt;
@@ -419,9 +426,20 @@ static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}
-static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
+/*
+ * Convert a 512B sector number to a device logical block number.
+ */
+static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
+{
+ return sector >> (ns->lba_shift - SECTOR_SHIFT);
+}
+
+/*
+ * Convert a device logical block number to a 512B sector number.
+ */
+static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
- return (sector >> (ns->lba_shift - 9));
+ return lba << (ns->lba_shift - SECTOR_SHIFT);
}
static inline void nvme_end_request(struct request *req, __le16 status,
@@ -446,6 +464,11 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
put_device(ctrl->device);
}
+static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
+{
+ return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
+}
+
void nvme_complete_rq(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
@@ -652,4 +675,10 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
return dev_to_disk(dev)->private_data;
}
+#ifdef CONFIG_NVME_HWMON
+void nvme_hwmon_init(struct nvme_ctrl *ctrl);
+#else
+static inline void nvme_hwmon_init(struct nvme_ctrl *ctrl) { }
+#endif
+
#endif /* _NVME_H */
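
nvme_sect_to_lba() and nvme_lba_to_sect() replace the open-coded shift by 9: with SECTOR_SHIFT = 9, a namespace formatted with 4096-byte logical blocks has lba_shift = 12, so the conversion is a shift by 3 in either direction. A standalone sketch of the same arithmetic, with kernel types replaced by stdint equivalents:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SECTOR_SHIFT 9	/* the block layer's 512-byte sector unit */

	static uint64_t sect_to_lba(unsigned int lba_shift, uint64_t sector)
	{
		return sector >> (lba_shift - SECTOR_SHIFT);
	}

	static uint64_t lba_to_sect(unsigned int lba_shift, uint64_t lba)
	{
		return lba << (lba_shift - SECTOR_SHIFT);
	}

	int main(void)
	{
		unsigned int lba_shift = 12;	/* 4096-byte logical blocks */

		/* 2048 512-byte sectors == 1 MiB == 256 4 KiB blocks */
		assert(sect_to_lba(lba_shift, 2048) == 256);
		assert(lba_to_sect(lba_shift, 256) == 2048);
		printf("ok\n");
		return 0;
	}
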
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 869f462e6b6e..dcaad5831cee 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -925,7 +925,6 @@ static void nvme_pci_complete_rq(struct request *req)
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_dev *dev = iod->nvmeq->dev;
- nvme_cleanup_cmd(req);
if (blk_integrity_rq(req))
dma_unmap_page(dev->dev, iod->meta_dma,
rq_integrity_vec(req)->bv_len, rq_data_dir(req));
@@ -968,8 +967,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvmeq->qid == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+ if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
@@ -2982,7 +2980,7 @@ static int nvme_suspend(struct device *dev)
/*
* Clearing npss forces a controller reset on resume. The
- * correct value will be resdicovered then.
+ * correct value will be rediscovered then.
*/
ret = nvme_disable_prepare_reset(ndev, true);
ctrl->npss = 0;
@@ -3082,7 +3080,8 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_DEALLOCATE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
- NVME_QUIRK_MEDIUM_PRIO_SQ },
+ NVME_QUIRK_MEDIUM_PRIO_SQ |
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE },
{ PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index cb4c3000a57e..dce59459ed41 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1160,8 +1160,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
}
ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
-
- nvme_cleanup_cmd(rq);
sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
}
@@ -1501,8 +1499,8 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+ if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
+ cqe->command_id)))
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
else
@@ -1768,7 +1766,6 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(err < 0)) {
dev_err(queue->ctrl->ctrl.device,
"Failed to map data (%d)\n", err);
- nvme_cleanup_cmd(rq);
goto err;
}
@@ -1779,18 +1776,19 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
req->mr ? &req->reg_wr.wr : NULL);
- if (unlikely(err)) {
- nvme_rdma_unmap_data(queue, rq);
- goto err;
- }
+ if (unlikely(err))
+ goto err_unmap;
return BLK_STS_OK;
+err_unmap:
+ nvme_rdma_unmap_data(queue, rq);
err:
if (err == -ENOMEM || err == -EAGAIN)
ret = BLK_STS_RESOURCE;
else
ret = BLK_STS_IOERR;
+ nvme_cleanup_cmd(rq);
unmap_qe:
ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
DMA_TO_DEVICE);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 7544be84ab35..6d43b23a0fc8 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -491,8 +491,8 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+ if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
+ cqe->command_id)))
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
else
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 831a062d27cb..56c21b501185 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -31,7 +31,7 @@ u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
- nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
+ nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
@@ -134,7 +134,7 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
u16 status = NVME_SC_INTERNAL;
unsigned long flags;
- if (req->data_len != sizeof(*log))
+ if (req->transfer_len != sizeof(*log))
goto out;
log = kzalloc(sizeof(*log), GFP_KERNEL);
@@ -196,7 +196,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
u16 status = NVME_SC_INTERNAL;
size_t len;
- if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
+ if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
goto out;
mutex_lock(&ctrl->lock);
@@ -206,7 +206,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
len = ctrl->nr_changed_ns * sizeof(__le32);
status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
if (!status)
- status = nvmet_zero_sgl(req, len, req->data_len - len);
+ status = nvmet_zero_sgl(req, len, req->transfer_len - len);
ctrl->nr_changed_ns = 0;
nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
mutex_unlock(&ctrl->lock);
@@ -282,6 +282,36 @@ out:
nvmet_req_complete(req, status);
}
+static void nvmet_execute_get_log_page(struct nvmet_req *req)
+{
+ if (!nvmet_check_data_len(req, nvmet_get_log_page_len(req->cmd)))
+ return;
+
+ switch (req->cmd->get_log_page.lid) {
+ case NVME_LOG_ERROR:
+ return nvmet_execute_get_log_page_error(req);
+ case NVME_LOG_SMART:
+ return nvmet_execute_get_log_page_smart(req);
+ case NVME_LOG_FW_SLOT:
+ /*
+ * We only support a single firmware slot which always is
+ * active, so we can zero out the whole firmware slot log and
+ * still claim to fully implement this mandatory log page.
+ */
+ return nvmet_execute_get_log_page_noop(req);
+ case NVME_LOG_CHANGED_NS:
+ return nvmet_execute_get_log_changed_ns(req);
+ case NVME_LOG_CMD_EFFECTS:
+ return nvmet_execute_get_log_cmd_effects_ns(req);
+ case NVME_LOG_ANA:
+ return nvmet_execute_get_log_page_ana(req);
+ }
+ pr_err("unhandled lid %d on qid %d\n",
+ req->cmd->get_log_page.lid, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -565,6 +595,28 @@ out:
nvmet_req_complete(req, status);
}
+static void nvmet_execute_identify(struct nvmet_req *req)
+{
+ if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+ return;
+
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_NS:
+ return nvmet_execute_identify_ns(req);
+ case NVME_ID_CNS_CTRL:
+ return nvmet_execute_identify_ctrl(req);
+ case NVME_ID_CNS_NS_ACTIVE_LIST:
+ return nvmet_execute_identify_nslist(req);
+ case NVME_ID_CNS_NS_DESC_LIST:
+ return nvmet_execute_identify_desclist(req);
+ }
+
+ pr_err("unhandled identify cns %d on qid %d\n",
+ req->cmd->identify.cns, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_identify, cns);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
/*
* A "minimum viable" abort implementation: the command is mandatory in the
* spec, but we are not required to do any useful work. We couldn't really
@@ -574,6 +626,8 @@ out:
*/
static void nvmet_execute_abort(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, 0))
+ return;
nvmet_set_result(req, 1);
nvmet_req_complete(req, 0);
}
@@ -658,6 +712,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
case NVME_FEAT_NUM_QUEUES:
nvmet_set_result(req,
@@ -721,6 +778,9 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
/*
* These features are mandatory in the spec, but we don't
@@ -785,6 +845,9 @@ void nvmet_execute_async_event(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
mutex_lock(&ctrl->lock);
if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
mutex_unlock(&ctrl->lock);
@@ -801,6 +864,9 @@ void nvmet_execute_keep_alive(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
pr_debug("ctrl %d update keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
@@ -813,77 +879,36 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
struct nvme_command *cmd = req->cmd;
u16 ret;
+ if (nvme_is_fabrics(cmd))
+ return nvmet_parse_fabrics_cmd(req);
+ if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+ return nvmet_parse_discovery_cmd(req);
+
ret = nvmet_check_ctrl_status(req, cmd);
if (unlikely(ret))
return ret;
switch (cmd->common.opcode) {
case nvme_admin_get_log_page:
- req->data_len = nvmet_get_log_page_len(cmd);
-
- switch (cmd->get_log_page.lid) {
- case NVME_LOG_ERROR:
- req->execute = nvmet_execute_get_log_page_error;
- return 0;
- case NVME_LOG_SMART:
- req->execute = nvmet_execute_get_log_page_smart;
- return 0;
- case NVME_LOG_FW_SLOT:
- /*
- * We only support a single firmware slot which always
- * is active, so we can zero out the whole firmware slot
- * log and still claim to fully implement this mandatory
- * log page.
- */
- req->execute = nvmet_execute_get_log_page_noop;
- return 0;
- case NVME_LOG_CHANGED_NS:
- req->execute = nvmet_execute_get_log_changed_ns;
- return 0;
- case NVME_LOG_CMD_EFFECTS:
- req->execute = nvmet_execute_get_log_cmd_effects_ns;
- return 0;
- case NVME_LOG_ANA:
- req->execute = nvmet_execute_get_log_page_ana;
- return 0;
- }
- break;
+ req->execute = nvmet_execute_get_log_page;
+ return 0;
case nvme_admin_identify:
- req->data_len = NVME_IDENTIFY_DATA_SIZE;
- switch (cmd->identify.cns) {
- case NVME_ID_CNS_NS:
- req->execute = nvmet_execute_identify_ns;
- return 0;
- case NVME_ID_CNS_CTRL:
- req->execute = nvmet_execute_identify_ctrl;
- return 0;
- case NVME_ID_CNS_NS_ACTIVE_LIST:
- req->execute = nvmet_execute_identify_nslist;
- return 0;
- case NVME_ID_CNS_NS_DESC_LIST:
- req->execute = nvmet_execute_identify_desclist;
- return 0;
- }
- break;
+ req->execute = nvmet_execute_identify;
+ return 0;
case nvme_admin_abort_cmd:
req->execute = nvmet_execute_abort;
- req->data_len = 0;
return 0;
case nvme_admin_set_features:
req->execute = nvmet_execute_set_features;
- req->data_len = 0;
return 0;
case nvme_admin_get_features:
req->execute = nvmet_execute_get_features;
- req->data_len = 0;
return 0;
case nvme_admin_async_event:
req->execute = nvmet_execute_async_event;
- req->data_len = 0;
return 0;
case nvme_admin_keep_alive:
req->execute = nvmet_execute_keep_alive;
- req->data_len = 0;
return 0;
}
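
With req->data_len gone, the parse step only selects an execute handler; each handler now begins by validating the SGL-derived transfer length itself via nvmet_check_data_len(), which completes the request with SGL_INVALID_DATA | DNR on a mismatch and returns false. A simplified, self-contained model of the dispatch pattern; the struct and status values here are stand-ins for illustration, not the nvmet definitions:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct req {
		size_t transfer_len;		/* length parsed from the SGL descriptor */
		void (*execute)(struct req *);
	};

	static void complete(struct req *req, int status)
	{
		printf("completed with status %d\n", status);
	}

	/* Mirrors the nvmet_check_data_len() contract: false means already completed. */
	static bool check_data_len(struct req *req, size_t expected)
	{
		if (req->transfer_len != expected) {
			complete(req, -1);	/* stand-in for SGL_INVALID_DATA | DNR */
			return false;
		}
		return true;
	}

	static void execute_keep_alive(struct req *req)
	{
		if (!check_data_len(req, 0))	/* keep-alive carries no data */
			return;
		complete(req, 0);
	}

	int main(void)
	{
		struct req req = { .transfer_len = 0 };

		/* parse step: pick the handler only, no length bookkeeping */
		req.execute = execute_keep_alive;
		req.execute(&req);
		return 0;
	}
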
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 3a67e244e568..28438b833c1b 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -892,14 +892,10 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
}
if (unlikely(!req->sq->ctrl))
- /* will return an error for any Non-connect command: */
+ /* will return an error for any non-connect command: */
status = nvmet_parse_connect_cmd(req);
else if (likely(req->sq->qid != 0))
status = nvmet_parse_io_cmd(req);
- else if (nvme_is_fabrics(req->cmd))
- status = nvmet_parse_fabrics_cmd(req);
- else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
- status = nvmet_parse_discovery_cmd(req);
else
status = nvmet_parse_admin_cmd(req);
@@ -930,15 +926,17 @@ void nvmet_req_uninit(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
-void nvmet_req_execute(struct nvmet_req *req)
+bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
{
- if (unlikely(req->data_len != req->transfer_len)) {
+ if (unlikely(data_len != req->transfer_len)) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
- } else
- req->execute(req);
+ return false;
+ }
+
+ return true;
}
-EXPORT_SYMBOL_GPL(nvmet_req_execute);
+EXPORT_SYMBOL_GPL(nvmet_check_data_len);
int nvmet_req_alloc_sgl(struct nvmet_req *req)
{
@@ -966,7 +964,7 @@ int nvmet_req_alloc_sgl(struct nvmet_req *req)
}
req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
- if (!req->sg)
+ if (unlikely(!req->sg))
return -ENOMEM;
return 0;
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 3764a8900850..0c2274b21e15 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -157,7 +157,7 @@ static size_t discovery_log_entries(struct nvmet_req *req)
return entries;
}
-static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
+static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -171,6 +171,16 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
u16 status = 0;
void *buffer;
+ if (!nvmet_check_data_len(req, data_len))
+ return;
+
+ if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
+ req->error_loc =
+ offsetof(struct nvme_get_log_page_command, lid);
+ status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ goto out;
+ }
+
/* Spec requires dword aligned offsets */
if (offset & 0x3) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
@@ -227,20 +237,35 @@ out:
nvmet_req_complete(req, status);
}
-static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
+static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
+ const char model[] = "Linux";
u16 status = 0;
+ if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+ return;
+
+ if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
+ req->error_loc = offsetof(struct nvme_identify, cns);
+ status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ goto out;
+ }
+
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
status = NVME_SC_INTERNAL;
goto out;
}
+ memset(id->sn, ' ', sizeof(id->sn));
+ bin2hex(id->sn, &ctrl->subsys->serial,
+ min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
memset(id->fr, ' ', sizeof(id->fr));
- strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
+ memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+ memcpy_and_pad(id->fr, sizeof(id->fr),
+ UTS_RELEASE, strlen(UTS_RELEASE), ' ');
/* no limit on data transfer sizes for now */
id->mdts = 0;
@@ -273,6 +298,9 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 stat;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
case NVME_FEAT_KATO:
stat = nvmet_set_feat_kato(req);
@@ -296,6 +324,9 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 stat = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
case NVME_FEAT_KATO:
nvmet_get_feat_kato(req);
@@ -328,47 +359,22 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
switch (cmd->common.opcode) {
case nvme_admin_set_features:
req->execute = nvmet_execute_disc_set_features;
- req->data_len = 0;
return 0;
case nvme_admin_get_features:
req->execute = nvmet_execute_disc_get_features;
- req->data_len = 0;
return 0;
case nvme_admin_async_event:
req->execute = nvmet_execute_async_event;
- req->data_len = 0;
return 0;
case nvme_admin_keep_alive:
req->execute = nvmet_execute_keep_alive;
- req->data_len = 0;
return 0;
case nvme_admin_get_log_page:
- req->data_len = nvmet_get_log_page_len(cmd);
-
- switch (cmd->get_log_page.lid) {
- case NVME_LOG_DISC:
- req->execute = nvmet_execute_get_disc_log_page;
- return 0;
- default:
- pr_err("unsupported get_log_page lid %d\n",
- cmd->get_log_page.lid);
- req->error_loc =
- offsetof(struct nvme_get_log_page_command, lid);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
- }
+ req->execute = nvmet_execute_disc_get_log_page;
+ return 0;
case nvme_admin_identify:
- req->data_len = NVME_IDENTIFY_DATA_SIZE;
- switch (cmd->identify.cns) {
- case NVME_ID_CNS_CTRL:
- req->execute =
- nvmet_execute_identify_disc_ctrl;
- return 0;
- default:
- pr_err("unsupported identify cns %d\n",
- cmd->identify.cns);
- req->error_loc = offsetof(struct nvme_identify, cns);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
- }
+ req->execute = nvmet_execute_disc_identify;
+ return 0;
default:
pr_err("unhandled cmd %d\n", cmd->common.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index d16b55ffe79f..f7297473d9eb 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -12,6 +12,9 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
u64 val = le64_to_cpu(req->cmd->prop_set.value);
u16 status = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
if (req->cmd->prop_set.attrib & 1) {
req->error_loc =
offsetof(struct nvmf_property_set_command, attrib);
@@ -38,6 +41,9 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
u16 status = 0;
u64 val = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
if (req->cmd->prop_get.attrib & 1) {
switch (le32_to_cpu(req->cmd->prop_get.offset)) {
case NVME_REG_CAP:
@@ -82,11 +88,9 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
switch (cmd->fabrics.fctype) {
case nvme_fabrics_type_property_set:
- req->data_len = 0;
req->execute = nvmet_execute_prop_set;
break;
case nvme_fabrics_type_property_get:
- req->data_len = 0;
req->execute = nvmet_execute_prop_get;
break;
default:
@@ -147,6 +151,9 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = NULL;
u16 status = 0;
+ if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+ return;
+
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
status = NVME_SC_INTERNAL;
@@ -211,6 +218,9 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
u16 qid = le16_to_cpu(c->qid);
u16 status = 0;
+ if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+ return;
+
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
status = NVME_SC_INTERNAL;
@@ -281,7 +291,6 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
- req->data_len = sizeof(struct nvmf_connect_data);
if (cmd->connect.qid == 0)
req->execute = nvmet_execute_admin_connect;
else
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index ce8d819f86cc..a0db6371b43e 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1495,20 +1495,20 @@ static void
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
- struct fcnvme_ls_disconnect_rqst *rqst =
- (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
- struct fcnvme_ls_disconnect_acc *acc =
- (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+ (struct fcnvme_ls_disconnect_assoc_rqst *)iod->rqstbuf;
+ struct fcnvme_ls_disconnect_assoc_acc *acc =
+ (struct fcnvme_ls_disconnect_assoc_acc *)iod->rspbuf;
struct nvmet_fc_tgt_assoc *assoc;
int ret = 0;
memset(acc, 0, sizeof(*acc));
- if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
+ if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst))
ret = VERR_DISCONN_LEN;
else if (rqst->desc_list_len !=
fcnvme_lsdesc_len(
- sizeof(struct fcnvme_ls_disconnect_rqst)))
+ sizeof(struct fcnvme_ls_disconnect_assoc_rqst)))
ret = VERR_DISCONN_RQST_LEN;
else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
ret = VERR_ASSOC_ID;
@@ -1523,8 +1523,11 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
fcnvme_lsdesc_len(
sizeof(struct fcnvme_lsdesc_disconn_cmd)))
ret = VERR_DISCONN_CMD_LEN;
- else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
- (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
+ /*
+ * As the standard changed on the LS, check if old format and scope
+ * something other than Association (e.g. 0).
+ */
+ else if (rqst->discon_cmd.rsvd8[0])
ret = VERR_DISCONN_SCOPE;
else {
/* match an active association */
@@ -1556,8 +1559,8 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(
- sizeof(struct fcnvme_ls_disconnect_acc)),
- FCNVME_LS_DISCONNECT);
+ sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
+ FCNVME_LS_DISCONNECT_ASSOC);
/* release get taken in nvmet_fc_find_target_assoc */
nvmet_fc_tgt_a_put(iod->assoc);
@@ -1632,7 +1635,7 @@ nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
/* Creates an IO Queue/Connection */
nvmet_fc_ls_create_connection(tgtport, iod);
break;
- case FCNVME_LS_DISCONNECT:
+ case FCNVME_LS_DISCONNECT_ASSOC:
/* Terminate a Queue/Connection or the Association */
nvmet_fc_ls_disconnect(tgtport, iod);
break;
@@ -2015,7 +2018,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
}
/* data transfer complete, resume with nvmet layer */
- nvmet_req_execute(&fod->req);
+ fod->req.execute(&fod->req);
break;
case NVMET_FCOP_READDATA:
@@ -2231,7 +2234,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
* can invoke the nvmet_layer now. If read data, cmd completion will
* push the data
*/
- nvmet_req_execute(&fod->req);
+ fod->req.execute(&fod->req);
return;
transport_error:
@@ -2299,7 +2302,7 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
/* validate iu, so the connection id can be used to find the queue */
if ((cmdiubuf_len != sizeof(*cmdiu)) ||
- (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
+ (cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
(cmdiu->fc_id != NVME_CMD_FC_ID) ||
(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
return -EIO;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 32008d85172b..b6fca0e421ef 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -147,8 +147,12 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
int sg_cnt = req->sg_cnt;
struct bio *bio;
struct scatterlist *sg;
+ struct blk_plug plug;
sector_t sector;
- int op, op_flags = 0, i;
+ int op, i;
+
+ if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+ return;
if (!req->sg_cnt) {
nvmet_req_complete(req, 0);
@@ -156,21 +160,20 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
if (req->cmd->rw.opcode == nvme_cmd_write) {
- op = REQ_OP_WRITE;
- op_flags = REQ_SYNC | REQ_IDLE;
+ op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
- op_flags |= REQ_FUA;
+ op |= REQ_FUA;
} else {
op = REQ_OP_READ;
}
if (is_pci_p2pdma_page(sg_page(req->sg)))
- op_flags |= REQ_NOMERGE;
+ op |= REQ_NOMERGE;
sector = le64_to_cpu(req->cmd->rw.slba);
sector <<= (req->ns->blksize_shift - 9);
- if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
+ if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
bio = &req->b.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
} else {
@@ -180,8 +183,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio_set_op_attrs(bio, op, op_flags);
+ bio->bi_opf = op;
+ blk_start_plug(&plug);
for_each_sg(req->sg, sg, req->sg_cnt, i) {
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
@@ -190,7 +194,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
- bio_set_op_attrs(bio, op, op_flags);
+ bio->bi_opf = op;
bio_chain(bio, prev);
submit_bio(prev);
@@ -201,12 +205,16 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
submit_bio(bio);
+ blk_finish_plug(&plug);
}
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
struct bio *bio = &req->b.inline_bio;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
bio_set_dev(bio, req->ns->bdev);
bio->bi_private = req;
@@ -261,12 +269,10 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
if (bio) {
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- if (status) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
- } else {
+ if (status)
+ bio_io_error(bio);
+ else
submit_bio(bio);
- }
} else {
nvmet_req_complete(req, status);
}
@@ -274,6 +280,9 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+ return;
+
switch (le32_to_cpu(req->cmd->dsm.attributes)) {
case NVME_DSMGMT_AD:
nvmet_bdev_execute_discard(req);
@@ -295,6 +304,9 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
sector_t nr_sector;
int ret;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
sector = le64_to_cpu(write_zeroes->slba) <<
(req->ns->blksize_shift - 9);
nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
@@ -319,20 +331,15 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_bdev_execute_rw;
- req->data_len = nvmet_rw_len(req);
return 0;
case nvme_cmd_flush:
req->execute = nvmet_bdev_execute_flush;
- req->data_len = 0;
return 0;
case nvme_cmd_dsm:
req->execute = nvmet_bdev_execute_dsm;
- req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
- sizeof(struct nvme_dsm_range);
return 0;
case nvme_cmd_write_zeroes:
req->execute = nvmet_bdev_execute_write_zeroes;
- req->data_len = 0;
return 0;
default:
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 05453f5d1448..caebfce06605 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -126,7 +126,7 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
mempool_free(req->f.bvec, req->ns->bvec_pool);
}
- if (unlikely(ret != req->data_len))
+ if (unlikely(ret != req->transfer_len))
status = errno_to_nvme_status(req, ret);
nvmet_req_complete(req, status);
}
@@ -146,7 +146,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
is_sync = true;
pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
- if (unlikely(pos + req->data_len > req->ns->size)) {
+ if (unlikely(pos + req->transfer_len > req->ns->size)) {
nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
return true;
}
@@ -173,7 +173,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
nr_bvec--;
}
- if (WARN_ON_ONCE(total_len != req->data_len)) {
+ if (WARN_ON_ONCE(total_len != req->transfer_len)) {
ret = -EIO;
goto complete;
}
@@ -232,6 +232,9 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
{
ssize_t nr_bvec = req->sg_cnt;
+ if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+ return;
+
if (!req->sg_cnt || !nr_bvec) {
nvmet_req_complete(req, 0);
return;
@@ -273,6 +276,8 @@ static void nvmet_file_flush_work(struct work_struct *w)
static void nvmet_file_execute_flush(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, 0))
+ return;
INIT_WORK(&req->f.work, nvmet_file_flush_work);
schedule_work(&req->f.work);
}
@@ -331,6 +336,8 @@ static void nvmet_file_dsm_work(struct work_struct *w)
static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+ return;
INIT_WORK(&req->f.work, nvmet_file_dsm_work);
schedule_work(&req->f.work);
}
@@ -359,6 +366,8 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w)
static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, 0))
+ return;
INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
schedule_work(&req->f.work);
}
@@ -371,20 +380,15 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_file_execute_rw;
- req->data_len = nvmet_rw_len(req);
return 0;
case nvme_cmd_flush:
req->execute = nvmet_file_execute_flush;
- req->data_len = 0;
return 0;
case nvme_cmd_dsm:
req->execute = nvmet_file_execute_dsm;
- req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
- sizeof(struct nvme_dsm_range);
return 0;
case nvme_cmd_write_zeroes:
req->execute = nvmet_file_execute_write_zeroes;
- req->data_len = 0;
return 0;
default:
pr_err("unhandled cmd for file ns %d on qid %d\n",
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 11f5aea97d1b..a758bb3d5dd4 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -76,7 +76,6 @@ static void nvme_loop_complete_rq(struct request *req)
{
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
- nvme_cleanup_cmd(req);
sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
nvme_complete_rq(req);
}
@@ -102,8 +101,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+ if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
+ cqe->command_id))) {
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
} else {
@@ -126,7 +125,7 @@ static void nvme_loop_execute_work(struct work_struct *work)
struct nvme_loop_iod *iod =
container_of(work, struct nvme_loop_iod, work);
- nvmet_req_execute(&iod->req);
+ iod->req.execute(&iod->req);
}
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index c51f8dd01dc4..46df45e837c9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -304,8 +304,6 @@ struct nvmet_req {
} f;
};
int sg_cnt;
- /* data length as parsed from the command: */
- size_t data_len;
/* data length as parsed from the SGL descriptor: */
size_t transfer_len;
@@ -375,7 +373,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
-void nvmet_req_execute(struct nvmet_req *req);
+bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgl(struct nvmet_req *req);
void nvmet_req_free_sgl(struct nvmet_req *req);
@@ -495,6 +493,12 @@ static inline u32 nvmet_rw_len(struct nvmet_req *req)
req->ns->blksize_shift;
}
+static inline u32 nvmet_dsm_len(struct nvmet_req *req)
+{
+ return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
+ sizeof(struct nvme_dsm_range);
+}
+
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
/* Convert a 32-bit number to a 16-bit 0's based number */
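
nvmet_dsm_len() centralizes the expected Dataset Management payload size: (nr + 1) ranges, each the size of the 16-byte struct nvme_dsm_range. A quick check of the arithmetic with a stand-in struct that mirrors that layout:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Same layout as the 16-byte NVMe DSM range descriptor. */
	struct dsm_range {
		uint32_t cattr;
		uint32_t nlb;
		uint64_t slba;
	};

	static size_t dsm_len(uint32_t nr)
	{
		/* nr is 0's based: nr == 0 means one range */
		return ((size_t)nr + 1) * sizeof(struct dsm_range);
	}

	int main(void)
	{
		assert(sizeof(struct dsm_range) == 16);
		assert(dsm_len(0) == 16);	/* a single range */
		assert(dsm_len(255) == 4096);	/* the 256-range maximum fits one 4 KiB page */
		printf("ok\n");
		return 0;
	}
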
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 36d906a7f70d..37d262a65877 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -603,7 +603,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
- nvmet_req_execute(&rsp->req);
+ rsp->req.execute(&rsp->req);
}
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -672,13 +672,13 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
return 0;
ret = nvmet_req_alloc_sgl(&rsp->req);
- if (ret < 0)
+ if (unlikely(ret < 0))
goto error_out;
ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
nvmet_data_dir(&rsp->req));
- if (ret < 0)
+ if (unlikely(ret < 0))
goto error_out;
rsp->n_rdma += ret;
@@ -746,7 +746,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
queue->cm_id->port_num, &rsp->read_cqe, NULL))
nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
} else {
- nvmet_req_execute(&rsp->req);
+ rsp->req.execute(&rsp->req);
}
return true;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index d535080b781f..af674fc0bb1e 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -320,7 +320,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
u32 len = le32_to_cpu(sgl->length);
- if (!cmd->req.data_len)
+ if (!len)
return 0;
if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
@@ -813,13 +813,11 @@ free_crypto:
static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
+ size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
int ret;
- /* recover the expected data transfer length */
- req->data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
-
if (!nvme_is_write(cmd->req.cmd) ||
- req->data_len > cmd->req.port->inline_data_size) {
+ data_len > cmd->req.port->inline_data_size) {
nvmet_prepare_receive_pdu(queue);
return;
}
@@ -932,7 +930,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
goto out;
}
- nvmet_req_execute(&queue->cmd->req);
+ queue->cmd->req.execute(&queue->cmd->req);
out:
nvmet_prepare_receive_pdu(queue);
return ret;
@@ -1052,7 +1050,7 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
nvmet_tcp_prep_recv_ddgst(cmd);
return 0;
}
- nvmet_req_execute(&cmd->req);
+ cmd->req.execute(&cmd->req);
}
nvmet_prepare_receive_pdu(queue);
@@ -1092,7 +1090,7 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
cmd->rbytes_done == cmd->req.transfer_len)
- nvmet_req_execute(&cmd->req);
+ cmd->req.execute(&cmd->req);
ret = 0;
out:
nvmet_prepare_receive_pdu(queue);
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 5542d9eadfe0..7d079154f849 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -116,7 +116,9 @@ int dasd_scan_partitions(struct dasd_block *block)
return -ENODEV;
}
- rc = blkdev_reread_part(bdev);
+ mutex_lock(&bdev->bd_mutex);
+ rc = bdev_disk_changed(bdev, false);
+ mutex_unlock(&bdev->bd_mutex);
if (rc)
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, rc %d", rc);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e4b55f9aa062..65e31df37b1f 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -839,6 +839,7 @@ struct qeth_card {
struct service_level qeth_service_level;
struct qdio_ssqd_desc ssqd;
debug_info_t *debug;
+ struct mutex sbp_lock;
struct mutex conf_mutex;
struct mutex discipline_mutex;
struct napi_struct napi;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index dda274351c21..83794d7494d4 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -901,30 +901,30 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
CCW_DEVID(cdev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
16, 1, irb, 64, 1);
- return 1;
+ return -EIO;
}
if (dstat & DEV_STAT_UNIT_CHECK) {
if (sense[SENSE_RESETTING_EVENT_BYTE] &
SENSE_RESETTING_EVENT_FLAG) {
QETH_CARD_TEXT(card, 2, "REVIND");
- return 1;
+ return -EIO;
}
if (sense[SENSE_COMMAND_REJECT_BYTE] &
SENSE_COMMAND_REJECT_FLAG) {
QETH_CARD_TEXT(card, 2, "CMDREJi");
- return 1;
+ return -EIO;
}
if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
QETH_CARD_TEXT(card, 2, "AFFE");
- return 1;
+ return -EIO;
}
if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
QETH_CARD_TEXT(card, 2, "ZEROSEN");
return 0;
}
QETH_CARD_TEXT(card, 2, "DGENCHK");
- return 1;
+ return -EIO;
}
return 0;
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bd8143e51747..4bccdce19b5a 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -467,10 +467,14 @@ static void qeth_l2_set_promisc_mode(struct qeth_card *card)
if (card->info.promisc_mode == enable)
return;
- if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+ if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) {
qeth_setadp_promisc_mode(card, enable);
- else if (card->options.sbp.reflect_promisc)
- qeth_l2_promisc_to_bridge(card, enable);
+ } else {
+ mutex_lock(&card->sbp_lock);
+ if (card->options.sbp.reflect_promisc)
+ qeth_l2_promisc_to_bridge(card, enable);
+ mutex_unlock(&card->sbp_lock);
+ }
}
/* New MAC address is added to the hash table and marked to be written on card
@@ -631,6 +635,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
int rc;
qeth_l2_vnicc_set_defaults(card);
+ mutex_init(&card->sbp_lock);
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l2_create_device_attributes(&gdev->dev);
@@ -804,10 +809,12 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
} else
card->info.hwtrap = 0;
+ mutex_lock(&card->sbp_lock);
qeth_bridgeport_query_support(card);
if (card->options.sbp.supported_funcs)
dev_info(&card->gdev->dev,
"The device represents a Bridge Capable Port\n");
+ mutex_unlock(&card->sbp_lock);
qeth_l2_register_dev_addr(card);
@@ -1162,9 +1169,9 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
/* Role should not change by itself, but if it did, */
/* information from the hardware is authoritative. */
- mutex_lock(&data->card->conf_mutex);
+ mutex_lock(&data->card->sbp_lock);
data->card->options.sbp.role = entry->role;
- mutex_unlock(&data->card->conf_mutex);
+ mutex_unlock(&data->card->sbp_lock);
snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
snprintf(env_role, sizeof(env_role), "ROLE=%s",
@@ -1230,9 +1237,9 @@ static void qeth_bridge_host_event_worker(struct work_struct *work)
: (data->hostevs.lost_event_mask == 0x02)
? "Bridge port state change"
: "Unknown reason");
- mutex_lock(&data->card->conf_mutex);
+ mutex_lock(&data->card->sbp_lock);
data->card->options.sbp.hostnotification = 0;
- mutex_unlock(&data->card->conf_mutex);
+ mutex_unlock(&data->card->sbp_lock);
qeth_bridge_emit_host_event(data->card, anev_abort,
0, NULL, NULL);
} else
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index f2c3b127b1e4..e2bcb26105a3 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -24,6 +24,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
if (qeth_l2_vnicc_is_in_use(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
+ mutex_lock(&card->sbp_lock);
if (qeth_card_hw_is_reachable(card) &&
card->options.sbp.supported_funcs)
rc = qeth_bridgeport_query_ports(card,
@@ -57,6 +58,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
else
rc = sprintf(buf, "%s\n", word);
}
+ mutex_unlock(&card->sbp_lock);
return rc;
}
@@ -91,6 +93,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->sbp_lock);
if (qeth_l2_vnicc_is_in_use(card))
rc = -EBUSY;
@@ -104,6 +107,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
} else
card->options.sbp.role = role;
+ mutex_unlock(&card->sbp_lock);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -158,6 +162,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
return rc;
mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->sbp_lock);
if (qeth_l2_vnicc_is_in_use(card))
rc = -EBUSY;
@@ -168,6 +173,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
} else
card->options.sbp.hostnotification = enable;
+ mutex_unlock(&card->sbp_lock);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -223,6 +229,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->sbp_lock);
if (qeth_l2_vnicc_is_in_use(card))
rc = -EBUSY;
@@ -234,6 +241,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
rc = 0;
}
+ mutex_unlock(&card->sbp_lock);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -269,6 +277,8 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
return;
if (!card->options.sbp.supported_funcs)
return;
+
+ mutex_lock(&card->sbp_lock);
if (card->options.sbp.role != QETH_SBP_ROLE_NONE) {
/* Conditional to avoid spurious error messages */
qeth_bridgeport_setrole(card, card->options.sbp.role);
@@ -280,8 +290,10 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
rc = qeth_bridgeport_an_set(card, 1);
if (rc)
card->options.sbp.hostnotification = 0;
- } else
+ } else {
qeth_bridgeport_an_set(card, 0);
+ }
+ mutex_unlock(&card->sbp_lock);
}
/* VNIC CHARS support */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ebb40160539f..470ee6dc3f7e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1291,9 +1291,17 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_WRITE:
return sd_setup_read_write_cmnd(cmd);
case REQ_OP_ZONE_RESET:
- return sd_zbc_setup_reset_cmnd(cmd, false);
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
+ false);
case REQ_OP_ZONE_RESET_ALL:
- return sd_zbc_setup_reset_cmnd(cmd, true);
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
+ true);
+ case REQ_OP_ZONE_OPEN:
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
+ case REQ_OP_ZONE_CLOSE:
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
+ case REQ_OP_ZONE_FINISH:
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
default:
WARN_ON_ONCE(1);
return BLK_STS_NOTSUPP;
@@ -1961,6 +1969,9 @@ static int sd_done(struct scsi_cmnd *SCpnt)
case REQ_OP_WRITE_SAME:
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
if (!result) {
good_bytes = blk_rq_bytes(req);
scsi_set_resid(SCpnt, 0);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 1eab779f812b..42fd3f00e4a5 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -209,11 +209,12 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
-extern blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all);
+blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
+ unsigned char op, bool all);
extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr);
-extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones);
+int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
#else /* CONFIG_BLK_DEV_ZONED */
@@ -225,8 +226,9 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
-static inline blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd,
- bool all)
+static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
+ unsigned char op,
+ bool all)
{
return BLK_STS_TARGET;
}
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 1efc69e194f8..0e5ede48f045 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -19,34 +19,27 @@
#include "sd.h"
-/**
- * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone,
- * @sdkp: The disk the report originated from
- * @buf: Address of the report zone descriptor
- * @zone: the destination zone structure
- *
- * All LBA sized values are converted to 512B sectors unit.
- */
-static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
- struct blk_zone *zone)
+static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
+ unsigned int idx, report_zones_cb cb, void *data)
{
struct scsi_device *sdp = sdkp->device;
+ struct blk_zone zone = { 0 };
- memset(zone, 0, sizeof(struct blk_zone));
-
- zone->type = buf[0] & 0x0f;
- zone->cond = (buf[1] >> 4) & 0xf;
+ zone.type = buf[0] & 0x0f;
+ zone.cond = (buf[1] >> 4) & 0xf;
if (buf[1] & 0x01)
- zone->reset = 1;
+ zone.reset = 1;
if (buf[1] & 0x02)
- zone->non_seq = 1;
-
- zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
- zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
- zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
- if (zone->type != ZBC_ZONE_TYPE_CONV &&
- zone->cond == ZBC_ZONE_COND_FULL)
- zone->wp = zone->start + zone->len;
+ zone.non_seq = 1;
+
+ zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
+ zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
+ zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
+ if (zone.type != ZBC_ZONE_TYPE_CONV &&
+ zone.cond == ZBC_ZONE_COND_FULL)
+ zone.wp = zone.start + zone.len;
+
+ return cb(&zone, idx, data);
}
/**
@@ -104,11 +97,6 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
return 0;
}
-/*
- * Maximum number of zones to get with one report zones command.
- */
-#define SD_ZBC_REPORT_MAX_ZONES 8192U
-
/**
* Allocate a buffer for report zones reply.
* @sdkp: The target disk
@@ -138,82 +126,94 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
* sure that the allocated buffer can always be mapped by limiting the
* number of pages allocated to the HBA max segments limit.
*/
- nr_zones = min(nr_zones, SD_ZBC_REPORT_MAX_ZONES);
- bufsize = roundup((nr_zones + 1) * 64, 512);
+ nr_zones = min(nr_zones, sdkp->nr_zones);
+ bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
bufsize = min_t(size_t, bufsize,
queue_max_hw_sectors(q) << SECTOR_SHIFT);
bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
- buf = vzalloc(bufsize);
- if (buf)
- *buflen = bufsize;
+ while (bufsize >= SECTOR_SIZE) {
+ buf = __vmalloc(bufsize,
+ GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY,
+ PAGE_KERNEL);
+ if (buf) {
+ *buflen = bufsize;
+ return buf;
+ }
+ bufsize >>= 1;
+ }
- return buf;
+ return NULL;
}
/**
- * sd_zbc_report_zones - Disk report zones operation.
- * @disk: The target disk
- * @sector: Start 512B sector of the report
- * @zones: Array of zone descriptors
- * @nr_zones: Number of descriptors in the array
- *
- * Execute a report zones command on the target disk.
+ * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
+ * @sdkp: The target disk
*/
+static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
+{
+ return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
+}
+
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct scsi_disk *sdkp = scsi_disk(disk);
- unsigned int i, nrz = *nr_zones;
+ unsigned int nr, i;
unsigned char *buf;
- size_t buflen = 0, offset = 0;
- int ret = 0;
+ size_t offset, buflen = 0;
+ int zone_idx = 0;
+ int ret;
if (!sd_is_zoned(sdkp))
/* Not a zoned device */
return -EOPNOTSUPP;
- buf = sd_zbc_alloc_report_buffer(sdkp, nrz, &buflen);
+ buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen);
if (!buf)
return -ENOMEM;
- ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
- sectors_to_logical(sdkp->device, sector), true);
- if (ret)
- goto out;
+ while (zone_idx < nr_zones && sector < get_capacity(disk)) {
+ ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
+ sectors_to_logical(sdkp->device, sector), true);
+ if (ret)
+ goto out;
+
+ offset = 0;
+ nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64);
+ if (!nr)
+ break;
+
+ for (i = 0; i < nr && zone_idx < nr_zones; i++) {
+ offset += 64;
+ ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
+ cb, data);
+ if (ret)
+ goto out;
+ zone_idx++;
+ }
- nrz = min(nrz, get_unaligned_be32(&buf[0]) / 64);
- for (i = 0; i < nrz; i++) {
- offset += 64;
- sd_zbc_parse_report(sdkp, buf + offset, zones);
- zones++;
+ sector += sd_zbc_zone_sectors(sdkp) * i;
}
- *nr_zones = nrz;
-
+ ret = zone_idx;
out:
kvfree(buf);
-
return ret;
}
/**
- * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
- * @sdkp: The target disk
- */
-static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
-{
- return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
-}
-
-/**
- * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
+ * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations
+ * can be RESET WRITE POINTER, OPEN, CLOSE or FINISH.
* @cmd: the command to setup
- * @all: Reset all zones control.
+ * @op: Operation to be performed
+ * @all: All zones control
*
- * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
+ * Called from sd_init_command() for REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL,
+ * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE or REQ_OP_ZONE_FINISH requests.
*/
-blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all)
+blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
+ unsigned char op, bool all)
{
struct request *rq = cmd->request;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
@@ -234,7 +234,7 @@ blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all)
cmd->cmd_len = 16;
memset(cmd->cmnd, 0, cmd->cmd_len);
cmd->cmnd[0] = ZBC_OUT;
- cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
+ cmd->cmnd[1] = op;
if (all)
cmd->cmnd[14] = 0x1;
else
@@ -263,14 +263,14 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
int result = cmd->result;
struct request *rq = cmd->request;
- if (req_op(rq) == REQ_OP_ZONE_RESET &&
+ if (op_is_zone_mgmt(req_op(rq)) &&
result &&
sshdr->sense_key == ILLEGAL_REQUEST &&
sshdr->asc == 0x24) {
/*
- * INVALID FIELD IN CDB error: reset of a conventional
- * zone was attempted. Nothing to worry about, so be
- * quiet about the error.
+ * INVALID FIELD IN CDB error: a zone management command was
+ * attempted on a conventional zone. Nothing to worry about,
+ * so be quiet about the error.
*/
rq->rq_flags |= RQF_QUIET;
}
@@ -335,32 +335,18 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
* Returns the zone size in number of blocks upon success or an error code
* upon failure.
*/
-static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
+static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ u32 *zblocks)
{
- size_t bufsize, buflen;
- unsigned int noio_flag;
u64 zone_blocks = 0;
- sector_t max_lba, block = 0;
- unsigned char *buf;
+ sector_t max_lba;
unsigned char *rec;
int ret;
- u8 same;
- /* Do all memory allocations as if GFP_NOIO was specified */
- noio_flag = memalloc_noio_save();
-
- /* Get a buffer */
- buf = sd_zbc_alloc_report_buffer(sdkp, SD_ZBC_REPORT_MAX_ZONES,
- &bufsize);
- if (!buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* Do a report zone to get max_lba and the same field */
- ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, 0, false);
+ /* Do a report zone to get max_lba and the size of the first zone */
+ ret = sd_zbc_do_report_zones(sdkp, buf, SD_BUF_SIZE, 0, false);
if (ret)
- goto out_free;
+ return ret;
if (sdkp->rc_basis == 0) {
/* The max_lba field is the capacity of this device */
@@ -375,82 +361,27 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
}
}
- /*
- * Check same field: for any value other than 0, we know that all zones
- * have the same size.
- */
- same = buf[4] & 0x0f;
- if (same > 0) {
- rec = &buf[64];
- zone_blocks = get_unaligned_be64(&rec[8]);
- goto out;
- }
-
- /*
- * Check the size of all zones: all zones must be of
- * equal size, except the last zone which can be smaller
- * than other zones.
- */
- do {
-
- /* Parse REPORT ZONES header */
- buflen = min_t(size_t, get_unaligned_be32(&buf[0]) + 64,
- bufsize);
- rec = buf + 64;
-
- /* Parse zone descriptors */
- while (rec < buf + buflen) {
- u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
-
- if (zone_blocks == 0) {
- zone_blocks = this_zone_blocks;
- } else if (this_zone_blocks != zone_blocks &&
- (block + this_zone_blocks < sdkp->capacity
- || this_zone_blocks > zone_blocks)) {
- zone_blocks = 0;
- goto out;
- }
- block += this_zone_blocks;
- rec += 64;
- }
-
- if (block < sdkp->capacity) {
- ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, block,
- true);
- if (ret)
- goto out_free;
- }
-
- } while (block < sdkp->capacity);
-
-out:
- if (!zone_blocks) {
- if (sdkp->first_scan)
- sd_printk(KERN_NOTICE, sdkp,
- "Devices with non constant zone "
- "size are not supported\n");
- ret = -ENODEV;
- } else if (!is_power_of_2(zone_blocks)) {
+ /* Parse REPORT ZONES header */
+ rec = buf + 64;
+ zone_blocks = get_unaligned_be64(&rec[8]);
+ if (!zone_blocks || !is_power_of_2(zone_blocks)) {
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"Devices with non power of 2 zone "
"size are not supported\n");
- ret = -ENODEV;
- } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+ return -ENODEV;
+ }
+
+ if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"Zone size too large\n");
- ret = -EFBIG;
- } else {
- *zblocks = zone_blocks;
- ret = 0;
+ return -EFBIG;
}
-out_free:
- memalloc_noio_restore(noio_flag);
- kvfree(buf);
+ *zblocks = zone_blocks;
- return ret;
+ return 0;
}
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
@@ -476,7 +407,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
* Check zone size: only devices with a constant zone size (except
* an eventual last runt zone) that is a power of 2 are supported.
*/
- ret = sd_zbc_check_zones(sdkp, &zone_blocks);
+ ret = sd_zbc_check_zones(sdkp, buf, &zone_blocks);
if (ret != 0)
goto err;
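
sd_zbc_report_zones() no longer fills a caller-provided blk_zone array; it loops over REPORT ZONES replies, hands each parsed zone to a report_zones_cb callback, stops early if the callback returns non-zero, and returns the number of zones reported. A simplified standalone model of that iteration; the zone struct and the fixed-geometry "device" are invented for illustration:

	#include <stdio.h>

	struct zone {
		unsigned long long start;
		unsigned long long len;
	};

	typedef int (*report_zones_cb)(struct zone *z, unsigned int idx, void *data);

	/* Pretend device: 8 zones of 256 MiB each, in 512-byte sectors. */
	#define NR_DEV_ZONES	8u
	#define ZONE_SECTORS	(256ull * 2048)

	static int report_zones(unsigned long long sector, unsigned int nr_zones,
				report_zones_cb cb, void *data)
	{
		unsigned int idx = 0;
		int ret;

		while (idx < nr_zones && sector < NR_DEV_ZONES * ZONE_SECTORS) {
			struct zone z = {
				.start = sector - sector % ZONE_SECTORS,
				.len = ZONE_SECTORS,
			};

			ret = cb(&z, idx, data);
			if (ret)
				return ret;	/* callback asked to stop */
			idx++;
			sector = z.start + z.len;
		}
		return idx;	/* number of zones reported */
	}

	static int print_zone(struct zone *z, unsigned int idx, void *data)
	{
		printf("zone %u: start %llu, len %llu\n", idx, z->start, z->len);
		return 0;
	}

	int main(void)
	{
		int nr = report_zones(0, 4, print_zone, NULL);

		printf("%d zones reported\n", nr);
		return 0;
	}
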
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 226fbb995fb0..e05679c478e2 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -772,6 +772,13 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb,
return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER;
}
+static unsigned long leak_balloon_pages(struct virtio_balloon *vb,
+ unsigned long pages_to_free)
+{
+ return leak_balloon(vb, pages_to_free * VIRTIO_BALLOON_PAGES_PER_PAGE) /
+ VIRTIO_BALLOON_PAGES_PER_PAGE;
+}
+
static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
unsigned long pages_to_free)
{
@@ -782,11 +789,10 @@ static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
* VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it
* multiple times to deflate pages till reaching pages_to_free.
*/
- while (vb->num_pages && pages_to_free) {
- pages_freed += leak_balloon(vb, pages_to_free) /
- VIRTIO_BALLOON_PAGES_PER_PAGE;
- pages_to_free -= pages_freed;
- }
+ while (vb->num_pages && pages_freed < pages_to_free)
+ pages_freed += leak_balloon_pages(vb,
+ pages_to_free - pages_freed);
+
update_balloon_size(vb);
return pages_freed;
@@ -799,7 +805,7 @@ static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
struct virtio_balloon *vb = container_of(shrinker,
struct virtio_balloon, shrinker);
- pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE;
+ pages_to_free = sc->nr_to_scan;
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
pages_freed = shrink_free_pages(vb, pages_to_free);
@@ -820,7 +826,7 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
unsigned long count;
count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
- count += vb->num_free_page_blocks >> VIRTIO_BALLOON_FREE_PAGE_ORDER;
+ count += vb->num_free_page_blocks << VIRTIO_BALLOON_FREE_PAGE_ORDER;
return count;
}
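
The shrinker fixes are about units: the scan target and the returned count are in pages, and each free-page hint block covers 2^VIRTIO_BALLOON_FREE_PAGE_ORDER pages, so the block count must be shifted left, not right, when reporting reclaimable objects. Worked numbers with an illustrative order; the real value depends on the kernel configuration:

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative values, not taken from a particular kernel config. */
		unsigned int free_page_order = 10;	/* 2^10 pages per hint block */
		unsigned long num_free_page_blocks = 3;

		/* Wrong: '>>' under-reports the reclaimable pages ... */
		unsigned long wrong = num_free_page_blocks >> free_page_order;
		/* ... right: each block contributes 2^order pages. */
		unsigned long right = num_free_page_blocks << free_page_order;

		printf("blocks=%lu -> wrong=%lu pages, right=%lu pages\n",
		       num_free_page_blocks, wrong, right);
		return 0;
	}
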
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index a8041e451e9e..867c7ebd3f10 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -583,7 +583,7 @@ unmap_release:
kfree(desc);
END_USE(vq);
- return -EIO;
+ return -ENOMEM;
}
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
@@ -1085,7 +1085,7 @@ unmap_release:
kfree(desc);
END_USE(vq);
- return -EIO;
+ return -ENOMEM;
}
static inline int virtqueue_add_packed(struct virtqueue *_vq,
diff --git a/fs/Kconfig b/fs/Kconfig
index 2501e6f1f965..7b623e9fc1b0 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -322,4 +322,7 @@ source "fs/nls/Kconfig"
source "fs/dlm/Kconfig"
source "fs/unicode/Kconfig"
+config IO_WQ
+ bool
+
endmenu
diff --git a/fs/Makefile b/fs/Makefile
index 14231b4cf383..1148c555c4d3 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_EVENTFD) += eventfd.o
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_AIO) += aio.o
obj-$(CONFIG_IO_URING) += io_uring.o
+obj-$(CONFIG_IO_WQ) += io-wq.o
obj-$(CONFIG_FS_DAX) += dax.o
obj-$(CONFIG_FS_ENCRYPTION) += crypto/
obj-$(CONFIG_FS_VERITY) += verity/
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 6cdd7047c809..2dca8df1a18d 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -312,7 +312,6 @@ void afs_break_callbacks(struct afs_server *server, size_t count,
_enter("%p,%zu,", server, count);
ASSERT(server != NULL);
- ASSERTCMP(count, <=, AFSCBMAX);
/* TODO: Sort the callback break list by volume ID */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 0e5269374ac1..61498d9f06ef 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -637,6 +637,7 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
call->need_attention = false;
__set_current_state(TASK_RUNNING);
afs_deliver_to_call(call);
+ timeout = rtt2;
continue;
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index f18911e8d770..488641b1a418 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -435,6 +435,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
/* fill in the superblock */
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_magic = AFS_FS_MAGIC;
sb->s_op = &afs_super_ops;
if (!as->dyn_root)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 9c073dbdc1b0..ee63c2732fa2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1403,11 +1403,7 @@ static void flush_disk(struct block_device *bdev, bool kill_dirty)
"resized disk %s\n",
bdev->bd_disk ? bdev->bd_disk->disk_name : "");
}
-
- if (!bdev->bd_disk)
- return;
- if (disk_part_scan_enabled(bdev->bd_disk))
- bdev->bd_invalidated = 1;
+ bdev->bd_invalidated = 1;
}
/**
@@ -1420,8 +1416,8 @@ static void flush_disk(struct block_device *bdev, bool kill_dirty)
 * and adjusts it if it differs. When shrinking the bdev size, all its caches
* are freed.
*/
-void check_disk_size_change(struct gendisk *disk, struct block_device *bdev,
- bool verbose)
+static void check_disk_size_change(struct gendisk *disk,
+ struct block_device *bdev, bool verbose)
{
loff_t disk_size, bdev_size;
@@ -1437,6 +1433,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev,
if (bdev_size > disk_size)
flush_disk(bdev, false);
}
+ bdev->bd_invalidated = 0;
}
/**
@@ -1466,7 +1463,6 @@ int revalidate_disk(struct gendisk *disk)
mutex_lock(&bdev->bd_mutex);
check_disk_size_change(disk, bdev, ret == 0);
- bdev->bd_invalidated = 0;
mutex_unlock(&bdev->bd_mutex);
bdput(bdev);
}
@@ -1512,6 +1508,45 @@ EXPORT_SYMBOL(bd_set_size);
static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
+int bdev_disk_changed(struct block_device *bdev, bool invalidate)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ int ret;
+
+ lockdep_assert_held(&bdev->bd_mutex);
+
+rescan:
+ ret = blk_drop_partitions(disk, bdev);
+ if (ret)
+ return ret;
+
+ if (invalidate)
+ set_capacity(disk, 0);
+ else if (disk->fops->revalidate_disk)
+ disk->fops->revalidate_disk(disk);
+
+ check_disk_size_change(disk, bdev, !invalidate);
+
+ if (get_capacity(disk)) {
+ ret = blk_add_partitions(disk, bdev);
+ if (ret == -EAGAIN)
+ goto rescan;
+ } else {
+ /*
+ * Tell userspace that the media / partition table may have
+ * changed.
+ */
+ kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+ }
+
+ return ret;
+}
+/*
+ * Only exported for loop and dasd for historic reasons. Don't use in new
+ * code!
+ */
+EXPORT_SYMBOL_GPL(bdev_disk_changed);
+
/*
* bd_mutex locking:
*
@@ -1594,12 +1629,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
* The latter is necessary to prevent ghost
* partitions on a removed medium.
*/
- if (bdev->bd_invalidated) {
- if (!ret)
- rescan_partitions(disk, bdev);
- else if (ret == -ENOMEDIUM)
- invalidate_partitions(disk, bdev);
- }
+ if (bdev->bd_invalidated &&
+ (!ret || ret == -ENOMEDIUM))
+ bdev_disk_changed(bdev, ret == -ENOMEDIUM);
if (ret)
goto out_clear;
@@ -1632,12 +1664,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (bdev->bd_disk->fops->open)
ret = bdev->bd_disk->fops->open(bdev, mode);
/* the same as first opener case, read comment there */
- if (bdev->bd_invalidated) {
- if (!ret)
- rescan_partitions(bdev->bd_disk, bdev);
- else if (ret == -ENOMEDIUM)
- invalidate_partitions(bdev->bd_disk, bdev);
- }
+ if (bdev->bd_invalidated &&
+ (!ret || ret == -ENOMEDIUM))
+ bdev_disk_changed(bdev, ret == -ENOMEDIUM);
if (ret)
goto out_unlock_bdev;
}
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index d12ea28836a5..2f04024c3588 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -958,8 +958,8 @@ static int cramfs_get_tree(struct fs_context *fc)
if (IS_ENABLED(CONFIG_CRAMFS_MTD)) {
ret = get_tree_mtd(fc, cramfs_mtd_fill_super);
- if (ret < 0)
- return ret;
+ if (!ret)
+ return 0;
}
if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
ret = get_tree_bdev(fc, cramfs_blkdev_fill_super);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 808709581481..2c997f94a3b2 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1771,7 +1771,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
return -EIO;
}
trace_f2fs_issue_reset_zone(bdev, blkstart);
- return blkdev_reset_zones(bdev, sector, nr_sects, GFP_NOFS);
+ return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
+ sector, nr_sects, GFP_NOFS);
}
/* For conventional zones, use regular discard if supported */
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 1443cee15863..95761740cf1f 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2857,15 +2857,21 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
}
#ifdef CONFIG_BLK_DEV_ZONED
+static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
+{
+ struct f2fs_dev_info *dev = data;
+
+ if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
+ set_bit(idx, dev->blkz_seq);
+ return 0;
+}
+
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
struct block_device *bdev = FDEV(devi).bdev;
sector_t nr_sectors = bdev->bd_part->nr_sects;
- sector_t sector = 0;
- struct blk_zone *zones;
- unsigned int i, nr_zones;
- unsigned int n = 0;
- int err = -EIO;
+ int ret;
if (!f2fs_sb_has_blkzoned(sbi))
return 0;
@@ -2890,38 +2896,13 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
if (!FDEV(devi).blkz_seq)
return -ENOMEM;
-#define F2FS_REPORT_NR_ZONES 4096
-
- zones = f2fs_kzalloc(sbi,
- array_size(F2FS_REPORT_NR_ZONES,
- sizeof(struct blk_zone)),
- GFP_KERNEL);
- if (!zones)
- return -ENOMEM;
-
/* Get block zones type */
- while (zones && sector < nr_sectors) {
-
- nr_zones = F2FS_REPORT_NR_ZONES;
- err = blkdev_report_zones(bdev, sector, zones, &nr_zones);
- if (err)
- break;
- if (!nr_zones) {
- err = -EIO;
- break;
- }
-
- for (i = 0; i < nr_zones; i++) {
- if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
- set_bit(n, FDEV(devi).blkz_seq);
- sector += zones[i].len;
- n++;
- }
- }
-
- kvfree(zones);
-	return err;
+	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
+				  &FDEV(devi));
+	if (ret < 0)
+		return ret;
+	return 0;
}
#endif
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 3d40771e8e7c..41b6438bd2d9 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -261,7 +261,7 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
static bool rw_hint_valid(enum rw_hint hint)
{
switch (hint) {
- case RWF_WRITE_LIFE_NOT_SET:
+ case RWH_WRITE_LIFE_NOT_SET:
case RWH_WRITE_LIFE_NONE:
case RWH_WRITE_LIFE_SHORT:
case RWH_WRITE_LIFE_MEDIUM:
diff --git a/fs/file.c b/fs/file.c
index 3da91a112bab..b241ea7f1aa4 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -795,7 +795,7 @@ unsigned long __fdget_pos(unsigned int fd)
unsigned long v = __fdget(fd);
struct file *file = (struct file *)(v & ~3);
- if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
+ if (file && !(file->f_mode & FMODE_STREAM)) {
if (file_count(file) > 1) {
v |= FDPUT_POS_UNLOCK;
mutex_lock(&file->f_pos_lock);
diff --git a/fs/io-wq.c b/fs/io-wq.c
new file mode 100644
index 000000000000..9174007ce107
--- /dev/null
+++ b/fs/io-wq.c
@@ -0,0 +1,1065 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Basic worker thread pool for io_uring
+ *
+ * Copyright (C) 2019 Jens Axboe
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched/signal.h>
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
+#include <linux/sched/mm.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/rculist_nulls.h>
+
+#include "io-wq.h"
+
+#define WORKER_IDLE_TIMEOUT (5 * HZ)
+
+enum {
+ IO_WORKER_F_UP = 1, /* up and active */
+ IO_WORKER_F_RUNNING = 2, /* account as running */
+ IO_WORKER_F_FREE = 4, /* worker on free list */
+ IO_WORKER_F_EXITING = 8, /* worker exiting */
+ IO_WORKER_F_FIXED = 16, /* static idle worker */
+ IO_WORKER_F_BOUND = 32, /* is doing bounded work */
+};
+
+enum {
+ IO_WQ_BIT_EXIT = 0, /* wq exiting */
+ IO_WQ_BIT_CANCEL = 1, /* cancel work on list */
+};
+
+enum {
+ IO_WQE_FLAG_STALLED = 1, /* stalled on hash */
+};
+
+/*
+ * One for each thread in a wqe pool
+ */
+struct io_worker {
+ refcount_t ref;
+ unsigned flags;
+ struct hlist_nulls_node nulls_node;
+ struct list_head all_list;
+ struct task_struct *task;
+ wait_queue_head_t wait;
+ struct io_wqe *wqe;
+
+ struct io_wq_work *cur_work;
+ spinlock_t lock;
+
+ struct rcu_head rcu;
+ struct mm_struct *mm;
+ struct files_struct *restore_files;
+};
+
+#if BITS_PER_LONG == 64
+#define IO_WQ_HASH_ORDER 6
+#else
+#define IO_WQ_HASH_ORDER 5
+#endif
+
+struct io_wqe_acct {
+ unsigned nr_workers;
+ unsigned max_workers;
+ atomic_t nr_running;
+};
+
+enum {
+ IO_WQ_ACCT_BOUND,
+ IO_WQ_ACCT_UNBOUND,
+};
+
+/*
+ * Per-node worker thread pool
+ */
+struct io_wqe {
+ struct {
+ spinlock_t lock;
+ struct list_head work_list;
+ unsigned long hash_map;
+ unsigned flags;
+ } ____cacheline_aligned_in_smp;
+
+ int node;
+ struct io_wqe_acct acct[2];
+
+ struct hlist_nulls_head free_list;
+ struct hlist_nulls_head busy_list;
+ struct list_head all_list;
+
+ struct io_wq *wq;
+};
+
+/*
+ * Per io_wq state
+ */
+struct io_wq {
+ struct io_wqe **wqes;
+ unsigned long state;
+ unsigned nr_wqes;
+
+ get_work_fn *get_work;
+ put_work_fn *put_work;
+
+ struct task_struct *manager;
+ struct user_struct *user;
+ struct mm_struct *mm;
+ refcount_t refs;
+ struct completion done;
+};
+
+static bool io_worker_get(struct io_worker *worker)
+{
+ return refcount_inc_not_zero(&worker->ref);
+}
+
+static void io_worker_release(struct io_worker *worker)
+{
+ if (refcount_dec_and_test(&worker->ref))
+ wake_up_process(worker->task);
+}
+
+/*
+ * Note: drops the wqe->lock if returning true! The caller must re-acquire
+ * the lock in that case. Some callers need to restart handling if this
+ * happens, so we can't just re-acquire the lock on behalf of the caller.
+ */
+static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
+{
+ bool dropped_lock = false;
+
+ if (current->files != worker->restore_files) {
+ __acquire(&wqe->lock);
+ spin_unlock_irq(&wqe->lock);
+ dropped_lock = true;
+
+ task_lock(current);
+ current->files = worker->restore_files;
+ task_unlock(current);
+ }
+
+ /*
+ * If we have an active mm, we need to drop the wq lock before unusing
+ * it. If we do, return true and let the caller retry the idle loop.
+ */
+ if (worker->mm) {
+ if (!dropped_lock) {
+ __acquire(&wqe->lock);
+ spin_unlock_irq(&wqe->lock);
+ dropped_lock = true;
+ }
+ __set_current_state(TASK_RUNNING);
+ set_fs(KERNEL_DS);
+ unuse_mm(worker->mm);
+ mmput(worker->mm);
+ worker->mm = NULL;
+ }
+
+ return dropped_lock;
+}
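/*
 * A hedged caller-side sketch of the contract documented above: when
 * __io_worker_unuse() returns true it has already dropped wqe->lock, so the
 * caller must re-acquire it (or restart) before touching wqe state again.
 * This mirrors the pattern used by io_worker_exit() and io_wqe_worker()
 * below; the helper name is illustrative, not part of this patch.
 */
static void example_unuse_and_continue(struct io_wqe *wqe,
				       struct io_worker *worker)
{
	spin_lock_irq(&wqe->lock);
	if (__io_worker_unuse(wqe, worker)) {
		__release(&wqe->lock);		/* lock already dropped for us */
		spin_lock_irq(&wqe->lock);	/* re-take before continuing */
	}
	/* ... wqe->lock held again here ... */
	spin_unlock_irq(&wqe->lock);
}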
+
+static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
+ struct io_wq_work *work)
+{
+ if (work->flags & IO_WQ_WORK_UNBOUND)
+ return &wqe->acct[IO_WQ_ACCT_UNBOUND];
+
+ return &wqe->acct[IO_WQ_ACCT_BOUND];
+}
+
+static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
+ struct io_worker *worker)
+{
+ if (worker->flags & IO_WORKER_F_BOUND)
+ return &wqe->acct[IO_WQ_ACCT_BOUND];
+
+ return &wqe->acct[IO_WQ_ACCT_UNBOUND];
+}
+
+static void io_worker_exit(struct io_worker *worker)
+{
+ struct io_wqe *wqe = worker->wqe;
+ struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
+ unsigned nr_workers;
+
+ /*
+ * If we're not at zero, someone else is holding a brief reference
+ * to the worker. Wait for that to go away.
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!refcount_dec_and_test(&worker->ref))
+ schedule();
+ __set_current_state(TASK_RUNNING);
+
+ preempt_disable();
+ current->flags &= ~PF_IO_WORKER;
+ if (worker->flags & IO_WORKER_F_RUNNING)
+ atomic_dec(&acct->nr_running);
+ if (!(worker->flags & IO_WORKER_F_BOUND))
+ atomic_dec(&wqe->wq->user->processes);
+ worker->flags = 0;
+ preempt_enable();
+
+ spin_lock_irq(&wqe->lock);
+ hlist_nulls_del_rcu(&worker->nulls_node);
+ list_del_rcu(&worker->all_list);
+ if (__io_worker_unuse(wqe, worker)) {
+ __release(&wqe->lock);
+ spin_lock_irq(&wqe->lock);
+ }
+ acct->nr_workers--;
+ nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
+ wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
+ spin_unlock_irq(&wqe->lock);
+
+ /* all workers gone, wq exit can proceed */
+ if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
+ complete(&wqe->wq->done);
+
+ kfree_rcu(worker, rcu);
+}
+
+static inline bool io_wqe_run_queue(struct io_wqe *wqe)
+ __must_hold(wqe->lock)
+{
+ if (!list_empty(&wqe->work_list) && !(wqe->flags & IO_WQE_FLAG_STALLED))
+ return true;
+ return false;
+}
+
+/*
+ * Check head of free list for an available worker. If one isn't available,
+ * caller must wake up the wq manager to create one.
+ */
+static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
+ __must_hold(RCU)
+{
+ struct hlist_nulls_node *n;
+ struct io_worker *worker;
+
+ n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
+ if (is_a_nulls(n))
+ return false;
+
+ worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
+ if (io_worker_get(worker)) {
+ wake_up(&worker->wait);
+ io_worker_release(worker);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * We need a worker. If we find a free one, we're good. If not, and we're
+ * below the max number of workers, wake up the manager to create one.
+ */
+static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
+{
+ bool ret;
+
+ /*
+ * Most likely an attempt to queue unbounded work on an io_wq that
+ * wasn't setup with any unbounded workers.
+ */
+ WARN_ON_ONCE(!acct->max_workers);
+
+ rcu_read_lock();
+ ret = io_wqe_activate_free_worker(wqe);
+ rcu_read_unlock();
+
+ if (!ret && acct->nr_workers < acct->max_workers)
+ wake_up_process(wqe->wq->manager);
+}
+
+static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
+{
+ struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
+
+ atomic_inc(&acct->nr_running);
+}
+
+static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
+ __must_hold(wqe->lock)
+{
+ struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
+
+ if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
+ io_wqe_wake_worker(wqe, acct);
+}
+
+static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
+{
+ allow_kernel_signal(SIGINT);
+
+ current->flags |= PF_IO_WORKER;
+
+ worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
+ worker->restore_files = current->files;
+ io_wqe_inc_running(wqe, worker);
+}
+
+/*
+ * Worker will start processing some work. Move it to the busy list, if
+ * it's currently on the freelist
+ */
+static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
+ struct io_wq_work *work)
+ __must_hold(wqe->lock)
+{
+ bool worker_bound, work_bound;
+
+ if (worker->flags & IO_WORKER_F_FREE) {
+ worker->flags &= ~IO_WORKER_F_FREE;
+ hlist_nulls_del_init_rcu(&worker->nulls_node);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
+ }
+
+ /*
+ * If worker is moving from bound to unbound (or vice versa), then
+ * ensure we update the running accounting.
+ */
+ worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
+ work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
+ if (worker_bound != work_bound) {
+ io_wqe_dec_running(wqe, worker);
+ if (work_bound) {
+ worker->flags |= IO_WORKER_F_BOUND;
+ wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
+ wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
+ atomic_dec(&wqe->wq->user->processes);
+ } else {
+ worker->flags &= ~IO_WORKER_F_BOUND;
+ wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
+ wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
+ atomic_inc(&wqe->wq->user->processes);
+ }
+ io_wqe_inc_running(wqe, worker);
+ }
+}
+
+/*
+ * No work, worker going to sleep. Move to freelist, and unuse mm if we
+ * have one attached. Dropping the mm may potentially sleep, so we drop
+ * the lock in that case and return success. Since the caller has to
+ * retry the loop in that case (we changed task state), we don't regrab
+ * the lock if we return success.
+ */
+static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
+ __must_hold(wqe->lock)
+{
+ if (!(worker->flags & IO_WORKER_F_FREE)) {
+ worker->flags |= IO_WORKER_F_FREE;
+ hlist_nulls_del_init_rcu(&worker->nulls_node);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
+ }
+
+ return __io_worker_unuse(wqe, worker);
+}
+
+static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
+ __must_hold(wqe->lock)
+{
+ struct io_wq_work *work;
+
+ list_for_each_entry(work, &wqe->work_list, list) {
+ /* not hashed, can run anytime */
+ if (!(work->flags & IO_WQ_WORK_HASHED)) {
+ list_del(&work->list);
+ return work;
+ }
+
+ /* hashed, can run if not already running */
+ *hash = work->flags >> IO_WQ_HASH_SHIFT;
+ if (!(wqe->hash_map & BIT_ULL(*hash))) {
+ wqe->hash_map |= BIT_ULL(*hash);
+ list_del(&work->list);
+ return work;
+ }
+ }
+
+ return NULL;
+}
+
+static void io_worker_handle_work(struct io_worker *worker)
+ __releases(wqe->lock)
+{
+ struct io_wq_work *work, *old_work = NULL, *put_work = NULL;
+ struct io_wqe *wqe = worker->wqe;
+ struct io_wq *wq = wqe->wq;
+
+ do {
+ unsigned hash = -1U;
+
+ /*
+ * If we got some work, mark us as busy. If we didn't, but
+ * the list isn't empty, it means we stalled on hashed work.
+ * Mark us stalled so we don't keep looking for work when we
+ * can't make progress, any work completion or insertion will
+ * clear the stalled flag.
+ */
+ work = io_get_next_work(wqe, &hash);
+ if (work)
+ __io_worker_busy(wqe, worker, work);
+ else if (!list_empty(&wqe->work_list))
+ wqe->flags |= IO_WQE_FLAG_STALLED;
+
+ spin_unlock_irq(&wqe->lock);
+ if (put_work && wq->put_work)
+ wq->put_work(old_work);
+ if (!work)
+ break;
+next:
+ /* flush any pending signals before assigning new work */
+ if (signal_pending(current))
+ flush_signals(current);
+
+ spin_lock_irq(&worker->lock);
+ worker->cur_work = work;
+ spin_unlock_irq(&worker->lock);
+
+ if ((work->flags & IO_WQ_WORK_NEEDS_FILES) &&
+ current->files != work->files) {
+ task_lock(current);
+ current->files = work->files;
+ task_unlock(current);
+ }
+ if ((work->flags & IO_WQ_WORK_NEEDS_USER) && !worker->mm &&
+ wq->mm && mmget_not_zero(wq->mm)) {
+ use_mm(wq->mm);
+ set_fs(USER_DS);
+ worker->mm = wq->mm;
+ }
+ if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
+ work->flags |= IO_WQ_WORK_CANCEL;
+ if (worker->mm)
+ work->flags |= IO_WQ_WORK_HAS_MM;
+
+ if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL)) {
+ put_work = work;
+ wq->get_work(work);
+ }
+
+ old_work = work;
+ work->func(&work);
+
+ spin_lock_irq(&worker->lock);
+ worker->cur_work = NULL;
+ spin_unlock_irq(&worker->lock);
+
+ spin_lock_irq(&wqe->lock);
+
+ if (hash != -1U) {
+ wqe->hash_map &= ~BIT_ULL(hash);
+ wqe->flags &= ~IO_WQE_FLAG_STALLED;
+ }
+ if (work && work != old_work) {
+ spin_unlock_irq(&wqe->lock);
+
+ if (put_work && wq->put_work) {
+ wq->put_work(put_work);
+ put_work = NULL;
+ }
+
+ /* dependent work not hashed */
+ hash = -1U;
+ goto next;
+ }
+ } while (1);
+}
+
+static int io_wqe_worker(void *data)
+{
+ struct io_worker *worker = data;
+ struct io_wqe *wqe = worker->wqe;
+ struct io_wq *wq = wqe->wq;
+ DEFINE_WAIT(wait);
+
+ io_worker_start(wqe, worker);
+
+ while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+ prepare_to_wait(&worker->wait, &wait, TASK_INTERRUPTIBLE);
+
+ spin_lock_irq(&wqe->lock);
+ if (io_wqe_run_queue(wqe)) {
+ __set_current_state(TASK_RUNNING);
+ io_worker_handle_work(worker);
+ continue;
+ }
+ /* drops the lock on success, retry */
+ if (__io_worker_idle(wqe, worker)) {
+ __release(&wqe->lock);
+ continue;
+ }
+ spin_unlock_irq(&wqe->lock);
+ if (signal_pending(current))
+ flush_signals(current);
+ if (schedule_timeout(WORKER_IDLE_TIMEOUT))
+ continue;
+ /* timed out, exit unless we're the fixed worker */
+ if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
+ !(worker->flags & IO_WORKER_F_FIXED))
+ break;
+ }
+
+ finish_wait(&worker->wait, &wait);
+
+ if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+ spin_lock_irq(&wqe->lock);
+ if (!list_empty(&wqe->work_list))
+ io_worker_handle_work(worker);
+ else
+ spin_unlock_irq(&wqe->lock);
+ }
+
+ io_worker_exit(worker);
+ return 0;
+}
+
+/*
+ * Called when a worker is scheduled in. Mark us as currently running.
+ */
+void io_wq_worker_running(struct task_struct *tsk)
+{
+ struct io_worker *worker = kthread_data(tsk);
+ struct io_wqe *wqe = worker->wqe;
+
+ if (!(worker->flags & IO_WORKER_F_UP))
+ return;
+ if (worker->flags & IO_WORKER_F_RUNNING)
+ return;
+ worker->flags |= IO_WORKER_F_RUNNING;
+ io_wqe_inc_running(wqe, worker);
+}
+
+/*
+ * Called when worker is going to sleep. If there are no workers currently
+ * running and we have work pending, wake up a free one or have the manager
+ * set one up.
+ */
+void io_wq_worker_sleeping(struct task_struct *tsk)
+{
+ struct io_worker *worker = kthread_data(tsk);
+ struct io_wqe *wqe = worker->wqe;
+
+ if (!(worker->flags & IO_WORKER_F_UP))
+ return;
+ if (!(worker->flags & IO_WORKER_F_RUNNING))
+ return;
+
+ worker->flags &= ~IO_WORKER_F_RUNNING;
+
+ spin_lock_irq(&wqe->lock);
+ io_wqe_dec_running(wqe, worker);
+ spin_unlock_irq(&wqe->lock);
+}
+
+static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+{
+	struct io_wqe_acct *acct = &wqe->acct[index];
+ struct io_worker *worker;
+
+ worker = kcalloc_node(1, sizeof(*worker), GFP_KERNEL, wqe->node);
+ if (!worker)
+ return;
+
+ refcount_set(&worker->ref, 1);
+ worker->nulls_node.pprev = NULL;
+ init_waitqueue_head(&worker->wait);
+ worker->wqe = wqe;
+ spin_lock_init(&worker->lock);
+
+ worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
+ "io_wqe_worker-%d/%d", index, wqe->node);
+ if (IS_ERR(worker->task)) {
+ kfree(worker);
+ return;
+ }
+
+ spin_lock_irq(&wqe->lock);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
+ list_add_tail_rcu(&worker->all_list, &wqe->all_list);
+ worker->flags |= IO_WORKER_F_FREE;
+ if (index == IO_WQ_ACCT_BOUND)
+ worker->flags |= IO_WORKER_F_BOUND;
+ if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
+ worker->flags |= IO_WORKER_F_FIXED;
+ acct->nr_workers++;
+ spin_unlock_irq(&wqe->lock);
+
+ if (index == IO_WQ_ACCT_UNBOUND)
+ atomic_inc(&wq->user->processes);
+
+ wake_up_process(worker->task);
+}
+
+static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
+ __must_hold(wqe->lock)
+{
+ struct io_wqe_acct *acct = &wqe->acct[index];
+
+ /* always ensure we have one bounded worker */
+ if (index == IO_WQ_ACCT_BOUND && !acct->nr_workers)
+ return true;
+ /* if we have available workers or no work, no need */
+ if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
+ return false;
+ return acct->nr_workers < acct->max_workers;
+}
+
+/*
+ * Manager thread. Tasked with creating new workers, if we need them.
+ */
+static int io_wq_manager(void *data)
+{
+ struct io_wq *wq = data;
+
+ while (!kthread_should_stop()) {
+ int i;
+
+ for (i = 0; i < wq->nr_wqes; i++) {
+ struct io_wqe *wqe = wq->wqes[i];
+ bool fork_worker[2] = { false, false };
+
+ spin_lock_irq(&wqe->lock);
+ if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
+ fork_worker[IO_WQ_ACCT_BOUND] = true;
+ if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
+ fork_worker[IO_WQ_ACCT_UNBOUND] = true;
+ spin_unlock_irq(&wqe->lock);
+ if (fork_worker[IO_WQ_ACCT_BOUND])
+ create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
+ if (fork_worker[IO_WQ_ACCT_UNBOUND])
+ create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+
+ return 0;
+}
+
+static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
+ struct io_wq_work *work)
+{
+ bool free_worker;
+
+ if (!(work->flags & IO_WQ_WORK_UNBOUND))
+ return true;
+ if (atomic_read(&acct->nr_running))
+ return true;
+
+ rcu_read_lock();
+ free_worker = !hlist_nulls_empty(&wqe->free_list);
+ rcu_read_unlock();
+ if (free_worker)
+ return true;
+
+ if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
+ !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
+ return false;
+
+ return true;
+}
+
+static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
+{
+ struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
+ unsigned long flags;
+
+ /*
+ * Do early check to see if we need a new unbound worker, and if we do,
+ * if we're allowed to do so. This isn't 100% accurate as there's a
+ * gap between this check and incrementing the value, but that's OK.
+ * It's close enough to not be an issue, fork() has the same delay.
+ */
+ if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
+ work->flags |= IO_WQ_WORK_CANCEL;
+ work->func(&work);
+ return;
+ }
+
+ spin_lock_irqsave(&wqe->lock, flags);
+ list_add_tail(&work->list, &wqe->work_list);
+ wqe->flags &= ~IO_WQE_FLAG_STALLED;
+ spin_unlock_irqrestore(&wqe->lock, flags);
+
+ if (!atomic_read(&acct->nr_running))
+ io_wqe_wake_worker(wqe, acct);
+}
+
+void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
+{
+ struct io_wqe *wqe = wq->wqes[numa_node_id()];
+
+ io_wqe_enqueue(wqe, work);
+}
+
+/*
+ * Enqueue work, hashed by some key. Work items that hash to the same value
+ * will not be done in parallel. Used to limit concurrent writes, generally
+ * hashed by inode.
+ */
+void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val)
+{
+ struct io_wqe *wqe = wq->wqes[numa_node_id()];
+ unsigned bit;
+
+ bit = hash_ptr(val, IO_WQ_HASH_ORDER);
+ work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
+ io_wqe_enqueue(wqe, work);
+}
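/*
 * A hedged usage sketch of the hashed enqueue path above: hashing on the
 * inode serializes work that targets the same file, while work for different
 * files remains free to run in parallel. Only INIT_IO_WORK(),
 * io_wq_enqueue_hashed() and file_inode() are taken from existing code; the
 * handler and helper names are made up for illustration.
 */
static void example_write_handler(struct io_wq_work **workptr);

static void example_queue_file_write(struct io_wq *wq, struct io_wq_work *work,
				     struct file *file)
{
	INIT_IO_WORK(work, example_write_handler);	/* hypothetical handler */
	/* same inode -> same hash bucket -> never runs concurrently */
	io_wq_enqueue_hashed(wq, work, file_inode(file));
}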
+
+static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
+{
+ send_sig(SIGINT, worker->task, 1);
+ return false;
+}
+
+/*
+ * Iterate the passed in list and call the specific function for each
+ * worker that isn't exiting
+ */
+static bool io_wq_for_each_worker(struct io_wqe *wqe,
+ bool (*func)(struct io_worker *, void *),
+ void *data)
+{
+ struct io_worker *worker;
+ bool ret = false;
+
+ list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
+ if (io_worker_get(worker)) {
+ ret = func(worker, data);
+ io_worker_release(worker);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+void io_wq_cancel_all(struct io_wq *wq)
+{
+ int i;
+
+ set_bit(IO_WQ_BIT_CANCEL, &wq->state);
+
+ /*
+ * Browse both lists, as there's a gap between handing work off
+ * to a worker and the worker putting itself on the busy_list
+ */
+ rcu_read_lock();
+ for (i = 0; i < wq->nr_wqes; i++) {
+ struct io_wqe *wqe = wq->wqes[i];
+
+ io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
+ }
+ rcu_read_unlock();
+}
+
+struct io_cb_cancel_data {
+ struct io_wqe *wqe;
+ work_cancel_fn *cancel;
+ void *caller_data;
+};
+
+static bool io_work_cancel(struct io_worker *worker, void *cancel_data)
+{
+ struct io_cb_cancel_data *data = cancel_data;
+ unsigned long flags;
+ bool ret = false;
+
+ /*
+ * Hold the lock to avoid ->cur_work going out of scope, caller
+ * may dereference the passed in work.
+ */
+ spin_lock_irqsave(&worker->lock, flags);
+ if (worker->cur_work &&
+ data->cancel(worker->cur_work, data->caller_data)) {
+ send_sig(SIGINT, worker->task, 1);
+ ret = true;
+ }
+ spin_unlock_irqrestore(&worker->lock, flags);
+
+ return ret;
+}
+
+static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
+ work_cancel_fn *cancel,
+ void *cancel_data)
+{
+ struct io_cb_cancel_data data = {
+ .wqe = wqe,
+ .cancel = cancel,
+ .caller_data = cancel_data,
+ };
+ struct io_wq_work *work;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&wqe->lock, flags);
+ list_for_each_entry(work, &wqe->work_list, list) {
+ if (cancel(work, cancel_data)) {
+ list_del(&work->list);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&wqe->lock, flags);
+
+ if (found) {
+ work->flags |= IO_WQ_WORK_CANCEL;
+ work->func(&work);
+ return IO_WQ_CANCEL_OK;
+ }
+
+ rcu_read_lock();
+ found = io_wq_for_each_worker(wqe, io_work_cancel, &data);
+ rcu_read_unlock();
+ return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
+}
+
+enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+ void *data)
+{
+ enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
+ int i;
+
+ for (i = 0; i < wq->nr_wqes; i++) {
+ struct io_wqe *wqe = wq->wqes[i];
+
+ ret = io_wqe_cancel_cb_work(wqe, cancel, data);
+ if (ret != IO_WQ_CANCEL_NOTFOUND)
+ break;
+ }
+
+ return ret;
+}
+
+static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
+{
+ struct io_wq_work *work = data;
+ unsigned long flags;
+ bool ret = false;
+
+ if (worker->cur_work != work)
+ return false;
+
+ spin_lock_irqsave(&worker->lock, flags);
+ if (worker->cur_work == work) {
+ send_sig(SIGINT, worker->task, 1);
+ ret = true;
+ }
+ spin_unlock_irqrestore(&worker->lock, flags);
+
+ return ret;
+}
+
+static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
+ struct io_wq_work *cwork)
+{
+ struct io_wq_work *work;
+ unsigned long flags;
+ bool found = false;
+
+ cwork->flags |= IO_WQ_WORK_CANCEL;
+
+ /*
+ * First check pending list, if we're lucky we can just remove it
+ * from there. CANCEL_OK means that the work is returned as-new,
+ * no completion will be posted for it.
+ */
+ spin_lock_irqsave(&wqe->lock, flags);
+ list_for_each_entry(work, &wqe->work_list, list) {
+ if (work == cwork) {
+ list_del(&work->list);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&wqe->lock, flags);
+
+ if (found) {
+ work->flags |= IO_WQ_WORK_CANCEL;
+ work->func(&work);
+ return IO_WQ_CANCEL_OK;
+ }
+
+ /*
+ * Now check if a free (going busy) or busy worker has the work
+ * currently running. If we find it there, we'll return CANCEL_RUNNING
+	 * as an indication that we attempted to signal cancellation. The
+ * completion will run normally in this case.
+ */
+ rcu_read_lock();
+ found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, cwork);
+ rcu_read_unlock();
+ return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
+}
+
+enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
+{
+ enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
+ int i;
+
+ for (i = 0; i < wq->nr_wqes; i++) {
+ struct io_wqe *wqe = wq->wqes[i];
+
+ ret = io_wqe_cancel_work(wqe, cwork);
+ if (ret != IO_WQ_CANCEL_NOTFOUND)
+ break;
+ }
+
+ return ret;
+}
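/*
 * A caller-side sketch of how the three cancellation outcomes are meant to
 * be read; the errno mapping below is illustrative only and not mandated by
 * this patch.
 */
static int example_try_cancel(struct io_wq *wq, struct io_wq_work *work)
{
	switch (io_wq_cancel_work(wq, work)) {
	case IO_WQ_CANCEL_OK:		/* never ran; work handed back as-new */
		return 0;
	case IO_WQ_CANCEL_RUNNING:	/* signalled; completion still posted */
		return -EALREADY;
	case IO_WQ_CANCEL_NOTFOUND:	/* already done, or never queued here */
	default:
		return -ENOENT;
	}
}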
+
+struct io_wq_flush_data {
+ struct io_wq_work work;
+ struct completion done;
+};
+
+static void io_wq_flush_func(struct io_wq_work **workptr)
+{
+ struct io_wq_work *work = *workptr;
+ struct io_wq_flush_data *data;
+
+ data = container_of(work, struct io_wq_flush_data, work);
+ complete(&data->done);
+}
+
+/*
+ * Doesn't wait for previously queued work to finish. When this completes,
+ * it just means that previously queued work was started.
+ */
+void io_wq_flush(struct io_wq *wq)
+{
+ struct io_wq_flush_data data;
+ int i;
+
+ for (i = 0; i < wq->nr_wqes; i++) {
+ struct io_wqe *wqe = wq->wqes[i];
+
+ init_completion(&data.done);
+ INIT_IO_WORK(&data.work, io_wq_flush_func);
+ data.work.flags |= IO_WQ_WORK_INTERNAL;
+ io_wqe_enqueue(wqe, &data.work);
+ wait_for_completion(&data.done);
+ }
+}
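/*
 * A hedged sketch underlining the comment above: io_wq_flush() is a
 * "has started" barrier, not a completion wait. Anything that needs the
 * result must still synchronize on its own (a struct completion, say).
 * The helper name is illustrative only.
 */
static void example_flush_point(struct io_wq *wq, struct io_wq_work *work)
{
	io_wq_enqueue(wq, work);
	io_wq_flush(wq);
	/* work->func() has been picked up on every node, but may still run */
}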
+
+struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
+ struct user_struct *user, get_work_fn *get_work,
+ put_work_fn *put_work)
+{
+ int ret = -ENOMEM, i, node;
+ struct io_wq *wq;
+
+ wq = kcalloc(1, sizeof(*wq), GFP_KERNEL);
+ if (!wq)
+ return ERR_PTR(-ENOMEM);
+
+ wq->nr_wqes = num_online_nodes();
+ wq->wqes = kcalloc(wq->nr_wqes, sizeof(struct io_wqe *), GFP_KERNEL);
+ if (!wq->wqes) {
+ kfree(wq);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ wq->get_work = get_work;
+ wq->put_work = put_work;
+
+ /* caller must already hold a reference to this */
+ wq->user = user;
+
+ i = 0;
+ refcount_set(&wq->refs, wq->nr_wqes);
+ for_each_online_node(node) {
+ struct io_wqe *wqe;
+
+ wqe = kcalloc_node(1, sizeof(struct io_wqe), GFP_KERNEL, node);
+ if (!wqe)
+ break;
+ wq->wqes[i] = wqe;
+ wqe->node = node;
+ wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
+ atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
+ if (user) {
+ wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
+ task_rlimit(current, RLIMIT_NPROC);
+ }
+ atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
+ wqe->node = node;
+ wqe->wq = wq;
+ spin_lock_init(&wqe->lock);
+ INIT_LIST_HEAD(&wqe->work_list);
+ INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
+ INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
+ INIT_LIST_HEAD(&wqe->all_list);
+
+ i++;
+ }
+
+ init_completion(&wq->done);
+
+ if (i != wq->nr_wqes)
+ goto err;
+
+ /* caller must have already done mmgrab() on this mm */
+ wq->mm = mm;
+
+ wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
+ if (!IS_ERR(wq->manager)) {
+ wake_up_process(wq->manager);
+ return wq;
+ }
+
+ ret = PTR_ERR(wq->manager);
+ wq->manager = NULL;
+err:
+ complete(&wq->done);
+ io_wq_destroy(wq);
+ return ERR_PTR(ret);
+}
+
+static bool io_wq_worker_wake(struct io_worker *worker, void *data)
+{
+ wake_up_process(worker->task);
+ return false;
+}
+
+void io_wq_destroy(struct io_wq *wq)
+{
+ int i;
+
+ if (wq->manager) {
+ set_bit(IO_WQ_BIT_EXIT, &wq->state);
+ kthread_stop(wq->manager);
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < wq->nr_wqes; i++) {
+ struct io_wqe *wqe = wq->wqes[i];
+
+ if (!wqe)
+ continue;
+ io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
+ }
+ rcu_read_unlock();
+
+ wait_for_completion(&wq->done);
+
+ for (i = 0; i < wq->nr_wqes; i++)
+ kfree(wq->wqes[i]);
+ kfree(wq->wqes);
+ kfree(wq);
+}
diff --git a/fs/io-wq.h b/fs/io-wq.h
new file mode 100644
index 000000000000..4b29f922f80c
--- /dev/null
+++ b/fs/io-wq.h
@@ -0,0 +1,74 @@
+#ifndef INTERNAL_IO_WQ_H
+#define INTERNAL_IO_WQ_H
+
+struct io_wq;
+
+enum {
+ IO_WQ_WORK_CANCEL = 1,
+ IO_WQ_WORK_HAS_MM = 2,
+ IO_WQ_WORK_HASHED = 4,
+ IO_WQ_WORK_NEEDS_USER = 8,
+ IO_WQ_WORK_NEEDS_FILES = 16,
+ IO_WQ_WORK_UNBOUND = 32,
+ IO_WQ_WORK_INTERNAL = 64,
+
+ IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */
+};
+
+enum io_wq_cancel {
+ IO_WQ_CANCEL_OK, /* cancelled before started */
+	IO_WQ_CANCEL_RUNNING,	/* found, running, cancellation attempted */
+ IO_WQ_CANCEL_NOTFOUND, /* work not found */
+};
+
+struct io_wq_work {
+ struct list_head list;
+ void (*func)(struct io_wq_work **);
+ unsigned flags;
+ struct files_struct *files;
+};
+
+#define INIT_IO_WORK(work, _func) \
+ do { \
+ (work)->func = _func; \
+ (work)->flags = 0; \
+ (work)->files = NULL; \
+ } while (0) \
+
+typedef void (get_work_fn)(struct io_wq_work *);
+typedef void (put_work_fn)(struct io_wq_work *);
+
+struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
+ struct user_struct *user,
+ get_work_fn *get_work, put_work_fn *put_work);
+void io_wq_destroy(struct io_wq *wq);
+
+void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
+void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
+void io_wq_flush(struct io_wq *wq);
+
+void io_wq_cancel_all(struct io_wq *wq);
+enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
+
+typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
+
+enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+ void *data);
+
+#if defined(CONFIG_IO_WQ)
+extern void io_wq_worker_sleeping(struct task_struct *);
+extern void io_wq_worker_running(struct task_struct *);
+#else
+static inline void io_wq_worker_sleeping(struct task_struct *tsk)
+{
+}
+static inline void io_wq_worker_running(struct task_struct *tsk)
+{
+}
+#endif
+
+static inline bool io_wq_current_is_worker(void)
+{
+ return in_task() && (current->flags & PF_IO_WORKER);
+}
+#endif
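For orientation, a hedged end-to-end sketch of the API declared above, roughly as a user such as io_uring is expected to drive it. The handler, the bounded-worker count and the lack of error handling are illustrative only.

/* Hypothetical work handler; real users check for cancellation like this */
static void example_work_fn(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;

	if (work->flags & IO_WQ_WORK_CANCEL)
		return;			/* cancelled before or while queued */
	/* ... perform the actual I/O here ... */
}

static struct io_wq *example_start(struct mm_struct *mm,
				   struct user_struct *user)
{
	/* four bounded workers per node, no get/put work callbacks */
	return io_wq_create(4, mm, user, NULL, NULL);
}

static void example_submit(struct io_wq *wq, struct io_wq_work *work)
{
	INIT_IO_WORK(work, example_work_fn);
	io_wq_enqueue(wq, work);
}

static void example_stop(struct io_wq *wq)
{
	io_wq_cancel_all(wq);
	io_wq_destroy(wq);
}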
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 2c819c3c855d..4c030a92de79 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -56,7 +56,6 @@
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
-#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
@@ -71,12 +70,24 @@
#include <linux/sizes.h>
#include <linux/hugetlb.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/io_uring.h>
+
#include <uapi/linux/io_uring.h>
#include "internal.h"
+#include "io-wq.h"
#define IORING_MAX_ENTRIES 32768
-#define IORING_MAX_FIXED_FILES 1024
+#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
+
+/*
+ * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
+ */
+#define IORING_FILE_TABLE_SHIFT 9
+#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
+#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
+#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
struct io_uring {
u32 head ____cacheline_aligned_in_smp;
@@ -161,14 +172,8 @@ struct io_mapped_ubuf {
unsigned int nr_bvecs;
};
-struct async_list {
- spinlock_t lock;
- atomic_t cnt;
- struct list_head list;
-
- struct file *file;
- off_t io_start;
- size_t io_len;
+struct fixed_file_table {
+ struct file **files;
};
struct io_ring_ctx {
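/*
 * A hedged sketch of how the two-level fixed-file table above is indexed
 * with the SHIFT/MASK constants defined earlier: the upper bits of a
 * registered-file index select a table page, the low IORING_FILE_TABLE_SHIFT
 * bits select the slot within it. The helper name is illustrative, not part
 * of this patch.
 */
static struct file *example_fixed_file(struct io_ring_ctx *ctx, unsigned idx)
{
	struct fixed_file_table *table;

	table = &ctx->file_table[idx >> IORING_FILE_TABLE_SHIFT];
	return table->files[idx & IORING_FILE_TABLE_MASK];
}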
@@ -180,6 +185,7 @@ struct io_ring_ctx {
unsigned int flags;
bool compat;
bool account_mem;
+ bool cq_overflow_flushed;
/*
* Ring buffer of indices into array of io_uring_sqe, which is
@@ -198,38 +204,30 @@ struct io_ring_ctx {
unsigned sq_mask;
unsigned sq_thread_idle;
unsigned cached_sq_dropped;
+ atomic_t cached_cq_overflow;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct list_head timeout_list;
+ struct list_head cq_overflow_list;
+
+ wait_queue_head_t inflight_wait;
} ____cacheline_aligned_in_smp;
+ struct io_rings *rings;
+
/* IO offload */
- struct workqueue_struct *sqo_wq[2];
+ struct io_wq *io_wq;
struct task_struct *sqo_thread; /* if using sq thread polling */
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
- struct completion sqo_thread_started;
-
- struct {
- unsigned cached_cq_tail;
- atomic_t cached_cq_overflow;
- unsigned cq_entries;
- unsigned cq_mask;
- struct wait_queue_head cq_wait;
- struct fasync_struct *cq_fasync;
- struct eventfd_ctx *cq_ev_fd;
- atomic_t cq_timeouts;
- } ____cacheline_aligned_in_smp;
-
- struct io_rings *rings;
/*
* If used, fixed file set. Writers must ensure that ->refs is dead,
* readers must ensure that ->refs is alive as long as the file* is
* used. Only updated through io_uring_register(2).
*/
- struct file **user_files;
+ struct fixed_file_table *file_table;
unsigned nr_user_files;
/* if used, fixed mapped user buffers */
@@ -238,7 +236,25 @@ struct io_ring_ctx {
struct user_struct *user;
- struct completion ctx_done;
+ /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
+ struct completion *completions;
+
+ /* if all else fails... */
+ struct io_kiocb *fallback_req;
+
+#if defined(CONFIG_UNIX)
+ struct socket *ring_sock;
+#endif
+
+ struct {
+ unsigned cached_cq_tail;
+ unsigned cq_entries;
+ unsigned cq_mask;
+ atomic_t cq_timeouts;
+ struct wait_queue_head cq_wait;
+ struct fasync_struct *cq_fasync;
+ struct eventfd_ctx *cq_ev_fd;
+ } ____cacheline_aligned_in_smp;
struct {
struct mutex uring_lock;
@@ -255,22 +271,20 @@ struct io_ring_ctx {
* manipulate the list, hence no extra locking is needed there.
*/
struct list_head poll_list;
- struct list_head cancel_list;
- } ____cacheline_aligned_in_smp;
-	struct async_list	pending_async[2];
-
-#if defined(CONFIG_UNIX)
-	struct socket		*ring_sock;
-#endif
+	struct rb_root		cancel_tree;
+	spinlock_t		inflight_lock;
+	struct list_head	inflight_list;
+	} ____cacheline_aligned_in_smp;
};
struct sqe_submit {
const struct io_uring_sqe *sqe;
- unsigned short index;
+ struct file *ring_file;
+ int ring_fd;
u32 sequence;
bool has_user;
- bool needs_lock;
+ bool in_async;
bool needs_fixed_file;
};
@@ -309,7 +323,10 @@ struct io_kiocb {
struct sqe_submit submit;
struct io_ring_ctx *ctx;
- struct list_head list;
+ union {
+ struct list_head list;
+ struct rb_node rb_node;
+ };
struct list_head link_list;
unsigned int flags;
refcount_t refs;
@@ -320,18 +337,22 @@ struct io_kiocb {
#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
#define REQ_F_IO_DRAINED 32 /* drain done */
#define REQ_F_LINK 64 /* linked sqes */
-#define REQ_F_LINK_DONE 128 /* linked sqes done */
+#define REQ_F_LINK_TIMEOUT 128 /* has linked timeout */
#define REQ_F_FAIL_LINK 256 /* fail rest of links */
#define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */
#define REQ_F_TIMEOUT 1024 /* timeout request */
#define REQ_F_ISREG 2048 /* regular file */
#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
#define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */
+#define REQ_F_INFLIGHT 16384 /* on inflight list */
+#define REQ_F_COMP_LOCKED 32768 /* completion under lock */
u64 user_data;
u32 result;
u32 sequence;
- struct work_struct work;
+ struct list_head inflight_entry;
+
+ struct io_wq_work work;
};
#define IO_PLUG_THRESHOLD 2
@@ -357,10 +378,11 @@ struct io_submit_state {
unsigned int ios_left;
};
-static void io_sq_wq_submit_work(struct work_struct *work);
-static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
- long res);
+static void io_wq_submit_work(struct io_wq_work **workptr);
+static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void __io_free_req(struct io_kiocb *req);
+static void io_put_req(struct io_kiocb *req);
+static void io_double_put_req(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
@@ -383,57 +405,67 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
- complete(&ctx->ctx_done);
+ complete(&ctx->completions[0]);
}
static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
struct io_ring_ctx *ctx;
- int i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return NULL;
+ ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
+ if (!ctx->fallback_req)
+ goto err;
+
+ ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL);
+ if (!ctx->completions)
+ goto err;
+
if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
- PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
- kfree(ctx);
- return NULL;
- }
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
+ goto err;
ctx->flags = p->flags;
init_waitqueue_head(&ctx->cq_wait);
- init_completion(&ctx->ctx_done);
- init_completion(&ctx->sqo_thread_started);
+ INIT_LIST_HEAD(&ctx->cq_overflow_list);
+ init_completion(&ctx->completions[0]);
+ init_completion(&ctx->completions[1]);
mutex_init(&ctx->uring_lock);
init_waitqueue_head(&ctx->wait);
- for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
- spin_lock_init(&ctx->pending_async[i].lock);
- INIT_LIST_HEAD(&ctx->pending_async[i].list);
- atomic_set(&ctx->pending_async[i].cnt, 0);
- }
spin_lock_init(&ctx->completion_lock);
INIT_LIST_HEAD(&ctx->poll_list);
- INIT_LIST_HEAD(&ctx->cancel_list);
+ ctx->cancel_tree = RB_ROOT;
INIT_LIST_HEAD(&ctx->defer_list);
INIT_LIST_HEAD(&ctx->timeout_list);
+ init_waitqueue_head(&ctx->inflight_wait);
+ spin_lock_init(&ctx->inflight_lock);
+ INIT_LIST_HEAD(&ctx->inflight_list);
return ctx;
+err:
+ if (ctx->fallback_req)
+ kmem_cache_free(req_cachep, ctx->fallback_req);
+ kfree(ctx->completions);
+ kfree(ctx);
+ return NULL;
}
-static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static inline bool __req_need_defer(struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
+
return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
+ atomic_read(&ctx->cached_cq_overflow);
}
-static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static inline bool req_need_defer(struct io_kiocb *req)
{
-	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
-		return false;
-	return __io_sequence_defer(ctx, req);
+	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN)
+		return __req_need_defer(req);
+	return false;
}
static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
@@ -441,7 +473,7 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
struct io_kiocb *req;
req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
- if (req && !io_sequence_defer(ctx, req)) {
+ if (req && !req_need_defer(req)) {
list_del_init(&req->list);
return req;
}
@@ -457,7 +489,7 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
if (req) {
if (req->flags & REQ_F_TIMEOUT_NOSEQ)
return NULL;
- if (!__io_sequence_defer(ctx, req)) {
+ if (!__req_need_defer(req)) {
list_del_init(&req->list);
return req;
}
@@ -481,21 +513,59 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
}
}
-static inline void io_queue_async_work(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
{
- int rw = 0;
+ u8 opcode = READ_ONCE(sqe->opcode);
+
+ return !(opcode == IORING_OP_READ_FIXED ||
+ opcode == IORING_OP_WRITE_FIXED);
+}
+
+static inline bool io_prep_async_work(struct io_kiocb *req)
+{
+ bool do_hashed = false;
if (req->submit.sqe) {
switch (req->submit.sqe->opcode) {
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
- rw = !(req->rw.ki_flags & IOCB_DIRECT);
+ do_hashed = true;
+ /* fall-through */
+ case IORING_OP_READV:
+ case IORING_OP_READ_FIXED:
+ case IORING_OP_SENDMSG:
+ case IORING_OP_RECVMSG:
+ case IORING_OP_ACCEPT:
+ case IORING_OP_POLL_ADD:
+ /*
+ * We know REQ_F_ISREG is not set on some of these
+ * opcodes, but this enables us to keep the check in
+ * just one place.
+ */
+ if (!(req->flags & REQ_F_ISREG))
+ req->work.flags |= IO_WQ_WORK_UNBOUND;
break;
}
+ if (io_sqe_needs_user(req->submit.sqe))
+ req->work.flags |= IO_WQ_WORK_NEEDS_USER;
}
- queue_work(ctx->sqo_wq[rw], &req->work);
+ return do_hashed;
+}
+
+static inline void io_queue_async_work(struct io_kiocb *req)
+{
+ bool do_hashed = io_prep_async_work(req);
+ struct io_ring_ctx *ctx = req->ctx;
+
+ trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
+ req->flags);
+ if (!do_hashed) {
+ io_wq_enqueue(ctx->io_wq, &req->work);
+ } else {
+ io_wq_enqueue_hashed(ctx->io_wq, &req->work,
+ file_inode(req->file));
+ }
}
static void io_kill_timeout(struct io_kiocb *req)
@@ -505,9 +575,9 @@ static void io_kill_timeout(struct io_kiocb *req)
ret = hrtimer_try_to_cancel(&req->timeout.timer);
if (ret != -1) {
atomic_inc(&req->ctx->cq_timeouts);
- list_del(&req->list);
- io_cqring_fill_event(req->ctx, req->user_data, 0);
- __io_free_req(req);
+ list_del_init(&req->list);
+ io_cqring_fill_event(req, 0);
+ io_put_req(req);
}
}
@@ -537,7 +607,7 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
continue;
}
req->flags |= REQ_F_IO_DRAINED;
- io_queue_async_work(ctx, req);
+ io_queue_async_work(req);
}
}
@@ -559,50 +629,124 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
return &rings->cqes[tail & ctx->cq_mask];
}
-static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
- long res)
+static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+{
+ if (waitqueue_active(&ctx->wait))
+ wake_up(&ctx->wait);
+ if (waitqueue_active(&ctx->sqo_wait))
+ wake_up(&ctx->sqo_wait);
+ if (ctx->cq_ev_fd)
+ eventfd_signal(ctx->cq_ev_fd, 1);
+}
+
+static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
+{
+ struct io_rings *rings = ctx->rings;
+ struct io_uring_cqe *cqe;
+ struct io_kiocb *req;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ if (!force) {
+ if (list_empty_careful(&ctx->cq_overflow_list))
+ return;
+ if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
+ rings->cq_ring_entries))
+ return;
+ }
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+
+ /* if force is set, the ring is going away. always drop after that */
+ if (force)
+ ctx->cq_overflow_flushed = true;
+
+ while (!list_empty(&ctx->cq_overflow_list)) {
+ cqe = io_get_cqring(ctx);
+ if (!cqe && !force)
+ break;
+
+ req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
+ list);
+ list_move(&req->list, &list);
+ if (cqe) {
+ WRITE_ONCE(cqe->user_data, req->user_data);
+ WRITE_ONCE(cqe->res, req->result);
+ WRITE_ONCE(cqe->flags, 0);
+ } else {
+ WRITE_ONCE(ctx->rings->cq_overflow,
+ atomic_inc_return(&ctx->cached_cq_overflow));
+ }
+ }
+
+ io_commit_cqring(ctx);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ io_cqring_ev_posted(ctx);
+
+ while (!list_empty(&list)) {
+ req = list_first_entry(&list, struct io_kiocb, list);
+ list_del(&req->list);
+ io_put_req(req);
+ }
+}
+
+static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
+ struct io_ring_ctx *ctx = req->ctx;
struct io_uring_cqe *cqe;
+ trace_io_uring_complete(ctx, req->user_data, res);
+
/*
* If we can't get a cq entry, userspace overflowed the
* submission (by quite a lot). Increment the overflow count in
* the ring.
*/
cqe = io_get_cqring(ctx);
- if (cqe) {
- WRITE_ONCE(cqe->user_data, ki_user_data);
+ if (likely(cqe)) {
+ WRITE_ONCE(cqe->user_data, req->user_data);
WRITE_ONCE(cqe->res, res);
WRITE_ONCE(cqe->flags, 0);
- } else {
+ } else if (ctx->cq_overflow_flushed) {
WRITE_ONCE(ctx->rings->cq_overflow,
atomic_inc_return(&ctx->cached_cq_overflow));
+ } else {
+ refcount_inc(&req->refs);
+ req->result = res;
+ list_add_tail(&req->list, &ctx->cq_overflow_list);
}
}
-static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
-{
- if (waitqueue_active(&ctx->wait))
- wake_up(&ctx->wait);
- if (waitqueue_active(&ctx->sqo_wait))
- wake_up(&ctx->sqo_wait);
- if (ctx->cq_ev_fd)
- eventfd_signal(ctx->cq_ev_fd, 1);
-}
-
-static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
- long res)
+static void io_cqring_add_event(struct io_kiocb *req, long res)
{
+ struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
spin_lock_irqsave(&ctx->completion_lock, flags);
- io_cqring_fill_event(ctx, user_data, res);
+ io_cqring_fill_event(req, res);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
}
+static inline bool io_is_fallback_req(struct io_kiocb *req)
+{
+ return req == (struct io_kiocb *)
+ ((unsigned long) req->ctx->fallback_req & ~1UL);
+}
+
+static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
+{
+ struct io_kiocb *req;
+
+ req = ctx->fallback_req;
+ if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
+ return req;
+
+ return NULL;
+}
+
static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
struct io_submit_state *state)
{
@@ -615,7 +759,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
if (!state) {
req = kmem_cache_alloc(req_cachep, gfp);
if (unlikely(!req))
- goto out;
+ goto fallback;
} else if (!state->free_reqs) {
size_t sz;
int ret;
@@ -630,7 +774,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
if (unlikely(ret <= 0)) {
state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
if (!state->reqs[0])
- goto out;
+ goto fallback;
ret = 1;
}
state->free_reqs = ret - 1;
@@ -642,14 +786,19 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
state->cur_req++;
}
+got_it:
req->file = NULL;
req->ctx = ctx;
req->flags = 0;
/* one is dropped after submission, the other at completion */
refcount_set(&req->refs, 2);
req->result = 0;
+ INIT_IO_WORK(&req->work, io_wq_submit_work);
return req;
-out:
+fallback:
+ req = io_get_fallback_req(ctx);
+ if (req)
+ goto got_it;
percpu_ref_put(&ctx->refs);
return NULL;
}
@@ -665,15 +814,48 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
static void __io_free_req(struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
+
if (req->file && !(req->flags & REQ_F_FIXED_FILE))
fput(req->file);
- percpu_ref_put(&req->ctx->refs);
- kmem_cache_free(req_cachep, req);
+ if (req->flags & REQ_F_INFLIGHT) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->inflight_lock, flags);
+ list_del(&req->inflight_entry);
+ if (waitqueue_active(&ctx->inflight_wait))
+ wake_up(&ctx->inflight_wait);
+ spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+ }
+ percpu_ref_put(&ctx->refs);
+ if (likely(!io_is_fallback_req(req)))
+ kmem_cache_free(req_cachep, req);
+ else
+ clear_bit_unlock(0, (unsigned long *) ctx->fallback_req);
}
-static void io_req_link_next(struct io_kiocb *req)
+static bool io_link_cancel_timeout(struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
+ int ret;
+
+ ret = hrtimer_try_to_cancel(&req->timeout.timer);
+ if (ret != -1) {
+ io_cqring_fill_event(req, -ECANCELED);
+ io_commit_cqring(ctx);
+ req->flags &= ~REQ_F_LINK;
+ io_put_req(req);
+ return true;
+ }
+
+ return false;
+}
+
+static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
+{
+ struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *nxt;
+ bool wake_ev = false;
/*
* The list should never be empty when we are called here. But could
@@ -681,18 +863,35 @@ static void io_req_link_next(struct io_kiocb *req)
* safe side.
*/
nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
- if (nxt) {
- list_del(&nxt->list);
+ while (nxt) {
+ list_del_init(&nxt->list);
if (!list_empty(&req->link_list)) {
INIT_LIST_HEAD(&nxt->link_list);
list_splice(&req->link_list, &nxt->link_list);
nxt->flags |= REQ_F_LINK;
}
- nxt->flags |= REQ_F_LINK_DONE;
- INIT_WORK(&nxt->work, io_sq_wq_submit_work);
- io_queue_async_work(req->ctx, nxt);
+ /*
+ * If we're in async work, we can continue processing the chain
+ * in this context instead of having to queue up new async work.
+ */
+ if (req->flags & REQ_F_LINK_TIMEOUT) {
+ wake_ev = io_link_cancel_timeout(nxt);
+
+ /* we dropped this link, get next */
+ nxt = list_first_entry_or_null(&req->link_list,
+ struct io_kiocb, list);
+ } else if (nxtptr && io_wq_current_is_worker()) {
+ *nxtptr = nxt;
+ break;
+ } else {
+ io_queue_async_work(nxt);
+ break;
+ }
}
+
+ if (wake_ev)
+ io_cqring_ev_posted(ctx);
}
/*
@@ -700,43 +899,118 @@ static void io_req_link_next(struct io_kiocb *req)
*/
static void io_fail_links(struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *link;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
while (!list_empty(&req->link_list)) {
link = list_first_entry(&req->link_list, struct io_kiocb, list);
-		list_del(&link->list);
-		io_cqring_add_event(req->ctx, link->user_data, -ECANCELED);
-		__io_free_req(link);
+		list_del_init(&link->list);
+		trace_io_uring_fail_link(req, link);
+
+ if ((req->flags & REQ_F_LINK_TIMEOUT) &&
+ link->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT) {
+ io_link_cancel_timeout(link);
+ } else {
+ io_cqring_fill_event(link, -ECANCELED);
+ io_double_put_req(link);
+ }
}
+
+ io_commit_cqring(ctx);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ io_cqring_ev_posted(ctx);
}
-static void io_free_req(struct io_kiocb *req)
+static void io_free_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{
+ if (likely(!(req->flags & REQ_F_LINK))) {
+ __io_free_req(req);
+ return;
+ }
+
/*
* If LINK is set, we have dependent requests in this chain. If we
* didn't fail this request, queue the first one up, moving any other
* dependencies to the next request. In case of failure, fail the rest
* of the chain.
*/
- if (req->flags & REQ_F_LINK) {
- if (req->flags & REQ_F_FAIL_LINK)
- io_fail_links(req);
- else
- io_req_link_next(req);
+ if (req->flags & REQ_F_FAIL_LINK) {
+ io_fail_links(req);
+ } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
+ REQ_F_LINK_TIMEOUT) {
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned long flags;
+
+ /*
+ * If this is a timeout link, we could be racing with the
+ * timeout timer. Grab the completion lock for this case to
+ * protect against that.
+ */
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ io_req_link_next(req, nxt);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ } else {
+ io_req_link_next(req, nxt);
}
__io_free_req(req);
}
+static void io_free_req(struct io_kiocb *req)
+{
+ io_free_req_find_next(req, NULL);
+}
+
+/*
+ * Drop reference to request, return next in chain (if there is one) if this
+ * was the last reference to this request.
+ */
+static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
+{
+ struct io_kiocb *nxt = NULL;
+
+ if (refcount_dec_and_test(&req->refs))
+ io_free_req_find_next(req, &nxt);
+
+ if (nxt) {
+ if (nxtptr)
+ *nxtptr = nxt;
+ else
+ io_queue_async_work(nxt);
+ }
+}
+
static void io_put_req(struct io_kiocb *req)
{
if (refcount_dec_and_test(&req->refs))
io_free_req(req);
}
-static unsigned io_cqring_events(struct io_rings *rings)
+static void io_double_put_req(struct io_kiocb *req)
+{
+ /* drop both submit and complete references */
+ if (refcount_sub_and_test(2, &req->refs))
+ __io_free_req(req);
+}
+
+static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
{
+ struct io_rings *rings = ctx->rings;
+
+ /*
+ * noflush == true is from the waitqueue handler, just ensure we wake
+ * up the task, and the next invocation will flush the entries. We
+ * cannot safely do it from here.
+ */
+ if (noflush && !list_empty(&ctx->cq_overflow_list))
+ return -1U;
+
+ io_cqring_overflow_flush(ctx, false);
+
/* See comment at the top of this file */
smp_rmb();
return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
@@ -765,7 +1039,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
req = list_first_entry(done, struct io_kiocb, list);
list_del(&req->list);
- io_cqring_fill_event(ctx, req->user_data, req->result);
+ io_cqring_fill_event(req, req->result);
(*nr_events)++;
if (refcount_dec_and_test(&req->refs)) {
@@ -774,8 +1048,8 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
* completions for those, only batch free for fixed
* file and non-linked commands.
*/
- if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
- REQ_F_FIXED_FILE) {
+ if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
+ REQ_F_FIXED_FILE) && !io_is_fallback_req(req)) {
reqs[to_free++] = req;
if (to_free == ARRAY_SIZE(reqs))
io_free_req_many(ctx, reqs, &to_free);
@@ -892,7 +1166,7 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
* If we do, we can potentially be spinning for commands that
* already triggered a CQE (eg in error).
*/
- if (io_cqring_events(ctx->rings))
+ if (io_cqring_events(ctx, false))
break;
/*
@@ -952,7 +1226,7 @@ static void kiocb_end_write(struct io_kiocb *req)
file_end_write(req->file);
}
-static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
+static void io_complete_rw_common(struct kiocb *kiocb, long res)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
@@ -961,10 +1235,28 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
if ((req->flags & REQ_F_LINK) && res != req->result)
req->flags |= REQ_F_FAIL_LINK;
- io_cqring_add_event(req->ctx, req->user_data, res);
+ io_cqring_add_event(req, res);
+}
+
+static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
+{
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+
+ io_complete_rw_common(kiocb, res);
io_put_req(req);
}
+static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
+{
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+ struct io_kiocb *nxt = NULL;
+
+ io_complete_rw_common(kiocb, res);
+ io_put_req_find_next(req, &nxt);
+
+ return nxt;
+}
+
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
@@ -1072,10 +1364,9 @@ static bool io_file_supports_async(struct file *file)
return false;
}
-static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
- bool force_nonblock)
+static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
{
- const struct io_uring_sqe *sqe = s->sqe;
+ const struct io_uring_sqe *sqe = req->submit.sqe;
struct io_ring_ctx *ctx = req->ctx;
struct kiocb *kiocb = &req->rw;
unsigned ioprio;
@@ -1159,6 +1450,15 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
}
}
+static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
+ bool in_async)
+{
+ if (in_async && ret >= 0 && nxt && kiocb->ki_complete == io_complete_rw)
+ *nxt = __io_complete_rw(kiocb, ret);
+ else
+ io_rw_done(kiocb, ret);
+}
+
static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
const struct io_uring_sqe *sqe,
struct iov_iter *iter)
@@ -1270,65 +1570,6 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
}
-static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb)
-{
- if (al->file == kiocb->ki_filp) {
- off_t start, end;
-
- /*
- * Allow merging if we're anywhere in the range of the same
- * page. Generally this happens for sub-page reads or writes,
- * and it's beneficial to allow the first worker to bring the
- * page in and the piggy backed work can then work on the
- * cached page.
- */
- start = al->io_start & PAGE_MASK;
- end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK;
- if (kiocb->ki_pos >= start && kiocb->ki_pos <= end)
- return true;
- }
-
- al->file = NULL;
- return false;
-}
-
-/*
- * Make a note of the last file/offset/direction we punted to async
- * context. We'll use this information to see if we can piggy back a
- * sequential request onto the previous one, if it's still hasn't been
- * completed by the async worker.
- */
-static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
-{
- struct async_list *async_list = &req->ctx->pending_async[rw];
- struct kiocb *kiocb = &req->rw;
- struct file *filp = kiocb->ki_filp;
-
- if (io_should_merge(async_list, kiocb)) {
- unsigned long max_bytes;
-
- /* Use 8x RA size as a decent limiter for both reads/writes */
- max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
- if (!max_bytes)
- max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);
-
- /* If max len are exceeded, reset the state */
- if (async_list->io_len + len <= max_bytes) {
- req->flags |= REQ_F_SEQ_PREV;
- async_list->io_len += len;
- } else {
- async_list->file = NULL;
- }
- }
-
- /* New file? Reset state. */
- if (async_list->file != filp) {
- async_list->io_start = kiocb->ki_pos;
- async_list->io_len = len;
- async_list->file = filp;
- }
-}
-
/*
* For files that don't have ->read_iter() and ->write_iter(), handle them
* by looping over ->read() or ->write() manually.
@@ -1374,7 +1615,7 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
return ret;
}
-static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
+static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
@@ -1384,7 +1625,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
size_t iov_count;
ssize_t read_size, ret;
- ret = io_prep_rw(req, s, force_nonblock);
+ ret = io_prep_rw(req, force_nonblock);
if (ret)
return ret;
file = kiocb->ki_filp;
@@ -1392,7 +1633,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
if (unlikely(!(file->f_mode & FMODE_READ)))
return -EBADF;
- ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
+ ret = io_import_iovec(req->ctx, READ, &req->submit, &iovec, &iter);
if (ret < 0)
return ret;
@@ -1423,23 +1664,16 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
ret2 > 0 && ret2 < read_size)
ret2 = -EAGAIN;
/* Catch -EAGAIN return for forced non-blocking submission */
- if (!force_nonblock || ret2 != -EAGAIN) {
- io_rw_done(kiocb, ret2);
- } else {
- /*
- * If ->needs_lock is true, we're already in async
- * context.
- */
- if (!s->needs_lock)
- io_async_list_note(READ, req, iov_count);
+ if (!force_nonblock || ret2 != -EAGAIN)
+ kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
+ else
ret = -EAGAIN;
- }
}
kfree(iovec);
return ret;
}
-static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
+static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
@@ -1449,7 +1683,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
size_t iov_count;
ssize_t ret;
- ret = io_prep_rw(req, s, force_nonblock);
+ ret = io_prep_rw(req, force_nonblock);
if (ret)
return ret;
@@ -1457,7 +1691,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
if (unlikely(!(file->f_mode & FMODE_WRITE)))
return -EBADF;
- ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
+ ret = io_import_iovec(req->ctx, WRITE, &req->submit, &iovec, &iter);
if (ret < 0)
return ret;
@@ -1467,12 +1701,8 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
iov_count = iov_iter_count(&iter);
ret = -EAGAIN;
- if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
- /* If ->needs_lock is true, we're already in async context. */
- if (!s->needs_lock)
- io_async_list_note(WRITE, req, iov_count);
+ if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT))
goto out_free;
- }
ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
if (!ret) {
@@ -1497,17 +1727,10 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
ret2 = call_write_iter(file, kiocb, &iter);
else
ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
- if (!force_nonblock || ret2 != -EAGAIN) {
- io_rw_done(kiocb, ret2);
- } else {
- /*
- * If ->needs_lock is true, we're already in async
- * context.
- */
- if (!s->needs_lock)
- io_async_list_note(WRITE, req, iov_count);
+ if (!force_nonblock || ret2 != -EAGAIN)
+ kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
+ else
ret = -EAGAIN;
- }
}
out_free:
kfree(iovec);
@@ -1517,15 +1740,14 @@ out_free:
/*
* IORING_OP_NOP just posts a completion event, nothing else.
*/
-static int io_nop(struct io_kiocb *req, u64 user_data)
+static int io_nop(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- long err = 0;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- io_cqring_add_event(ctx, user_data, err);
+ io_cqring_add_event(req, 0);
io_put_req(req);
return 0;
}
@@ -1546,7 +1768,7 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
loff_t sqe_off = READ_ONCE(sqe->off);
loff_t sqe_len = READ_ONCE(sqe->len);
@@ -1572,8 +1794,8 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
- io_cqring_add_event(req->ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
return 0;
}
@@ -1595,6 +1817,7 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static int io_sync_file_range(struct io_kiocb *req,
const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt,
bool force_nonblock)
{
loff_t sqe_off;
@@ -1618,14 +1841,14 @@ static int io_sync_file_range(struct io_kiocb *req,
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
- io_cqring_add_event(req->ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
return 0;
}
#if defined(CONFIG_NET)
static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock,
+ struct io_kiocb **nxt, bool force_nonblock,
long (*fn)(struct socket *, struct user_msghdr __user *,
unsigned int))
{
@@ -1654,32 +1877,80 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return ret;
}
- io_cqring_add_event(req->ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_cqring_add_event(req, ret);
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
+ io_put_req_find_next(req, nxt);
return 0;
}
#endif
static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
#if defined(CONFIG_NET)
- return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock);
+ return io_send_recvmsg(req, sqe, nxt, force_nonblock,
+ __sys_sendmsg_sock);
#else
return -EOPNOTSUPP;
#endif
}
static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
#if defined(CONFIG_NET)
- return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock);
+ return io_send_recvmsg(req, sqe, nxt, force_nonblock,
+ __sys_recvmsg_sock);
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt, bool force_nonblock)
+{
+#if defined(CONFIG_NET)
+ struct sockaddr __user *addr;
+ int __user *addr_len;
+ unsigned file_flags;
+ int flags, ret;
+
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
+ return -EINVAL;
+
+ addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
+ addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2);
+ flags = READ_ONCE(sqe->accept_flags);
+ file_flags = force_nonblock ? O_NONBLOCK : 0;
+
+ ret = __sys_accept4_file(req->file, file_flags, addr, addr_len, flags);
+ if (ret == -EAGAIN && force_nonblock) {
+ req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
+ return -EAGAIN;
+ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
#else
return -EOPNOTSUPP;
#endif
}
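For context, a minimal userspace sketch of driving the new IORING_OP_ACCEPT through a raw SQE. This is not part of the patch; the helper name is invented and the uapi header is assumed to match this series. The field usage mirrors what io_accept() above reads: fd is the listening socket, addr/addr2 carry the sockaddr and length pointers, and accept_flags is forwarded like the accept4() flags argument.

#include <string.h>
#include <sys/socket.h>
#include <linux/io_uring.h>

/* Hypothetical helper: fill an SQE the way io_accept() expects it. */
static void prep_accept_sqe(struct io_uring_sqe *sqe, int listen_fd,
			    struct sockaddr *peer, socklen_t *peer_len,
			    __u64 user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ACCEPT;
	sqe->fd = listen_fd;			/* socket we accept on */
	sqe->addr = (unsigned long) peer;	/* filled in with the peer address */
	sqe->addr2 = (unsigned long) peer_len;	/* in/out address length */
	sqe->accept_flags = SOCK_CLOEXEC;	/* forwarded like accept4() flags */
	sqe->user_data = user_data;		/* echoed back in the CQE */
}

On -EAGAIN with force_nonblock the request is punted to io-wq with IO_WQ_WORK_NEEDS_FILES set, so the worker can install the accepted descriptor into the submitter's file table (see io_grab_files() further down).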
+static inline void io_poll_remove_req(struct io_kiocb *req)
+{
+ if (!RB_EMPTY_NODE(&req->rb_node)) {
+ rb_erase(&req->rb_node, &req->ctx->cancel_tree);
+ RB_CLEAR_NODE(&req->rb_node);
+ }
+}
+
static void io_poll_remove_one(struct io_kiocb *req)
{
struct io_poll_iocb *poll = &req->poll;
@@ -1688,25 +1959,47 @@ static void io_poll_remove_one(struct io_kiocb *req)
WRITE_ONCE(poll->canceled, true);
if (!list_empty(&poll->wait.entry)) {
list_del_init(&poll->wait.entry);
- io_queue_async_work(req->ctx, req);
+ io_queue_async_work(req);
}
spin_unlock(&poll->head->lock);
-
- list_del_init(&req->list);
+ io_poll_remove_req(req);
}
static void io_poll_remove_all(struct io_ring_ctx *ctx)
{
+ struct rb_node *node;
struct io_kiocb *req;
spin_lock_irq(&ctx->completion_lock);
- while (!list_empty(&ctx->cancel_list)) {
- req = list_first_entry(&ctx->cancel_list, struct io_kiocb,list);
+ while ((node = rb_first(&ctx->cancel_tree)) != NULL) {
+ req = rb_entry(node, struct io_kiocb, rb_node);
io_poll_remove_one(req);
}
spin_unlock_irq(&ctx->completion_lock);
}
+static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
+{
+ struct rb_node *p, *parent = NULL;
+ struct io_kiocb *req;
+
+ p = ctx->cancel_tree.rb_node;
+ while (p) {
+ parent = p;
+ req = rb_entry(parent, struct io_kiocb, rb_node);
+ if (sqe_addr < req->user_data) {
+ p = p->rb_left;
+ } else if (sqe_addr > req->user_data) {
+ p = p->rb_right;
+ } else {
+ io_poll_remove_one(req);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
/*
* Find a running poll command that matches one specified in sqe->addr,
* and remove it if found.
@@ -1714,8 +2007,7 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_kiocb *poll_req, *next;
- int ret = -ENOENT;
+ int ret;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
@@ -1724,36 +2016,38 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL;
spin_lock_irq(&ctx->completion_lock);
- list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
- if (READ_ONCE(sqe->addr) == poll_req->user_data) {
- io_poll_remove_one(poll_req);
- ret = 0;
- break;
- }
- }
+ ret = io_poll_cancel(ctx, READ_ONCE(sqe->addr));
spin_unlock_irq(&ctx->completion_lock);
- io_cqring_add_event(req->ctx, sqe->user_data, ret);
+ io_cqring_add_event(req, ret);
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
io_put_req(req);
return 0;
}
-static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
- __poll_t mask)
+static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
{
+ struct io_ring_ctx *ctx = req->ctx;
+
req->poll.done = true;
- io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
+ io_cqring_fill_event(req, mangle_poll(mask));
io_commit_cqring(ctx);
}
-static void io_poll_complete_work(struct work_struct *work)
+static void io_poll_complete_work(struct io_wq_work **workptr)
{
+ struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_poll_iocb *poll = &req->poll;
struct poll_table_struct pt = { ._key = poll->events };
struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *nxt = NULL;
__poll_t mask = 0;
+ if (work->flags & IO_WQ_WORK_CANCEL)
+ WRITE_ONCE(poll->canceled, true);
+
if (!READ_ONCE(poll->canceled))
mask = vfs_poll(poll->file, &pt) & poll->events;
@@ -1770,12 +2064,15 @@ static void io_poll_complete_work(struct work_struct *work)
spin_unlock_irq(&ctx->completion_lock);
return;
}
- list_del_init(&req->list);
- io_poll_complete(ctx, req, mask);
+ io_poll_remove_req(req);
+ io_poll_complete(req, mask);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
- io_put_req(req);
+
+ io_put_req_find_next(req, &nxt);
+ if (nxt)
+ *workptr = &nxt->work;
}
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -1794,15 +2091,22 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
list_del_init(&poll->wait.entry);
+ /*
+ * Run completion inline if we can. We're using trylock here because
+ * we are violating the completion_lock -> poll wq lock ordering.
+ * If we have a link timeout we're going to need the completion_lock
+ * for finalizing the request, so mark us as having grabbed it already.
+ */
if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
- list_del(&req->list);
- io_poll_complete(ctx, req, mask);
+ io_poll_remove_req(req);
+ io_poll_complete(req, mask);
+ req->flags |= REQ_F_COMP_LOCKED;
+ io_put_req(req);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
- io_put_req(req);
} else {
- io_queue_async_work(ctx, req);
+ io_queue_async_work(req);
}
return 1;
@@ -1829,7 +2133,27 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
add_wait_queue(head, &pt->req->poll.wait);
}
-static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static void io_poll_req_insert(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct rb_node **p = &ctx->cancel_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct io_kiocb *tmp;
+
+ while (*p) {
+ parent = *p;
+ tmp = rb_entry(parent, struct io_kiocb, rb_node);
+ if (req->user_data < tmp->user_data)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&req->rb_node, parent, p);
+ rb_insert_color(&req->rb_node, &ctx->cancel_tree);
+}
+
+static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt)
{
struct io_poll_iocb *poll = &req->poll;
struct io_ring_ctx *ctx = req->ctx;
@@ -1846,9 +2170,10 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EBADF;
req->submit.sqe = NULL;
- INIT_WORK(&req->work, io_poll_complete_work);
+ INIT_IO_WORK(&req->work, io_poll_complete_work);
events = READ_ONCE(sqe->poll_events);
poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
+ RB_CLEAR_NODE(&req->rb_node);
poll->head = NULL;
poll->done = false;
@@ -1881,18 +2206,18 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
else if (cancel)
WRITE_ONCE(poll->canceled, true);
else if (!poll->done) /* actually waiting for an event */
- list_add_tail(&req->list, &ctx->cancel_list);
+ io_poll_req_insert(req);
spin_unlock(&poll->head->lock);
}
if (mask) { /* no async, we'd stolen it */
ipt.error = 0;
- io_poll_complete(ctx, req, mask);
+ io_poll_complete(req, mask);
}
spin_unlock_irq(&ctx->completion_lock);
if (mask) {
io_cqring_ev_posted(ctx);
- io_put_req(req);
+ io_put_req_find_next(req, nxt);
}
return ipt.error;
}
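As a rough userspace illustration (not from this patch; the helper name is invented and the raw SQE layout is assumed from the uapi header), a POLL_ADD submission passes a poll(2)-style mask in sqe->poll_events, and the matching POLL_REMOVE later names it by user_data via sqe->addr, which is the key io_poll_cancel() above searches the rbtree for.

#include <string.h>
#include <poll.h>
#include <linux/io_uring.h>

/* Hypothetical helper: arm a one-shot poll on fd, keyed by user_data. */
static void prep_poll_add_sqe(struct io_uring_sqe *sqe, int fd,
			      unsigned short events, __u64 user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;			/* file to wait on */
	sqe->poll_events = events;	/* e.g. POLLIN | POLLOUT */
	sqe->user_data = user_data;	/* the key io_poll_cancel() matches on */
}

The CQE result is the mangled poll mask posted by io_poll_complete() above.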
@@ -1900,7 +2225,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
struct io_ring_ctx *ctx;
- struct io_kiocb *req, *prev;
+ struct io_kiocb *req;
unsigned long flags;
req = container_of(timer, struct io_kiocb, timeout.timer);
@@ -1909,43 +2234,118 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
spin_lock_irqsave(&ctx->completion_lock, flags);
/*
- * Adjust the reqs sequence before the current one because it
- * will consume a slot in the cq_ring and the the cq_tail pointer
- * will be increased, otherwise other timeout reqs may return in
- * advance without waiting for enough wait_nr.
+ * We could be racing with timeout deletion. If the list is empty,
+ * then timeout lookup already found it and will be handling it.
*/
- prev = req;
- list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
- prev->sequence++;
- list_del(&req->list);
+ if (!list_empty(&req->list)) {
+ struct io_kiocb *prev;
+
+ /*
+ * Adjust the reqs sequence before the current one because it
+ * will consume a slot in the cq_ring and the cq_tail
+ * pointer will be increased, otherwise other timeout reqs may
+ * return in advance without waiting for enough wait_nr.
+ */
+ prev = req;
+ list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
+ prev->sequence++;
+ list_del_init(&req->list);
+ }
- io_cqring_fill_event(ctx, req->user_data, -ETIME);
+ io_cqring_fill_event(req, -ETIME);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
-
+ if (req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
io_put_req(req);
return HRTIMER_NORESTART;
}
+static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
+{
+ struct io_kiocb *req;
+ int ret = -ENOENT;
+
+ list_for_each_entry(req, &ctx->timeout_list, list) {
+ if (user_data == req->user_data) {
+ list_del_init(&req->list);
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret == -ENOENT)
+ return ret;
+
+ ret = hrtimer_try_to_cancel(&req->timeout.timer);
+ if (ret == -1)
+ return -EALREADY;
+
+ io_cqring_fill_event(req, -ECANCELED);
+ io_put_req(req);
+ return 0;
+}
+
+/*
+ * Remove or update an existing timeout command
+ */
+static int io_timeout_remove(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned flags;
+ int ret;
+
+ if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+ if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
+ return -EINVAL;
+ flags = READ_ONCE(sqe->timeout_flags);
+ if (flags)
+ return -EINVAL;
+
+ spin_lock_irq(&ctx->completion_lock);
+ ret = io_timeout_cancel(ctx, READ_ONCE(sqe->addr));
+
+ io_cqring_fill_event(req, ret);
+ io_commit_cqring(ctx);
+ spin_unlock_irq(&ctx->completion_lock);
+ io_cqring_ev_posted(ctx);
+ if (ret < 0 && req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
+ io_put_req(req);
+ return 0;
+}
+
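A small sketch of the userspace side of the new TIMEOUT_REMOVE op (illustrative only; helper name invented, uapi layout assumed): the target timeout is named by its user_data in sqe->addr, and io_timeout_cancel() above turns that into a CQE result of 0, -EALREADY or -ENOENT.

#include <string.h>
#include <linux/io_uring.h>

/* Hypothetical helper: ask the kernel to cancel a previously armed timeout. */
static void prep_timeout_remove_sqe(struct io_uring_sqe *sqe,
				    __u64 timeout_user_data, __u64 user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_TIMEOUT_REMOVE;
	sqe->fd = -1;			/* no file for this op */
	sqe->addr = timeout_user_data;	/* user_data of the armed IORING_OP_TIMEOUT */
	sqe->user_data = user_data;	/* identifies the remove's own CQE */
}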
static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
unsigned count;
struct io_ring_ctx *ctx = req->ctx;
struct list_head *entry;
+ enum hrtimer_mode mode;
struct timespec64 ts;
unsigned span = 0;
+ unsigned flags;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
- sqe->len != 1)
+ if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len != 1)
+ return -EINVAL;
+ flags = READ_ONCE(sqe->timeout_flags);
+ if (flags & ~IORING_TIMEOUT_ABS)
return -EINVAL;
if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
return -EFAULT;
+ if (flags & IORING_TIMEOUT_ABS)
+ mode = HRTIMER_MODE_ABS;
+ else
+ mode = HRTIMER_MODE_REL;
+
+ hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, mode);
req->flags |= REQ_F_TIMEOUT;
/*
@@ -2006,21 +2406,92 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req->sequence -= span;
add:
list_add(&req->list, entry);
+ req->timeout.timer.function = io_timeout_fn;
+ hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts), mode);
spin_unlock_irq(&ctx->completion_lock);
+ return 0;
+}
- hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- req->timeout.timer.function = io_timeout_fn;
- hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
- HRTIMER_MODE_REL);
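With IORING_TIMEOUT_ABS the same SQE format now selects an absolute CLOCK_MONOTONIC deadline instead of a relative one. A hedged userspace sketch follows (not from this patch; the helper name is invented, the headers are assumed to match this series, and the timespec uses the 64-bit layout get_timespec64() expects):

#include <string.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

/* Hypothetical helper: arm a timeout, relative by default or absolute on request. */
static void prep_timeout_sqe(struct io_uring_sqe *sqe,
			     struct __kernel_timespec *ts,
			     int absolute, __u64 user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_TIMEOUT;
	sqe->fd = -1;				/* timeouts take no file */
	sqe->addr = (unsigned long) ts;		/* read via get_timespec64() */
	sqe->len = 1;				/* required by io_timeout() */
	sqe->timeout_flags = absolute ? IORING_TIMEOUT_ABS : 0;
	sqe->user_data = user_data;
}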
+static bool io_cancel_cb(struct io_wq_work *work, void *data)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+ return req->user_data == (unsigned long) data;
+}
+
+static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
+{
+ enum io_wq_cancel cancel_ret;
+ int ret = 0;
+
+ cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
+ switch (cancel_ret) {
+ case IO_WQ_CANCEL_OK:
+ ret = 0;
+ break;
+ case IO_WQ_CANCEL_RUNNING:
+ ret = -EALREADY;
+ break;
+ case IO_WQ_CANCEL_NOTFOUND:
+ ret = -ENOENT;
+ break;
+ }
+
+ return ret;
+}
+
+static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
+ struct io_kiocb *req, __u64 sqe_addr,
+ struct io_kiocb **nxt)
+{
+ unsigned long flags;
+ int ret;
+
+ ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
+ if (ret != -ENOENT) {
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ goto done;
+ }
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ ret = io_timeout_cancel(ctx, sqe_addr);
+ if (ret != -ENOENT)
+ goto done;
+ ret = io_poll_cancel(ctx, sqe_addr);
+done:
+ io_cqring_fill_event(req, ret);
+ io_commit_cqring(ctx);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ io_cqring_ev_posted(ctx);
+
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
+ io_put_req_find_next(req, nxt);
+}
+
+static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+ if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
+ sqe->cancel_flags)
+ return -EINVAL;
+
+ io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), NULL);
return 0;
}
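Userspace sketch for the new ASYNC_CANCEL op (illustrative only; helper name invented): the request to cancel is again identified by its user_data, carried in sqe->addr and matched by io_cancel_cb() above; the CQE result is 0 if the work was cancelled, -EALREADY if it is already running, or -ENOENT if nothing matched.

#include <string.h>
#include <linux/io_uring.h>

/* Hypothetical helper: try to cancel an in-flight request by its user_data. */
static void prep_async_cancel_sqe(struct io_uring_sqe *sqe,
				  __u64 target_user_data, __u64 user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->fd = -1;			/* no file for this op */
	sqe->addr = target_user_data;	/* user_data of the request to cancel */
	sqe->user_data = user_data;	/* identifies the cancel's own CQE */
}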
-static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+static int io_req_defer(struct io_kiocb *req)
{
+ const struct io_uring_sqe *sqe = req->submit.sqe;
struct io_uring_sqe *sqe_copy;
+ struct io_ring_ctx *ctx = req->ctx;
- if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
+ /* Still need defer if there is pending req in defer list. */
+ if (!req_need_defer(req) && list_empty(&ctx->defer_list))
return 0;
sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
@@ -2028,7 +2499,7 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
return -EAGAIN;
spin_lock_irq(&ctx->completion_lock);
- if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
+ if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
spin_unlock_irq(&ctx->completion_lock);
kfree(sqe_copy);
return 0;
@@ -2037,64 +2508,70 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
req->submit.sqe = sqe_copy;
- INIT_WORK(&req->work, io_sq_wq_submit_work);
+ trace_io_uring_defer(ctx, req, false);
list_add_tail(&req->list, &ctx->defer_list);
spin_unlock_irq(&ctx->completion_lock);
return -EIOCBQUEUED;
}
-static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct sqe_submit *s, bool force_nonblock)
+static int __io_submit_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
{
int ret, opcode;
-
- req->user_data = READ_ONCE(s->sqe->user_data);
-
- if (unlikely(s->index >= ctx->sq_entries))
- return -EINVAL;
+ struct sqe_submit *s = &req->submit;
+ struct io_ring_ctx *ctx = req->ctx;
opcode = READ_ONCE(s->sqe->opcode);
switch (opcode) {
case IORING_OP_NOP:
- ret = io_nop(req, req->user_data);
+ ret = io_nop(req);
break;
case IORING_OP_READV:
if (unlikely(s->sqe->buf_index))
return -EINVAL;
- ret = io_read(req, s, force_nonblock);
+ ret = io_read(req, nxt, force_nonblock);
break;
case IORING_OP_WRITEV:
if (unlikely(s->sqe->buf_index))
return -EINVAL;
- ret = io_write(req, s, force_nonblock);
+ ret = io_write(req, nxt, force_nonblock);
break;
case IORING_OP_READ_FIXED:
- ret = io_read(req, s, force_nonblock);
+ ret = io_read(req, nxt, force_nonblock);
break;
case IORING_OP_WRITE_FIXED:
- ret = io_write(req, s, force_nonblock);
+ ret = io_write(req, nxt, force_nonblock);
break;
case IORING_OP_FSYNC:
- ret = io_fsync(req, s->sqe, force_nonblock);
+ ret = io_fsync(req, s->sqe, nxt, force_nonblock);
break;
case IORING_OP_POLL_ADD:
- ret = io_poll_add(req, s->sqe);
+ ret = io_poll_add(req, s->sqe, nxt);
break;
case IORING_OP_POLL_REMOVE:
ret = io_poll_remove(req, s->sqe);
break;
case IORING_OP_SYNC_FILE_RANGE:
- ret = io_sync_file_range(req, s->sqe, force_nonblock);
+ ret = io_sync_file_range(req, s->sqe, nxt, force_nonblock);
break;
case IORING_OP_SENDMSG:
- ret = io_sendmsg(req, s->sqe, force_nonblock);
+ ret = io_sendmsg(req, s->sqe, nxt, force_nonblock);
break;
case IORING_OP_RECVMSG:
- ret = io_recvmsg(req, s->sqe, force_nonblock);
+ ret = io_recvmsg(req, s->sqe, nxt, force_nonblock);
break;
case IORING_OP_TIMEOUT:
ret = io_timeout(req, s->sqe);
break;
+ case IORING_OP_TIMEOUT_REMOVE:
+ ret = io_timeout_remove(req, s->sqe);
+ break;
+ case IORING_OP_ACCEPT:
+ ret = io_accept(req, s->sqe, nxt, force_nonblock);
+ break;
+ case IORING_OP_ASYNC_CANCEL:
+ ret = io_async_cancel(req, s->sqe, nxt);
+ break;
default:
ret = -EINVAL;
break;
@@ -2108,187 +2585,65 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
return -EAGAIN;
/* workqueue context doesn't hold uring_lock, grab it now */
- if (s->needs_lock)
+ if (s->in_async)
mutex_lock(&ctx->uring_lock);
io_iopoll_req_issued(req);
- if (s->needs_lock)
+ if (s->in_async)
mutex_unlock(&ctx->uring_lock);
}
return 0;
}
-static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
- const struct io_uring_sqe *sqe)
-{
- switch (sqe->opcode) {
- case IORING_OP_READV:
- case IORING_OP_READ_FIXED:
- return &ctx->pending_async[READ];
- case IORING_OP_WRITEV:
- case IORING_OP_WRITE_FIXED:
- return &ctx->pending_async[WRITE];
- default:
- return NULL;
- }
-}
-
-static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
-{
- u8 opcode = READ_ONCE(sqe->opcode);
-
- return !(opcode == IORING_OP_READ_FIXED ||
- opcode == IORING_OP_WRITE_FIXED);
-}
-
-static void io_sq_wq_submit_work(struct work_struct *work)
+static void io_wq_submit_work(struct io_wq_work **workptr)
{
+ struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- struct io_ring_ctx *ctx = req->ctx;
- struct mm_struct *cur_mm = NULL;
- struct async_list *async_list;
- LIST_HEAD(req_list);
- mm_segment_t old_fs;
- int ret;
+ struct sqe_submit *s = &req->submit;
+ const struct io_uring_sqe *sqe = s->sqe;
+ struct io_kiocb *nxt = NULL;
+ int ret = 0;
- async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
-restart:
- do {
- struct sqe_submit *s = &req->submit;
- const struct io_uring_sqe *sqe = s->sqe;
- unsigned int flags = req->flags;
+ /* Ensure we clear previously set non-block flag */
+ req->rw.ki_flags &= ~IOCB_NOWAIT;
- /* Ensure we clear previously set non-block flag */
- req->rw.ki_flags &= ~IOCB_NOWAIT;
+ if (work->flags & IO_WQ_WORK_CANCEL)
+ ret = -ECANCELED;
- ret = 0;
- if (io_sqe_needs_user(sqe) && !cur_mm) {
- if (!mmget_not_zero(ctx->sqo_mm)) {
- ret = -EFAULT;
- } else {
- cur_mm = ctx->sqo_mm;
- use_mm(cur_mm);
- old_fs = get_fs();
- set_fs(USER_DS);
- }
- }
+ if (!ret) {
+ s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
+ s->in_async = true;
+ do {
+ ret = __io_submit_sqe(req, &nxt, false);
+ /*
+ * We can get EAGAIN for polled IO even though we're
+ * forcing a sync submission from here, since we can't
+ * wait for request slots on the block side.
+ */
+ if (ret != -EAGAIN)
+ break;
+ cond_resched();
+ } while (1);
+ }
- if (!ret) {
- s->has_user = cur_mm != NULL;
- s->needs_lock = true;
- do {
- ret = __io_submit_sqe(ctx, req, s, false);
- /*
- * We can get EAGAIN for polled IO even though
- * we're forcing a sync submission from here,
- * since we can't wait for request slots on the
- * block side.
- */
- if (ret != -EAGAIN)
- break;
- cond_resched();
- } while (1);
- }
+ /* drop submission reference */
+ io_put_req(req);
- /* drop submission reference */
+ if (ret) {
+ if (req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
+ io_cqring_add_event(req, ret);
io_put_req(req);
-
- if (ret) {
- io_cqring_add_event(ctx, sqe->user_data, ret);
- io_put_req(req);
- }
-
- /* async context always use a copy of the sqe */
- kfree(sqe);
-
- /* req from defer and link list needn't decrease async cnt */
- if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
- goto out;
-
- if (!async_list)
- break;
- if (!list_empty(&req_list)) {
- req = list_first_entry(&req_list, struct io_kiocb,
- list);
- list_del(&req->list);
- continue;
- }
- if (list_empty(&async_list->list))
- break;
-
- req = NULL;
- spin_lock(&async_list->lock);
- if (list_empty(&async_list->list)) {
- spin_unlock(&async_list->lock);
- break;
- }
- list_splice_init(&async_list->list, &req_list);
- spin_unlock(&async_list->lock);
-
- req = list_first_entry(&req_list, struct io_kiocb, list);
- list_del(&req->list);
- } while (req);
-
- /*
- * Rare case of racing with a submitter. If we find the count has
- * dropped to zero AND we have pending work items, then restart
- * the processing. This is a tiny race window.
- */
- if (async_list) {
- ret = atomic_dec_return(&async_list->cnt);
- while (!ret && !list_empty(&async_list->list)) {
- spin_lock(&async_list->lock);
- atomic_inc(&async_list->cnt);
- list_splice_init(&async_list->list, &req_list);
- spin_unlock(&async_list->lock);
-
- if (!list_empty(&req_list)) {
- req = list_first_entry(&req_list,
- struct io_kiocb, list);
- list_del(&req->list);
- goto restart;
- }
- ret = atomic_dec_return(&async_list->cnt);
- }
- }
-
-out:
- if (cur_mm) {
- set_fs(old_fs);
- unuse_mm(cur_mm);
- mmput(cur_mm);
}
-}
-/*
- * See if we can piggy back onto previously submitted work, that is still
- * running. We currently only allow this if the new request is sequential
- * to the previous one we punted.
- */
-static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
-{
- bool ret;
-
- if (!list)
- return false;
- if (!(req->flags & REQ_F_SEQ_PREV))
- return false;
- if (!atomic_read(&list->cnt))
- return false;
+ /* async context always use a copy of the sqe */
+ kfree(sqe);
- ret = true;
- spin_lock(&list->lock);
- list_add_tail(&req->list, &list->list);
- /*
- * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
- */
- smp_mb();
- if (!atomic_read(&list->cnt)) {
- list_del_init(&req->list);
- ret = false;
+ /* if a dependent link is ready, pass it back */
+ if (!ret && nxt) {
+ io_prep_async_work(nxt);
+ *workptr = &nxt->work;
}
- spin_unlock(&list->lock);
- return ret;
}
static bool io_op_needs_file(const struct io_uring_sqe *sqe)
@@ -2299,15 +2654,28 @@ static bool io_op_needs_file(const struct io_uring_sqe *sqe)
case IORING_OP_NOP:
case IORING_OP_POLL_REMOVE:
case IORING_OP_TIMEOUT:
+ case IORING_OP_TIMEOUT_REMOVE:
+ case IORING_OP_ASYNC_CANCEL:
+ case IORING_OP_LINK_TIMEOUT:
return false;
default:
return true;
}
}
-static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
- struct io_submit_state *state, struct io_kiocb *req)
+static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
+ int index)
{
+ struct fixed_file_table *table;
+
+ table = &ctx->file_table[index >> IORING_FILE_TABLE_SHIFT];
+ return table->files[index & IORING_FILE_TABLE_MASK];
+}
+
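The fixed-file array is now split into second-level tables indexed by IORING_FILE_TABLE_SHIFT/MASK (defined earlier in the file, not visible in this hunk). A standalone sketch of the same two-level lookup, with an assumed table size:

#include <stddef.h>

#define EX_FILE_TABLE_SHIFT	9		/* assumed; the real shift lives in io_uring.c */
#define EX_FILES_PER_TABLE	(1U << EX_FILE_TABLE_SHIFT)
#define EX_FILE_TABLE_MASK	(EX_FILES_PER_TABLE - 1)

struct ex_file_table {
	void **files;				/* stands in for the struct file * slots */
};

/* Mirror of io_file_from_index(): pick the top-level table, then the slot in it. */
static void *ex_file_from_index(struct ex_file_table *tables, unsigned int index)
{
	struct ex_file_table *table = &tables[index >> EX_FILE_TABLE_SHIFT];

	return table->files[index & EX_FILE_TABLE_MASK];
}

Registering nr_args files then only needs DIV_ROUND_UP(nr_args, table size) second-level allocations, which is what io_sqe_files_register() below does.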
+static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
+{
+ struct sqe_submit *s = &req->submit;
+ struct io_ring_ctx *ctx = req->ctx;
unsigned flags;
int fd;
@@ -2327,14 +2695,18 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
return 0;
if (flags & IOSQE_FIXED_FILE) {
- if (unlikely(!ctx->user_files ||
+ if (unlikely(!ctx->file_table ||
(unsigned) fd >= ctx->nr_user_files))
return -EBADF;
- req->file = ctx->user_files[fd];
+ fd = array_index_nospec(fd, ctx->nr_user_files);
+ req->file = io_file_from_index(ctx, fd);
+ if (!req->file)
+ return -EBADF;
req->flags |= REQ_F_FIXED_FILE;
} else {
if (s->needs_fixed_file)
return -EBADF;
+ trace_io_uring_file_get(ctx, fd);
req->file = io_file_get(state, fd);
if (unlikely(!req->file))
return -EBADF;
@@ -2343,12 +2715,146 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
return 0;
}
-static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s)
+static int io_grab_files(struct io_kiocb *req)
+{
+ int ret = -EBADF;
+ struct io_ring_ctx *ctx = req->ctx;
+
+ rcu_read_lock();
+ spin_lock_irq(&ctx->inflight_lock);
+ /*
+ * We use the f_ops->flush() handler to ensure that we can flush
+ * out work accessing these files if the fd is closed. Check if
+ * the fd has changed since we started down this path, and disallow
+ * this operation if it has.
+ */
+ if (fcheck(req->submit.ring_fd) == req->submit.ring_file) {
+ list_add(&req->inflight_entry, &ctx->inflight_list);
+ req->flags |= REQ_F_INFLIGHT;
+ req->work.files = current->files;
+ ret = 0;
+ }
+ spin_unlock_irq(&ctx->inflight_lock);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
+{
+ struct io_kiocb *req = container_of(timer, struct io_kiocb,
+ timeout.timer);
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *prev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+
+ /*
+ * We don't expect the list to be empty, that will only happen if we
+ * race with the completion of the linked work.
+ */
+ if (!list_empty(&req->list)) {
+ prev = list_entry(req->list.prev, struct io_kiocb, link_list);
+ if (refcount_inc_not_zero(&prev->refs))
+ list_del_init(&req->list);
+ else
+ prev = NULL;
+ }
+
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+
+ if (prev) {
+ io_async_find_and_cancel(ctx, req, prev->user_data, NULL);
+ io_put_req(prev);
+ } else {
+ io_cqring_add_event(req, -ETIME);
+ io_put_req(req);
+ }
+ return HRTIMER_NORESTART;
+}
+
+static void io_queue_linked_timeout(struct io_kiocb *req, struct timespec64 *ts,
+ enum hrtimer_mode *mode)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ /*
+ * If the list is now empty, then our linked request finished before
+ * we got a chance to setup the timer
+ */
+ spin_lock_irq(&ctx->completion_lock);
+ if (!list_empty(&req->list)) {
+ req->timeout.timer.function = io_link_timeout_fn;
+ hrtimer_start(&req->timeout.timer, timespec64_to_ktime(*ts),
+ *mode);
+ }
+ spin_unlock_irq(&ctx->completion_lock);
+
+ /* drop submission reference */
+ io_put_req(req);
+}
+
+static int io_validate_link_timeout(const struct io_uring_sqe *sqe,
+ struct timespec64 *ts)
+{
+ if (sqe->ioprio || sqe->buf_index || sqe->len != 1 || sqe->off)
+ return -EINVAL;
+ if (sqe->timeout_flags & ~IORING_TIMEOUT_ABS)
+ return -EINVAL;
+ if (get_timespec64(ts, u64_to_user_ptr(sqe->addr)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req,
+ struct timespec64 *ts,
+ enum hrtimer_mode *mode)
{
+ struct io_kiocb *nxt;
int ret;
- ret = __io_submit_sqe(ctx, req, s, true);
+ if (!(req->flags & REQ_F_LINK))
+ return NULL;
+
+ nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
+ if (!nxt || nxt->submit.sqe->opcode != IORING_OP_LINK_TIMEOUT)
+ return NULL;
+
+ ret = io_validate_link_timeout(nxt->submit.sqe, ts);
+ if (ret) {
+ list_del_init(&nxt->list);
+ io_cqring_add_event(nxt, ret);
+ io_double_put_req(nxt);
+ return ERR_PTR(-ECANCELED);
+ }
+
+ if (nxt->submit.sqe->timeout_flags & IORING_TIMEOUT_ABS)
+ *mode = HRTIMER_MODE_ABS;
+ else
+ *mode = HRTIMER_MODE_REL;
+
+ req->flags |= REQ_F_LINK_TIMEOUT;
+ hrtimer_init(&nxt->timeout.timer, CLOCK_MONOTONIC, *mode);
+ return nxt;
+}
+
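How a chain using the new LINK_TIMEOUT looks from userspace, as a hedged sketch (not from this patch; the helper name is invented and the uapi header is assumed): the timeout must be the SQE linked directly behind the request it guards, since io_prep_linked_timeout() above only inspects the first entry of the link list.

#include <string.h>
#include <sys/uio.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

/* Hypothetical helper: a readv guarded by a linked timeout in the same chain. */
static void prep_read_with_timeout(struct io_uring_sqe sqe[2], int fd,
				   struct iovec *iov, unsigned int nr_iovs,
				   struct __kernel_timespec *ts)
{
	memset(sqe, 0, 2 * sizeof(*sqe));

	sqe[0].opcode = IORING_OP_READV;
	sqe[0].fd = fd;
	sqe[0].addr = (unsigned long) iov;	/* iovec array, as for preadv() */
	sqe[0].len = nr_iovs;
	sqe[0].flags = IOSQE_IO_LINK;		/* ties the next SQE to this one */
	sqe[0].user_data = 1;

	sqe[1].opcode = IORING_OP_LINK_TIMEOUT;
	sqe[1].fd = -1;
	sqe[1].addr = (unsigned long) ts;	/* relative unless IORING_TIMEOUT_ABS is set */
	sqe[1].len = 1;				/* enforced by io_validate_link_timeout() */
	sqe[1].user_data = 2;
}

If the guarded request completes first, the link walk above cancels the timer and posts -ECANCELED for the timeout; if the timer fires first, io_link_timeout_fn() cancels the linked request instead.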
+static int __io_queue_sqe(struct io_kiocb *req)
+{
+ enum hrtimer_mode mode;
+ struct io_kiocb *nxt;
+ struct timespec64 ts;
+ int ret;
+
+ nxt = io_prep_linked_timeout(req, &ts, &mode);
+ if (IS_ERR(nxt)) {
+ ret = PTR_ERR(nxt);
+ nxt = NULL;
+ goto err;
+ }
+
+ ret = __io_submit_sqe(req, NULL, true);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -2356,36 +2862,47 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
*/
if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
(req->flags & REQ_F_MUST_PUNT))) {
+ struct sqe_submit *s = &req->submit;
struct io_uring_sqe *sqe_copy;
sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
if (sqe_copy) {
- struct async_list *list;
-
s->sqe = sqe_copy;
- memcpy(&req->submit, s, sizeof(*s));
- list = io_async_list_from_sqe(ctx, s->sqe);
- if (!io_add_to_prev_work(list, req)) {
- if (list)
- atomic_inc(&list->cnt);
- INIT_WORK(&req->work, io_sq_wq_submit_work);
- io_queue_async_work(ctx, req);
+ if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
+ ret = io_grab_files(req);
+ if (ret) {
+ kfree(sqe_copy);
+ goto err;
+ }
}
/*
* Queued up for async execution, worker will release
* submit reference when the iocb is actually submitted.
*/
+ io_queue_async_work(req);
+
+ if (nxt)
+ io_queue_linked_timeout(nxt, &ts, &mode);
+
return 0;
}
}
+err:
/* drop submission reference */
io_put_req(req);
+ if (nxt) {
+ if (!ret)
+ io_queue_linked_timeout(nxt, &ts, &mode);
+ else
+ io_put_req(nxt);
+ }
+
/* and drop final reference, if we failed */
if (ret) {
- io_cqring_add_event(ctx, req->user_data, ret);
+ io_cqring_add_event(req, ret);
if (req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK;
io_put_req(req);
@@ -2394,31 +2911,30 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
return ret;
}
-static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s)
+static int io_queue_sqe(struct io_kiocb *req)
{
int ret;
- ret = io_req_defer(ctx, req, s->sqe);
+ ret = io_req_defer(req);
if (ret) {
if (ret != -EIOCBQUEUED) {
- io_free_req(req);
- io_cqring_add_event(ctx, s->sqe->user_data, ret);
+ io_cqring_add_event(req, ret);
+ io_double_put_req(req);
}
return 0;
}
- return __io_queue_sqe(ctx, req, s);
+ return __io_queue_sqe(req);
}
-static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s, struct io_kiocb *shadow)
+static int io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
{
int ret;
int need_submit = false;
+ struct io_ring_ctx *ctx = req->ctx;
if (!shadow)
- return io_queue_sqe(ctx, req, s);
+ return io_queue_sqe(req);
/*
* Mark the first IO in link list as DRAIN, let all the following
@@ -2426,12 +2942,12 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
* list.
*/
req->flags |= REQ_F_IO_DRAIN;
- ret = io_req_defer(ctx, req, s->sqe);
+ ret = io_req_defer(req);
if (ret) {
if (ret != -EIOCBQUEUED) {
- io_free_req(req);
+ io_cqring_add_event(req, ret);
+ io_double_put_req(req);
__io_free_req(shadow);
- io_cqring_add_event(ctx, s->sqe->user_data, ret);
return 0;
}
} else {
@@ -2444,47 +2960,42 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
/* Insert shadow req to defer_list, blocking next IOs */
spin_lock_irq(&ctx->completion_lock);
+ trace_io_uring_defer(ctx, shadow, true);
list_add_tail(&shadow->list, &ctx->defer_list);
spin_unlock_irq(&ctx->completion_lock);
if (need_submit)
- return __io_queue_sqe(ctx, req, s);
+ return __io_queue_sqe(req);
return 0;
}
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
-static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
- struct io_submit_state *state, struct io_kiocb **link)
+static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
+ struct io_kiocb **link)
{
struct io_uring_sqe *sqe_copy;
- struct io_kiocb *req;
+ struct sqe_submit *s = &req->submit;
+ struct io_ring_ctx *ctx = req->ctx;
int ret;
+ req->user_data = s->sqe->user_data;
+
/* enforce forwards compatibility on users */
if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
ret = -EINVAL;
- goto err;
- }
-
- req = io_get_req(ctx, state);
- if (unlikely(!req)) {
- ret = -EAGAIN;
- goto err;
+ goto err_req;
}
- ret = io_req_set_file(ctx, s, state, req);
+ ret = io_req_set_file(state, req);
if (unlikely(ret)) {
err_req:
- io_free_req(req);
-err:
- io_cqring_add_event(ctx, s->sqe->user_data, ret);
+ io_cqring_add_event(req, ret);
+ io_double_put_req(req);
return;
}
- req->user_data = s->sqe->user_data;
-
/*
* If we already have a head request, queue this one for async
* submittal once the head completes. If we don't have a head but
@@ -2502,16 +3013,19 @@ err:
}
s->sqe = sqe_copy;
- memcpy(&req->submit, s, sizeof(*s));
+ trace_io_uring_link(ctx, req, prev);
list_add_tail(&req->list, &prev->link_list);
} else if (s->sqe->flags & IOSQE_IO_LINK) {
req->flags |= REQ_F_LINK;
- memcpy(&req->submit, s, sizeof(*s));
INIT_LIST_HEAD(&req->link_list);
*link = req;
+ } else if (READ_ONCE(s->sqe->opcode) == IORING_OP_LINK_TIMEOUT) {
+ /* Only valid as a linked SQE */
+ ret = -EINVAL;
+ goto err_req;
} else {
- io_queue_sqe(ctx, req, s);
+ io_queue_sqe(req);
}
}
@@ -2582,7 +3096,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
head = READ_ONCE(sq_array[head & ctx->sq_mask]);
if (head < ctx->sq_entries) {
- s->index = head;
+ s->ring_file = NULL;
s->sqe = &ctx->sq_sqes[head];
s->sequence = ctx->cached_sq_head;
ctx->cached_sq_head++;
@@ -2597,13 +3111,19 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
}
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
- bool has_user, bool mm_fault)
+ struct file *ring_file, int ring_fd,
+ struct mm_struct **mm, bool async)
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
struct io_kiocb *shadow_req = NULL;
- bool prev_was_link = false;
int i, submitted = 0;
+ bool mm_fault = false;
+
+ if (!list_empty(&ctx->cq_overflow_list)) {
+ io_cqring_overflow_flush(ctx, false);
+ return -EBUSY;
+ }
if (nr > IO_PLUG_THRESHOLD) {
io_submit_state_start(&state, ctx, nr);
@@ -2611,23 +3131,31 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
}
for (i = 0; i < nr; i++) {
- struct sqe_submit s;
+ struct io_kiocb *req;
+ unsigned int sqe_flags;
- if (!io_get_sqring(ctx, &s))
+ req = io_get_req(ctx, statep);
+ if (unlikely(!req)) {
+ if (!submitted)
+ submitted = -EAGAIN;
break;
+ }
+ if (!io_get_sqring(ctx, &req->submit)) {
+ __io_free_req(req);
+ break;
+ }
- /*
- * If previous wasn't linked and we have a linked command,
- * that's the end of the chain. Submit the previous link.
- */
- if (!prev_was_link && link) {
- io_queue_link_head(ctx, link, &link->submit, shadow_req);
- link = NULL;
- shadow_req = NULL;
+ if (io_sqe_needs_user(req->submit.sqe) && !*mm) {
+ mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
+ if (!mm_fault) {
+ use_mm(ctx->sqo_mm);
+ *mm = ctx->sqo_mm;
+ }
}
- prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
- if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
+ sqe_flags = req->submit.sqe->flags;
+
+ if (link && (sqe_flags & IOSQE_IO_DRAIN)) {
if (!shadow_req) {
shadow_req = io_get_req(ctx, NULL);
if (unlikely(!shadow_req))
@@ -2635,27 +3163,39 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
refcount_dec(&shadow_req->refs);
}
- shadow_req->sequence = s.sequence;
+ shadow_req->sequence = req->submit.sequence;
}
out:
- if (unlikely(mm_fault)) {
- io_cqring_add_event(ctx, s.sqe->user_data,
- -EFAULT);
- } else {
- s.has_user = has_user;
- s.needs_lock = true;
- s.needs_fixed_file = true;
- io_submit_sqe(ctx, &s, statep, &link);
- submitted++;
+ req->submit.ring_file = ring_file;
+ req->submit.ring_fd = ring_fd;
+ req->submit.has_user = *mm != NULL;
+ req->submit.in_async = async;
+ req->submit.needs_fixed_file = async;
+ trace_io_uring_submit_sqe(ctx, req->submit.sqe->user_data,
+ true, async);
+ io_submit_sqe(req, statep, &link);
+ submitted++;
+
+ /*
+ * If previous wasn't linked and we have a linked command,
+ * that's the end of the chain. Submit the previous link.
+ */
+ if (!(sqe_flags & IOSQE_IO_LINK) && link) {
+ io_queue_link_head(link, shadow_req);
+ link = NULL;
+ shadow_req = NULL;
}
}
if (link)
- io_queue_link_head(ctx, link, &link->submit, shadow_req);
+ io_queue_link_head(link, shadow_req);
if (statep)
io_submit_state_end(&state);
+ /* Commit SQ ring head once we've consumed and submitted all SQEs */
+ io_commit_sqring(ctx);
+
return submitted;
}
@@ -2667,15 +3207,15 @@ static int io_sq_thread(void *data)
DEFINE_WAIT(wait);
unsigned inflight;
unsigned long timeout;
+ int ret;
- complete(&ctx->sqo_thread_started);
+ complete(&ctx->completions[1]);
old_fs = get_fs();
set_fs(USER_DS);
- timeout = inflight = 0;
+ ret = timeout = inflight = 0;
while (!kthread_should_park()) {
- bool mm_fault = false;
unsigned int to_submit;
if (inflight) {
@@ -2710,13 +3250,21 @@ static int io_sq_thread(void *data)
}
to_submit = io_sqring_entries(ctx);
- if (!to_submit) {
+
+ /*
+ * If submit got -EBUSY, flag us as needing the application
+ * to enter the kernel to reap and flush events.
+ */
+ if (!to_submit || ret == -EBUSY) {
/*
* We're polling. If we're within the defined idle
* period, then let us spin without work before going
- * to sleep.
+ * to sleep. The exception is if we got EBUSY doing
+ * more IO, we should wait for the application to
+ * reap events and wake us up.
*/
- if (inflight || !time_after(jiffies, timeout)) {
+ if (inflight ||
+ (!time_after(jiffies, timeout) && ret != -EBUSY)) {
cond_resched();
continue;
}
@@ -2742,7 +3290,7 @@ static int io_sq_thread(void *data)
smp_mb();
to_submit = io_sqring_entries(ctx);
- if (!to_submit) {
+ if (!to_submit || ret == -EBUSY) {
if (kthread_should_park()) {
finish_wait(&ctx->sqo_wait, &wait);
break;
@@ -2760,21 +3308,10 @@ static int io_sq_thread(void *data)
ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
}
- /* Unless all new commands are FIXED regions, grab mm */
- if (!cur_mm) {
- mm_fault = !mmget_not_zero(ctx->sqo_mm);
- if (!mm_fault) {
- use_mm(ctx->sqo_mm);
- cur_mm = ctx->sqo_mm;
- }
- }
-
to_submit = min(to_submit, ctx->sq_entries);
- inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
- mm_fault);
-
- /* Commit SQ ring head once we've consumed all SQEs */
- io_commit_sqring(ctx);
+ ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
+ if (ret > 0)
+ inflight += ret;
}
set_fs(old_fs);
@@ -2788,65 +3325,6 @@ static int io_sq_thread(void *data)
return 0;
}
-static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
-{
- struct io_submit_state state, *statep = NULL;
- struct io_kiocb *link = NULL;
- struct io_kiocb *shadow_req = NULL;
- bool prev_was_link = false;
- int i, submit = 0;
-
- if (to_submit > IO_PLUG_THRESHOLD) {
- io_submit_state_start(&state, ctx, to_submit);
- statep = &state;
- }
-
- for (i = 0; i < to_submit; i++) {
- struct sqe_submit s;
-
- if (!io_get_sqring(ctx, &s))
- break;
-
- /*
- * If previous wasn't linked and we have a linked command,
- * that's the end of the chain. Submit the previous link.
- */
- if (!prev_was_link && link) {
- io_queue_link_head(ctx, link, &link->submit, shadow_req);
- link = NULL;
- shadow_req = NULL;
- }
- prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
-
- if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
- if (!shadow_req) {
- shadow_req = io_get_req(ctx, NULL);
- if (unlikely(!shadow_req))
- goto out;
- shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
- refcount_dec(&shadow_req->refs);
- }
- shadow_req->sequence = s.sequence;
- }
-
-out:
- s.has_user = true;
- s.needs_lock = false;
- s.needs_fixed_file = false;
- submit++;
- io_submit_sqe(ctx, &s, statep, &link);
- }
-
- if (link)
- io_queue_link_head(ctx, link, &link->submit, shadow_req);
- if (statep)
- io_submit_state_end(statep);
-
- io_commit_sqring(ctx);
-
- return submit;
-}
-
struct io_wait_queue {
struct wait_queue_entry wq;
struct io_ring_ctx *ctx;
@@ -2854,7 +3332,7 @@ struct io_wait_queue {
unsigned nr_timeouts;
};
-static inline bool io_should_wake(struct io_wait_queue *iowq)
+static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
{
struct io_ring_ctx *ctx = iowq->ctx;
@@ -2863,7 +3341,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq)
* started waiting. For timeouts, we always want to return to userspace,
* regardless of event count.
*/
- return io_cqring_events(ctx->rings) >= iowq->to_wait ||
+ return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
@@ -2873,7 +3351,8 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
wq);
- if (!io_should_wake(iowq))
+ /* use noflush == true, as we can't safely rely on locking context */
+ if (!io_should_wake(iowq, true))
return -1;
return autoremove_wake_function(curr, mode, wake_flags, key);
@@ -2896,9 +3375,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
.to_wait = min_events,
};
struct io_rings *rings = ctx->rings;
- int ret;
+ int ret = 0;
- if (io_cqring_events(rings) >= min_events)
+ if (io_cqring_events(ctx, false) >= min_events)
return 0;
if (sig) {
@@ -2914,24 +3393,22 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return ret;
}
- ret = 0;
iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
+ trace_io_uring_cqring_wait(ctx, min_events);
do {
prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
TASK_INTERRUPTIBLE);
- if (io_should_wake(&iowq))
+ if (io_should_wake(&iowq, false))
break;
schedule();
if (signal_pending(current)) {
- ret = -ERESTARTSYS;
+ ret = -EINTR;
break;
}
} while (1);
finish_wait(&ctx->wait, &iowq.wq);
- restore_saved_sigmask_unless(ret == -ERESTARTSYS);
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
+ restore_saved_sigmask_unless(ret == -EINTR);
return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}
@@ -2949,19 +3426,29 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
#else
int i;
- for (i = 0; i < ctx->nr_user_files; i++)
- fput(ctx->user_files[i]);
+ for (i = 0; i < ctx->nr_user_files; i++) {
+ struct file *file;
+
+ file = io_file_from_index(ctx, i);
+ if (file)
+ fput(file);
+ }
#endif
}
static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
- if (!ctx->user_files)
+ unsigned nr_tables, i;
+
+ if (!ctx->file_table)
return -ENXIO;
__io_sqe_files_unregister(ctx);
- kfree(ctx->user_files);
- ctx->user_files = NULL;
+ nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
+ for (i = 0; i < nr_tables; i++)
+ kfree(ctx->file_table[i].files);
+ kfree(ctx->file_table);
+ ctx->file_table = NULL;
ctx->nr_user_files = 0;
return 0;
}
@@ -2969,7 +3456,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
static void io_sq_thread_stop(struct io_ring_ctx *ctx)
{
if (ctx->sqo_thread) {
- wait_for_completion(&ctx->sqo_thread_started);
+ wait_for_completion(&ctx->completions[1]);
/*
* The park is a bit of a work-around, without it we get
* warning spews on shutdown with SQPOLL set and affinity
@@ -2983,15 +3470,11 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx)
static void io_finish_async(struct io_ring_ctx *ctx)
{
- int i;
-
io_sq_thread_stop(ctx);
- for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) {
- if (ctx->sqo_wq[i]) {
- destroy_workqueue(ctx->sqo_wq[i]);
- ctx->sqo_wq[i] = NULL;
- }
+ if (ctx->io_wq) {
+ io_wq_destroy(ctx->io_wq);
+ ctx->io_wq = NULL;
}
}
@@ -2999,11 +3482,9 @@ static void io_finish_async(struct io_ring_ctx *ctx)
static void io_destruct_skb(struct sk_buff *skb)
{
struct io_ring_ctx *ctx = skb->sk->sk_user_data;
- int i;
- for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++)
- if (ctx->sqo_wq[i])
- flush_workqueue(ctx->sqo_wq[i]);
+ if (ctx->io_wq)
+ io_wq_flush(ctx->io_wq);
unix_destruct_scm(skb);
}
@@ -3018,7 +3499,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
struct sock *sk = ctx->ring_sock->sk;
struct scm_fp_list *fpl;
struct sk_buff *skb;
- int i;
+ int i, nr_files;
if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
unsigned long inflight = ctx->user->unix_inflight + nr;
@@ -3038,21 +3519,33 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
}
skb->sk = sk;
- skb->destructor = io_destruct_skb;
+ nr_files = 0;
fpl->user = get_uid(ctx->user);
for (i = 0; i < nr; i++) {
- fpl->fp[i] = get_file(ctx->user_files[i + offset]);
- unix_inflight(fpl->user, fpl->fp[i]);
+ struct file *file = io_file_from_index(ctx, i + offset);
+
+ if (!file)
+ continue;
+ fpl->fp[nr_files] = get_file(file);
+ unix_inflight(fpl->user, fpl->fp[nr_files]);
+ nr_files++;
}
- fpl->max = fpl->count = nr;
- UNIXCB(skb).fp = fpl;
- refcount_add(skb->truesize, &sk->sk_wmem_alloc);
- skb_queue_head(&sk->sk_receive_queue, skb);
+ if (nr_files) {
+ fpl->max = SCM_MAX_FD;
+ fpl->count = nr_files;
+ UNIXCB(skb).fp = fpl;
+ skb->destructor = io_destruct_skb;
+ refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+ skb_queue_head(&sk->sk_receive_queue, skb);
- for (i = 0; i < nr; i++)
- fput(fpl->fp[i]);
+ for (i = 0; i < nr_files; i++)
+ fput(fpl->fp[i]);
+ } else {
+ kfree_skb(skb);
+ kfree(fpl);
+ }
return 0;
}
@@ -3083,7 +3576,10 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx)
return 0;
while (total < ctx->nr_user_files) {
- fput(ctx->user_files[total]);
+ struct file *file = io_file_from_index(ctx, total);
+
+ if (file)
+ fput(file);
total++;
}
@@ -3096,33 +3592,79 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx)
}
#endif
+static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
+ unsigned nr_files)
+{
+ int i;
+
+ for (i = 0; i < nr_tables; i++) {
+ struct fixed_file_table *table = &ctx->file_table[i];
+ unsigned this_files;
+
+ this_files = min(nr_files, IORING_MAX_FILES_TABLE);
+ table->files = kcalloc(this_files, sizeof(struct file *),
+ GFP_KERNEL);
+ if (!table->files)
+ break;
+ nr_files -= this_files;
+ }
+
+ if (i == nr_tables)
+ return 0;
+
+ for (i = 0; i < nr_tables; i++) {
+ struct fixed_file_table *table = &ctx->file_table[i];
+ kfree(table->files);
+ }
+ return 1;
+}
+
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args)
{
__s32 __user *fds = (__s32 __user *) arg;
+ unsigned nr_tables;
int fd, ret = 0;
unsigned i;
- if (ctx->user_files)
+ if (ctx->file_table)
return -EBUSY;
if (!nr_args)
return -EINVAL;
if (nr_args > IORING_MAX_FIXED_FILES)
return -EMFILE;
- ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL);
- if (!ctx->user_files)
+ nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
+ ctx->file_table = kcalloc(nr_tables, sizeof(struct fixed_file_table),
+ GFP_KERNEL);
+ if (!ctx->file_table)
return -ENOMEM;
- for (i = 0; i < nr_args; i++) {
+ if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
+ kfree(ctx->file_table);
+ ctx->file_table = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
+ struct fixed_file_table *table;
+ unsigned index;
+
ret = -EFAULT;
if (copy_from_user(&fd, &fds[i], sizeof(fd)))
break;
+ /* allow sparse sets */
+ if (fd == -1) {
+ ret = 0;
+ continue;
+ }
- ctx->user_files[i] = fget(fd);
+ table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
+ index = i & IORING_FILE_TABLE_MASK;
+ table->files[index] = fget(fd);
ret = -EBADF;
- if (!ctx->user_files[i])
+ if (!table->files[index])
break;
/*
* Don't allow io_uring instances to be registered. If UNIX
@@ -3131,20 +3673,26 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
* handle it just fine, but there's still no point in allowing
* a ring fd as it doesn't support regular read/write anyway.
*/
- if (ctx->user_files[i]->f_op == &io_uring_fops) {
- fput(ctx->user_files[i]);
+ if (table->files[index]->f_op == &io_uring_fops) {
+ fput(table->files[index]);
break;
}
- ctx->nr_user_files++;
ret = 0;
}
if (ret) {
- for (i = 0; i < ctx->nr_user_files; i++)
- fput(ctx->user_files[i]);
+ for (i = 0; i < ctx->nr_user_files; i++) {
+ struct file *file;
- kfree(ctx->user_files);
- ctx->user_files = NULL;
+ file = io_file_from_index(ctx, i);
+ if (file)
+ fput(file);
+ }
+ for (i = 0; i < nr_tables; i++)
+ kfree(ctx->file_table[i].files);
+
+ kfree(ctx->file_table);
+ ctx->file_table = NULL;
ctx->nr_user_files = 0;
return ret;
}
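
Registration now tolerates sparse sets: an fd of -1 leaves the slot empty, to be filled later via IORING_REGISTER_FILES_UPDATE. A hedged userspace sketch of registering such a set; the raw syscall is used so the only dependency is the uapi header, and the opened path is just an example.

/*
 * Sketch: register a 4-slot table with only slot 1 populated. Assumes a
 * libc that exposes __NR_io_uring_register via <sys/syscall.h>.
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>

static int register_sparse_files(int ring_fd)
{
	int fds[4] = { -1, -1, -1, -1 };	/* all slots start empty */

	fds[1] = open("/etc/hostname", O_RDONLY);	/* example path */
	if (fds[1] < 0)
		return -1;

	/* slots 0, 2 and 3 stay sparse; only slot 1 points at a real file */
	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES,
		    fds, 4) < 0) {
		perror("IORING_REGISTER_FILES");
		close(fds[1]);
		return -1;
	}
	return 0;
}
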
@@ -3156,9 +3704,201 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
return ret;
}
+static void io_sqe_file_unregister(struct io_ring_ctx *ctx, int index)
+{
+#if defined(CONFIG_UNIX)
+ struct file *file = io_file_from_index(ctx, index);
+ struct sock *sock = ctx->ring_sock->sk;
+ struct sk_buff_head list, *head = &sock->sk_receive_queue;
+ struct sk_buff *skb;
+ int i;
+
+ __skb_queue_head_init(&list);
+
+ /*
+ * Find the skb that holds this file in its SCM_RIGHTS. When found,
+ * remove this entry and rearrange the file array.
+ */
+ skb = skb_dequeue(head);
+ while (skb) {
+ struct scm_fp_list *fp;
+
+ fp = UNIXCB(skb).fp;
+ for (i = 0; i < fp->count; i++) {
+ int left;
+
+ if (fp->fp[i] != file)
+ continue;
+
+ unix_notinflight(fp->user, fp->fp[i]);
+ left = fp->count - 1 - i;
+ if (left) {
+ memmove(&fp->fp[i], &fp->fp[i + 1],
+ left * sizeof(struct file *));
+ }
+ fp->count--;
+ if (!fp->count) {
+ kfree_skb(skb);
+ skb = NULL;
+ } else {
+ __skb_queue_tail(&list, skb);
+ }
+ fput(file);
+ file = NULL;
+ break;
+ }
+
+ if (!file)
+ break;
+
+ __skb_queue_tail(&list, skb);
+
+ skb = skb_dequeue(head);
+ }
+
+ if (skb_peek(&list)) {
+ spin_lock_irq(&head->lock);
+ while ((skb = __skb_dequeue(&list)) != NULL)
+ __skb_queue_tail(head, skb);
+ spin_unlock_irq(&head->lock);
+ }
+#else
+ fput(io_file_from_index(ctx, index));
+#endif
+}
+
+static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
+ int index)
+{
+#if defined(CONFIG_UNIX)
+ struct sock *sock = ctx->ring_sock->sk;
+ struct sk_buff_head *head = &sock->sk_receive_queue;
+ struct sk_buff *skb;
+
+ /*
+ * See if we can merge this file into an existing skb SCM_RIGHTS
+ * file set. If there's no room, fall back to allocating a new skb
+ * and filling it in.
+ */
+ spin_lock_irq(&head->lock);
+ skb = skb_peek(head);
+ if (skb) {
+ struct scm_fp_list *fpl = UNIXCB(skb).fp;
+
+ if (fpl->count < SCM_MAX_FD) {
+ __skb_unlink(skb, head);
+ spin_unlock_irq(&head->lock);
+ fpl->fp[fpl->count] = get_file(file);
+ unix_inflight(fpl->user, fpl->fp[fpl->count]);
+ fpl->count++;
+ spin_lock_irq(&head->lock);
+ __skb_queue_head(head, skb);
+ } else {
+ skb = NULL;
+ }
+ }
+ spin_unlock_irq(&head->lock);
+
+ if (skb) {
+ fput(file);
+ return 0;
+ }
+
+ return __io_sqe_files_scm(ctx, 1, index);
+#else
+ return 0;
+#endif
+}
+
+static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
+ unsigned nr_args)
+{
+ struct io_uring_files_update up;
+ __s32 __user *fds;
+ int fd, i, err;
+ __u32 done;
+
+ if (!ctx->file_table)
+ return -ENXIO;
+ if (!nr_args)
+ return -EINVAL;
+ if (copy_from_user(&up, arg, sizeof(up)))
+ return -EFAULT;
+ if (check_add_overflow(up.offset, nr_args, &done))
+ return -EOVERFLOW;
+ if (done > ctx->nr_user_files)
+ return -EINVAL;
+
+ done = 0;
+ fds = (__s32 __user *) up.fds;
+ while (nr_args) {
+ struct fixed_file_table *table;
+ unsigned index;
+
+ err = 0;
+ if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
+ err = -EFAULT;
+ break;
+ }
+ i = array_index_nospec(up.offset, ctx->nr_user_files);
+ table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
+ index = i & IORING_FILE_TABLE_MASK;
+ if (table->files[index]) {
+ io_sqe_file_unregister(ctx, i);
+ table->files[index] = NULL;
+ }
+ if (fd != -1) {
+ struct file *file;
+
+ file = fget(fd);
+ if (!file) {
+ err = -EBADF;
+ break;
+ }
+ /*
+ * Don't allow io_uring instances to be registered. If
+ * UNIX isn't enabled, then this causes a reference
+ * cycle and this instance can never get freed. If UNIX
+ * is enabled we'll handle it just fine, but there's
+ * still no point in allowing a ring fd as it doesn't
+ * support regular read/write anyway.
+ */
+ if (file->f_op == &io_uring_fops) {
+ fput(file);
+ err = -EBADF;
+ break;
+ }
+ table->files[index] = file;
+ err = io_sqe_file_register(ctx, file, i);
+ if (err)
+ break;
+ }
+ nr_args--;
+ done++;
+ up.offset++;
+ }
+
+ return done ? done : err;
+}
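
io_sqe_files_update() walks the requested range slot by slot: -1 clears a slot, any other fd replaces it, and the return value is the number of slots processed (or the first error if nothing was done). A hedged userspace sketch, assuming a liburing build that wraps this opcode as io_uring_register_files_update().

/*
 * Sketch: point slot `off` of the registered table at new_fd. Passing -1
 * instead would clear the slot, matching the kernel loop above.
 */
#include <liburing.h>

static int swap_fixed_file(struct io_uring *ring, unsigned off, int new_fd)
{
	int fds[1] = { new_fd };	/* -1 here would unregister slot `off` */

	/* returns the number of slots updated, or -errno */
	return io_uring_register_files_update(ring, off, fds, 1);
}
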
+
+static void io_put_work(struct io_wq_work *work)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+ io_put_req(req);
+}
+
+static void io_get_work(struct io_wq_work *work)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+ refcount_inc(&req->refs);
+}
+
static int io_sq_offload_start(struct io_ring_ctx *ctx,
struct io_uring_params *p)
{
+ unsigned concurrency;
int ret;
init_waitqueue_head(&ctx->sqo_wait);
@@ -3202,26 +3942,13 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
goto err;
}
- /* Do QD, or 2 * CPUS, whatever is smallest */
- ctx->sqo_wq[0] = alloc_workqueue("io_ring-wq",
- WQ_UNBOUND | WQ_FREEZABLE,
- min(ctx->sq_entries - 1, 2 * num_online_cpus()));
- if (!ctx->sqo_wq[0]) {
- ret = -ENOMEM;
- goto err;
- }
-
- /*
- * This is for buffered writes, where we want to limit the parallelism
- * due to file locking in file systems. As "normal" buffered writes
- * should parellelize on writeout quite nicely, limit us to having 2
- * pending. This avoids massive contention on the inode when doing
- * buffered async writes.
- */
- ctx->sqo_wq[1] = alloc_workqueue("io_ring-write-wq",
- WQ_UNBOUND | WQ_FREEZABLE, 2);
- if (!ctx->sqo_wq[1]) {
- ret = -ENOMEM;
+ /* Do QD, or 4 * CPUS, whatever is smallest */
+ concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
+ ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user,
+ io_get_work, io_put_work);
+ if (IS_ERR(ctx->io_wq)) {
+ ret = PTR_ERR(ctx->io_wq);
+ ctx->io_wq = NULL;
goto err;
}
@@ -3567,6 +4294,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_unaccount_mem(ctx->user,
ring_pages(ctx->sq_entries, ctx->cq_entries));
free_uid(ctx->user);
+ kfree(ctx->completions);
+ kmem_cache_free(req_cachep, ctx->fallback_req);
kfree(ctx);
}
@@ -3605,8 +4334,15 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
io_kill_timeouts(ctx);
io_poll_remove_all(ctx);
+
+ if (ctx->io_wq)
+ io_wq_cancel_all(ctx->io_wq);
+
io_iopoll_reap_events(ctx);
- wait_for_completion(&ctx->ctx_done);
+ /* if we failed setting up the ctx, we might not have any rings */
+ if (ctx->rings)
+ io_cqring_overflow_flush(ctx, true);
+ wait_for_completion(&ctx->completions[0]);
io_ring_ctx_free(ctx);
}
@@ -3619,6 +4355,53 @@ static int io_uring_release(struct inode *inode, struct file *file)
return 0;
}
+static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+{
+ struct io_kiocb *req;
+ DEFINE_WAIT(wait);
+
+ while (!list_empty_careful(&ctx->inflight_list)) {
+ struct io_kiocb *cancel_req = NULL;
+
+ spin_lock_irq(&ctx->inflight_lock);
+ list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
+ if (req->work.files != files)
+ continue;
+ /* req is being completed, ignore */
+ if (!refcount_inc_not_zero(&req->refs))
+ continue;
+ cancel_req = req;
+ break;
+ }
+ if (cancel_req)
+ prepare_to_wait(&ctx->inflight_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&ctx->inflight_lock);
+
+ /* We need to keep going until we don't find a matching req */
+ if (!cancel_req)
+ break;
+
+ io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
+ io_put_req(cancel_req);
+ schedule();
+ }
+ finish_wait(&ctx->inflight_wait, &wait);
+}
+
+static int io_uring_flush(struct file *file, void *data)
+{
+ struct io_ring_ctx *ctx = file->private_data;
+
+ io_uring_cancel_files(ctx, data);
+ if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
+ io_cqring_overflow_flush(ctx, true);
+ io_wq_cancel_all(ctx->io_wq);
+ }
+ return 0;
+}
+
static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
@@ -3680,14 +4463,20 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
*/
ret = 0;
if (ctx->flags & IORING_SETUP_SQPOLL) {
+ if (!list_empty_careful(&ctx->cq_overflow_list))
+ io_cqring_overflow_flush(ctx, false);
if (flags & IORING_ENTER_SQ_WAKEUP)
wake_up(&ctx->sqo_wait);
submitted = to_submit;
} else if (to_submit) {
- to_submit = min(to_submit, ctx->sq_entries);
+ struct mm_struct *cur_mm;
+ to_submit = min(to_submit, ctx->sq_entries);
mutex_lock(&ctx->uring_lock);
- submitted = io_ring_submit(ctx, to_submit);
+ /* already have mm, so io_submit_sqes() won't try to grab it */
+ cur_mm = ctx->sqo_mm;
+ submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
+ &cur_mm, false);
mutex_unlock(&ctx->uring_lock);
}
if (flags & IORING_ENTER_GETEVENTS) {
@@ -3710,6 +4499,7 @@ out_fput:
static const struct file_operations io_uring_fops = {
.release = io_uring_release,
+ .flush = io_uring_flush,
.mmap = io_uring_mmap,
.poll = io_uring_poll,
.fasync = io_uring_fasync,
@@ -3809,10 +4599,23 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
* Use twice as many entries for the CQ ring. It's possible for the
* application to drive a higher depth than the size of the SQ ring,
* since the sqes are only used at submission time. This allows for
- * some flexibility in overcommitting a bit.
+ * some flexibility in overcommitting a bit. If the application has
+ * set IORING_SETUP_CQSIZE, it will have passed in the desired number
+ * of CQ ring entries manually.
*/
p->sq_entries = roundup_pow_of_two(entries);
- p->cq_entries = 2 * p->sq_entries;
+ if (p->flags & IORING_SETUP_CQSIZE) {
+ /*
+ * If IORING_SETUP_CQSIZE is set, we do the same roundup
+ * to a power-of-two, if it isn't already. We do NOT impose
+ * any cq vs sq ring sizing.
+ */
+ if (p->cq_entries < p->sq_entries || p->cq_entries > IORING_MAX_CQ_ENTRIES)
+ return -EINVAL;
+ p->cq_entries = roundup_pow_of_two(p->cq_entries);
+ } else {
+ p->cq_entries = 2 * p->sq_entries;
+ }
user = get_uid(current_user());
account_mem = !capable(CAP_IPC_LOCK);
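
With IORING_SETUP_CQSIZE the application picks the CQ ring size itself; the kernel only rounds it up to a power of two and rejects values below the SQ ring size or above IORING_MAX_CQ_ENTRIES. A hedged userspace sketch, assuming liburing's io_uring_queue_init_params().

/*
 * Sketch: 128 SQ entries but room for 4096 completions.
 */
#include <liburing.h>
#include <string.h>

static int setup_big_cq(struct io_uring *ring)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_CQSIZE;
	p.cq_entries = 4096;	/* rounded up to a power of two by the kernel */

	return io_uring_queue_init_params(128, ring, &p);
}
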
@@ -3871,7 +4674,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
if (ret < 0)
goto err;
- p->features = IORING_FEAT_SINGLE_MMAP;
+ p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP;
+ trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
return ret;
err:
io_ring_ctx_wait_and_kill(ctx);
@@ -3897,7 +4701,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
}
if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
- IORING_SETUP_SQ_AFF))
+ IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE))
return -EINVAL;
ret = io_uring_create(entries, &p);
@@ -3941,7 +4745,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
* no new references will come in after we've killed the percpu ref.
*/
mutex_unlock(&ctx->uring_lock);
- wait_for_completion(&ctx->ctx_done);
+ wait_for_completion(&ctx->completions[0]);
mutex_lock(&ctx->uring_lock);
switch (opcode) {
@@ -3963,6 +4767,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
break;
ret = io_sqe_files_unregister(ctx);
break;
+ case IORING_REGISTER_FILES_UPDATE:
+ ret = io_sqe_files_update(ctx, arg, nr_args);
+ break;
case IORING_REGISTER_EVENTFD:
ret = -EINVAL;
if (nr_args != 1)
@@ -3981,7 +4788,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
}
/* bring the ctx back to life */
- reinit_completion(&ctx->ctx_done);
+ reinit_completion(&ctx->completions[0]);
percpu_ref_reinit(&ctx->refs);
return ret;
}
@@ -4006,6 +4813,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
mutex_lock(&ctx->uring_lock);
ret = __io_uring_register(ctx, opcode, arg, nr_args);
mutex_unlock(&ctx->uring_lock);
+ trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
+ ctx->cq_ev_fd != NULL, ret);
out_fput:
fdput(f);
return ret;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index d8507972ee13..90c830e3758e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1490,6 +1490,18 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
return loc->xl_ops->xlo_check_space(loc, xi);
}
+static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+ loc->xl_ops->xlo_add_entry(loc, name_hash);
+ loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
+ /*
+ * We can't leave the new entry's xe_name_offset at zero or
+ * add_namevalue() will go nuts. We set it to the size of our
+ * storage so that it can never be less than any other entry.
+ */
+ loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
+}
+
static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi)
{
@@ -2121,31 +2133,29 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
if (rc)
goto out;
- if (!loc->xl_entry) {
- rc = -EINVAL;
- goto out;
- }
-
- if (ocfs2_xa_can_reuse_entry(loc, xi)) {
- orig_value_size = loc->xl_entry->xe_value_size;
- rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
- if (rc)
- goto out;
- goto alloc_value;
- }
+ if (loc->xl_entry) {
+ if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+ orig_value_size = loc->xl_entry->xe_value_size;
+ rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+ if (rc)
+ goto out;
+ goto alloc_value;
+ }
- if (!ocfs2_xattr_is_local(loc->xl_entry)) {
- orig_clusters = ocfs2_xa_value_clusters(loc);
- rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
- if (rc) {
- mlog_errno(rc);
- ocfs2_xa_cleanup_value_truncate(loc,
- "overwriting",
- orig_clusters);
- goto out;
+ if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+ if (rc) {
+ mlog_errno(rc);
+ ocfs2_xa_cleanup_value_truncate(loc,
+ "overwriting",
+ orig_clusters);
+ goto out;
+ }
}
- }
- ocfs2_xa_wipe_namevalue(loc);
+ ocfs2_xa_wipe_namevalue(loc);
+ } else
+ ocfs2_xa_add_entry(loc, name_hash);
/*
* If we get here, we have a blank entry. Fill it. We grow our
diff --git a/fs/open.c b/fs/open.c
index b62f5c0923a8..5c68282ea79e 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -771,10 +771,6 @@ static int do_dentry_open(struct file *f,
f->f_mode |= FMODE_WRITER;
}
- /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))
- f->f_mode |= FMODE_ATOMIC_POS;
-
f->f_op = fops_get(inode->i_fop);
if (WARN_ON(!f->f_op)) {
error = -ENODEV;
@@ -1256,7 +1252,7 @@ EXPORT_SYMBOL(nonseekable_open);
*/
int stream_open(struct inode *inode, struct file *filp)
{
- filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS);
+ filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
filp->f_mode |= FMODE_STREAM;
return 0;
}
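
With FMODE_ATOMIC_POS gone, stream_open() is reduced to clearing the seek/pread/pwrite modes and setting FMODE_STREAM. A minimal sketch of a driver opting in from its open method; the example_* names are invented for illustration.

#include <linux/fs.h>
#include <linux/module.h>

static int example_open(struct inode *inode, struct file *filp)
{
	/* mark the fd as stream-like: no seeking, no pread/pwrite */
	return stream_open(inode, filp);
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= example_open,
	.llseek	= no_llseek,
};
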
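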
diff --git a/fs/pipe.c b/fs/pipe.c
index 8a2ab2f974bd..a9149199e0e7 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -793,6 +793,8 @@ int create_pipe_files(struct file **res, int flags)
}
res[0]->private_data = inode->i_pipe;
res[1] = f;
+ stream_open(inode, res[0]);
+ stream_open(inode, res[1]);
return 0;
}
@@ -931,9 +933,9 @@ static int fifo_open(struct inode *inode, struct file *filp)
__pipe_lock(pipe);
/* We can only do regular read/write on fifos */
- filp->f_mode &= (FMODE_READ | FMODE_WRITE);
+ stream_open(inode, filp);
- switch (filp->f_mode) {
+ switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
case FMODE_READ:
/*
* O_RDONLY
diff --git a/include/Kbuild b/include/Kbuild
index ffba79483cc5..95508049ee51 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -65,7 +65,6 @@ header-test- += keys/asymmetric-subtype.h
header-test- += keys/asymmetric-type.h
header-test- += keys/big_key-type.h
header-test- += keys/request_key_auth-type.h
-header-test- += keys/trusted.h
header-test- += kvm/arm_arch_timer.h
header-test- += kvm/arm_pmu.h
header-test-$(CONFIG_ARM) += kvm/arm_psci.h
@@ -1028,6 +1027,7 @@ header-test- += trace/events/fsi_master_gpio.h
header-test- += trace/events/huge_memory.h
header-test- += trace/events/ib_mad.h
header-test- += trace/events/ib_umad.h
+header-test- += trace/events/io_uring.h
header-test- += trace/events/iscsi.h
header-test- += trace/events/jbd2.h
header-test- += trace/events/kvm.h
diff --git a/include/keys/trusted.h b/include/keys/trusted_tpm.h
index 0071298b9b28..a56d8e1298f2 100644
--- a/include/keys/trusted.h
+++ b/include/keys/trusted_tpm.h
@@ -1,14 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __TRUSTED_KEY_H
-#define __TRUSTED_KEY_H
+#ifndef __TRUSTED_TPM_H
+#define __TRUSTED_TPM_H
+
+#include <keys/trusted-type.h>
+#include <linux/tpm_command.h>
/* implementation specific TPM constants */
#define MAX_BUF_SIZE 1024
#define TPM_GETRANDOM_SIZE 14
-#define TPM_OSAP_SIZE 36
-#define TPM_OIAP_SIZE 10
-#define TPM_SEAL_SIZE 87
-#define TPM_UNSEAL_SIZE 104
#define TPM_SIZE_OFFSET 2
#define TPM_RETURN_OFFSET 6
#define TPM_DATA_OFFSET 10
@@ -17,13 +16,6 @@
#define LOAD32N(buffer, offset) (*(uint32_t *)&buffer[offset])
#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset]))
-struct tpm_buf {
- int len;
- unsigned char data[MAX_BUF_SIZE];
-};
-
-#define INIT_BUF(tb) (tb->len = 0)
-
struct osapsess {
uint32_t handle;
unsigned char secret[SHA1_DIGEST_SIZE];
@@ -48,6 +40,13 @@ int TSS_checkhmac1(unsigned char *buffer,
int trusted_tpm_send(unsigned char *cmd, size_t buflen);
int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce);
+int tpm2_seal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options);
+int tpm2_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options);
+
#define TPM_DEBUG 0
#if TPM_DEBUG
@@ -109,28 +108,4 @@ static inline void dump_tpm_buf(unsigned char *buf)
{
}
#endif
-
-static inline void store8(struct tpm_buf *buf, const unsigned char value)
-{
- buf->data[buf->len++] = value;
-}
-
-static inline void store16(struct tpm_buf *buf, const uint16_t value)
-{
- *(uint16_t *) & buf->data[buf->len] = htons(value);
- buf->len += sizeof value;
-}
-
-static inline void store32(struct tpm_buf *buf, const uint32_t value)
-{
- *(uint32_t *) & buf->data[buf->len] = htonl(value);
- buf->len += sizeof value;
-}
-
-static inline void storebytes(struct tpm_buf *buf, const unsigned char *in,
- const int len)
-{
- memcpy(buf->data + buf->len, in, len);
- buf->len += len;
-}
#endif
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index bed9e43f9426..19394c77ed99 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -15,7 +15,9 @@
*/
#include <linux/cgroup.h>
+#include <linux/percpu.h>
#include <linux/percpu_counter.h>
+#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
@@ -31,15 +33,12 @@
#ifdef CONFIG_BLK_CGROUP
-enum blkg_rwstat_type {
- BLKG_RWSTAT_READ,
- BLKG_RWSTAT_WRITE,
- BLKG_RWSTAT_SYNC,
- BLKG_RWSTAT_ASYNC,
- BLKG_RWSTAT_DISCARD,
+enum blkg_iostat_type {
+ BLKG_IOSTAT_READ,
+ BLKG_IOSTAT_WRITE,
+ BLKG_IOSTAT_DISCARD,
- BLKG_RWSTAT_NR,
- BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
+ BLKG_IOSTAT_NR,
};
struct blkcg_gq;
@@ -61,17 +60,15 @@ struct blkcg {
#endif
};
-/*
- * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
- * recursive. Used to carry stats of dead children.
- */
-struct blkg_rwstat {
- struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
- atomic64_t aux_cnt[BLKG_RWSTAT_NR];
+struct blkg_iostat {
+ u64 bytes[BLKG_IOSTAT_NR];
+ u64 ios[BLKG_IOSTAT_NR];
};
-struct blkg_rwstat_sample {
- u64 cnt[BLKG_RWSTAT_NR];
+struct blkg_iostat_set {
+ struct u64_stats_sync sync;
+ struct blkg_iostat cur;
+ struct blkg_iostat last;
};
/*
@@ -127,8 +124,8 @@ struct blkcg_gq {
/* is this blkg online? protected by both blkcg and q locks */
bool online;
- struct blkg_rwstat stat_bytes;
- struct blkg_rwstat stat_ios;
+ struct blkg_iostat_set __percpu *iostat_cpu;
+ struct blkg_iostat_set iostat;
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
@@ -202,13 +199,6 @@ int blkcg_activate_policy(struct request_queue *q,
void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol);
-static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
- unsigned int idx)
-{
- return atomic64_read(&rwstat->aux_cnt[idx]) +
- percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
-}
-
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
u64 (*prfill)(struct seq_file *,
@@ -216,17 +206,6 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
const struct blkcg_policy *pol, int data,
bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
-u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- const struct blkg_rwstat_sample *rwstat);
-u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- int off);
-int blkg_print_stat_bytes(struct seq_file *sf, void *v);
-int blkg_print_stat_ios(struct seq_file *sf, void *v);
-int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
-int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
-
-void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
- int off, struct blkg_rwstat_sample *sum);
struct blkg_conf_ctx {
struct gendisk *disk;
@@ -578,128 +557,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q, false)))
-static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
-{
- int i, ret;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++) {
- ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
- if (ret) {
- while (--i >= 0)
- percpu_counter_destroy(&rwstat->cpu_cnt[i]);
- return ret;
- }
- atomic64_set(&rwstat->aux_cnt[i], 0);
- }
- return 0;
-}
-
-static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
-{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- percpu_counter_destroy(&rwstat->cpu_cnt[i]);
-}
-
-/**
- * blkg_rwstat_add - add a value to a blkg_rwstat
- * @rwstat: target blkg_rwstat
- * @op: REQ_OP and flags
- * @val: value to add
- *
- * Add @val to @rwstat. The counters are chosen according to @rw. The
- * caller is responsible for synchronizing calls to this function.
- */
-static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
- unsigned int op, uint64_t val)
-{
- struct percpu_counter *cnt;
-
- if (op_is_discard(op))
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
- else if (op_is_write(op))
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
- else
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
-
- percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
-
- if (op_is_sync(op))
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
- else
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
-
- percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
-}
-
-/**
- * blkg_rwstat_read - read the current values of a blkg_rwstat
- * @rwstat: blkg_rwstat to read
- *
- * Read the current snapshot of @rwstat and return it in the aux counts.
- */
-static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
- struct blkg_rwstat_sample *result)
-{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- result->cnt[i] =
- percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
-}
-
-/**
- * blkg_rwstat_total - read the total count of a blkg_rwstat
- * @rwstat: blkg_rwstat to read
- *
- * Return the total count of @rwstat regardless of the IO direction. This
- * function can be called without synchronization and takes care of u64
- * atomicity.
- */
-static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
-{
- struct blkg_rwstat_sample tmp = { };
-
- blkg_rwstat_read(rwstat, &tmp);
- return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
-}
-
-/**
- * blkg_rwstat_reset - reset a blkg_rwstat
- * @rwstat: blkg_rwstat to reset
- */
-static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
-{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++) {
- percpu_counter_set(&rwstat->cpu_cnt[i], 0);
- atomic64_set(&rwstat->aux_cnt[i], 0);
- }
-}
-
-/**
- * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
- * @to: the destination blkg_rwstat
- * @from: the source
- *
- * Add @from's count including the aux one to @to's aux count.
- */
-static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
- struct blkg_rwstat *from)
-{
- u64 sum[BLKG_RWSTAT_NR];
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
- &to->aux_cnt[i]);
-}
-
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
struct bio *bio);
@@ -745,15 +602,33 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
throtl = blk_throtl_bio(q, blkg, bio);
if (!throtl) {
+ struct blkg_iostat_set *bis;
+ int rwd, cpu;
+
+ if (op_is_discard(bio->bi_opf))
+ rwd = BLKG_IOSTAT_DISCARD;
+ else if (op_is_write(bio->bi_opf))
+ rwd = BLKG_IOSTAT_WRITE;
+ else
+ rwd = BLKG_IOSTAT_READ;
+
+ cpu = get_cpu();
+ bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
+ u64_stats_update_begin(&bis->sync);
+
/*
* If the bio is flagged with BIO_QUEUE_ENTERED it means this
* is a split bio and we would have already accounted for the
* size of the bio.
*/
if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
- blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
- bio->bi_iter.bi_size);
- blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
+ bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
+ bis->cur.ios[rwd]++;
+
+ u64_stats_update_end(&bis->sync);
+ if (cgroup_subsys_on_dfl(io_cgrp_subsys))
+ cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu);
+ put_cpu();
}
blkcg_bio_issue_init(bio);
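
The fast path above only bumps this CPU's blkg_iostat_set under u64_stats_update_begin/end; readers use the matching fetch/retry sequence. A hedged sketch of such a reader follows; the helper name is invented, and the real aggregation happens from the cgroup rstat flush in block/blk-cgroup.c.

/*
 * Sketch: take a consistent snapshot of one CPU's counters.
 */
static void example_iostat_snapshot(struct blkg_iostat_set *bis,
				    struct blkg_iostat *out)
{
	unsigned int seq;

	do {
		seq = u64_stats_fetch_begin(&bis->sync);
		*out = bis->cur;		/* copy bytes[] and ios[] */
	} while (u64_stats_fetch_retry(&bis->sync, seq));
}
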
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 0bf056de5cc3..11cfd6470b1a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -10,103 +10,239 @@ struct blk_mq_tags;
struct blk_flush_queue;
/**
- * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
+ * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
+ * block device
*/
struct blk_mq_hw_ctx {
struct {
+ /** @lock: Protects the dispatch list. */
spinlock_t lock;
+ /**
+ * @dispatch: Used for requests that are ready to be
+ * dispatched to the hardware but for some reason (e.g. lack of
+ * resources) could not be sent to the hardware. As soon as the
+ * driver can send new requests, requests on this list will
+ * be sent first for a fairer dispatch.
+ */
struct list_head dispatch;
- unsigned long state; /* BLK_MQ_S_* flags */
+ /**
+ * @state: BLK_MQ_S_* flags. Defines the state of the hw
+ * queue (active, scheduled to restart, stopped).
+ */
+ unsigned long state;
} ____cacheline_aligned_in_smp;
+ /**
+ * @run_work: Used for scheduling a hardware queue run at a later time.
+ */
struct delayed_work run_work;
+ /** @cpumask: Map of available CPUs where this hctx can run. */
cpumask_var_t cpumask;
+ /**
+ * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
+ * selection from @cpumask.
+ */
int next_cpu;
+ /**
+ * @next_cpu_batch: Counter of how many work items remain in the batch
+ * before changing to the next CPU.
+ */
int next_cpu_batch;
- unsigned long flags; /* BLK_MQ_F_* flags */
+ /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
+ unsigned long flags;
+ /**
+ * @sched_data: Pointer owned by the IO scheduler attached to a request
+ * queue. It's up to the IO scheduler how to use this pointer.
+ */
void *sched_data;
+ /**
+ * @queue: Pointer to the request queue that owns this hardware context.
+ */
struct request_queue *queue;
+ /** @fq: Queue of requests that need to perform a flush operation. */
struct blk_flush_queue *fq;
+ /**
+ * @driver_data: Pointer to data owned by the block driver that created
+ * this hctx.
+ */
void *driver_data;
+ /**
+ * @ctx_map: Bitmap for each software queue. If a bit is set, there is a
+ * pending request in that software queue.
+ */
struct sbitmap ctx_map;
+ /**
+ * @dispatch_from: Software queue to be used when no scheduler was
+ * selected.
+ */
struct blk_mq_ctx *dispatch_from;
+ /**
+ * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
+ * decide if the hw_queue is busy using Exponential Weighted Moving
+ * Average algorithm.
+ */
unsigned int dispatch_busy;
+ /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
unsigned short type;
+ /** @nr_ctx: Number of software queues. */
unsigned short nr_ctx;
+ /** @ctxs: Array of software queues. */
struct blk_mq_ctx **ctxs;
+ /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
spinlock_t dispatch_wait_lock;
+ /**
+ * @dispatch_wait: Waitqueue on which requests wait when no tag is
+ * available at the moment, so they can be retried later.
+ */
wait_queue_entry_t dispatch_wait;
+
+ /**
+ * @wait_index: Index of next available dispatch_wait queue to insert
+ * requests.
+ */
atomic_t wait_index;
+ /**
+ * @tags: Tags owned by the block driver. A tag in this set is only
+ * assigned when a request is dispatched from a hardware queue.
+ */
struct blk_mq_tags *tags;
+ /**
+ * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
+ * scheduler associated with a request queue, a tag is assigned when
+ * that request is allocated. Else, this member is not used.
+ */
struct blk_mq_tags *sched_tags;
+ /** @queued: Number of queued requests. */
unsigned long queued;
+ /** @run: Number of dispatched requests. */
unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER 7
+ /** @dispatched: Number of dispatched requests by queue. */
unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
+ /** @numa_node: NUMA node the storage adapter has been connected to. */
unsigned int numa_node;
+ /** @queue_num: Index of this hardware queue. */
unsigned int queue_num;
+ /**
+ * @nr_active: Number of active requests. Only used when a tag set is
+ * shared across request queues.
+ */
atomic_t nr_active;
+ /** @cpuhp_dead: List used to store requests if a CPU dies. */
struct hlist_node cpuhp_dead;
+ /** @kobj: Kernel object for sysfs. */
struct kobject kobj;
+ /** @poll_considered: Count times blk_poll() was called. */
unsigned long poll_considered;
+ /** @poll_invoked: Count how many requests blk_poll() polled. */
unsigned long poll_invoked;
+ /** @poll_success: Count how many polled requests were completed. */
unsigned long poll_success;
#ifdef CONFIG_BLK_DEBUG_FS
+ /**
+ * @debugfs_dir: debugfs directory for this hardware queue. Named
+ * as cpu<cpu_number>.
+ */
struct dentry *debugfs_dir;
+ /** @sched_debugfs_dir: debugfs directory for the scheduler. */
struct dentry *sched_debugfs_dir;
#endif
+ /** @hctx_list: List of all hardware queues. */
struct list_head hctx_list;
- /* Must be the last member - see also blk_mq_hw_ctx_size(). */
+ /**
+ * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
+ * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
+ * blk_mq_hw_ctx_size().
+ */
struct srcu_struct srcu[0];
};
+/**
+ * struct blk_mq_queue_map - Map software queues to hardware queues
+ * @mq_map: CPU ID to hardware queue index map. This is an array
+ * with nr_cpu_ids elements. Each element has a value in the range
+ * [@queue_offset, @queue_offset + @nr_queues).
+ * @nr_queues: Number of hardware queues to map CPU IDs onto.
+ * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
+ * driver to map each hardware queue type (enum hctx_type) onto a distinct
+ * set of hardware queues.
+ */
struct blk_mq_queue_map {
unsigned int *mq_map;
unsigned int nr_queues;
unsigned int queue_offset;
};
+/**
+ * enum hctx_type - Type of hardware queue
+ * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
+ * @HCTX_TYPE_READ: Just for READ I/O.
+ * @HCTX_TYPE_POLL: Polled I/O of any kind.
+ * @HCTX_MAX_TYPES: Number of types of hctx.
+ */
enum hctx_type {
- HCTX_TYPE_DEFAULT, /* all I/O not otherwise accounted for */
- HCTX_TYPE_READ, /* just for READ I/O */
- HCTX_TYPE_POLL, /* polled I/O of any kind */
+ HCTX_TYPE_DEFAULT,
+ HCTX_TYPE_READ,
+ HCTX_TYPE_POLL,
HCTX_MAX_TYPES,
};
+/**
+ * struct blk_mq_tag_set - tag set that can be shared between request queues
+ * @map: One or more ctx -> hctx mappings. One map exists for each
+ * hardware queue type (enum hctx_type) that the driver wishes
+ * to support. There are no restrictions on maps being of the
+ * same size, and it's perfectly legal to share maps between
+ * types.
+ * @nr_maps: Number of elements in the @map array. A number in the range
+ * [1, HCTX_MAX_TYPES].
+ * @ops: Pointers to functions that implement block driver behavior.
+ * @nr_hw_queues: Number of hardware queues supported by the block driver that
+ * owns this data structure.
+ * @queue_depth: Number of tags per hardware queue, reserved tags included.
+ * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
+ * allocations.
+ * @cmd_size: Number of additional bytes to allocate per request. The block
+ * driver owns these additional bytes.
+ * @numa_node: NUMA node the storage adapter has been connected to.
+ * @timeout: Request processing timeout in jiffies.
+ * @flags: Zero or more BLK_MQ_F_* flags.
+ * @driver_data: Pointer to data owned by the block driver that created this
+ * tag set.
+ * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
+ * elements.
+ * @tag_list_lock: Serializes tag_list accesses.
+ * @tag_list: List of the request queues that use this tag set. See also
+ * request_queue.tag_set_list.
+ */
struct blk_mq_tag_set {
- /*
- * map[] holds ctx -> hctx mappings, one map exists for each type
- * that the driver wishes to support. There are no restrictions
- * on maps being of the same size, and it's perfectly legal to
- * share maps between types.
- */
struct blk_mq_queue_map map[HCTX_MAX_TYPES];
- unsigned int nr_maps; /* nr entries in map[] */
+ unsigned int nr_maps;
const struct blk_mq_ops *ops;
- unsigned int nr_hw_queues; /* nr hw queues across maps */
- unsigned int queue_depth; /* max hw supported */
+ unsigned int nr_hw_queues;
+ unsigned int queue_depth;
unsigned int reserved_tags;
- unsigned int cmd_size; /* per-request extra data */
+ unsigned int cmd_size;
int numa_node;
unsigned int timeout;
- unsigned int flags; /* BLK_MQ_F_* */
+ unsigned int flags;
void *driver_data;
struct blk_mq_tags **tags;
@@ -115,6 +251,12 @@ struct blk_mq_tag_set {
struct list_head tag_list;
};
+/**
+ * struct blk_mq_queue_data - Data about a request inserted in a queue
+ *
+ * @rq: Request pointer.
+ * @last: If it is the last request in the queue.
+ */
struct blk_mq_queue_data {
struct request *rq;
bool last;
@@ -142,81 +284,101 @@ typedef bool (busy_fn)(struct request_queue *);
typedef void (complete_fn)(struct request *);
typedef void (cleanup_rq_fn)(struct request *);
-
+/**
+ * struct blk_mq_ops - Callback functions that implements block driver
+ * behaviour.
+ */
struct blk_mq_ops {
- /*
- * Queue request
+ /**
+ * @queue_rq: Queue a new request from block IO.
*/
queue_rq_fn *queue_rq;
- /*
- * If a driver uses bd->last to judge when to submit requests to
- * hardware, it must define this function. In case of errors that
- * make us stop issuing further requests, this hook serves the
+ /**
+ * @commit_rqs: If a driver uses bd->last to judge when to submit
+ * requests to hardware, it must define this function. In case of errors
+ * that make us stop issuing further requests, this hook serves the
* purpose of kicking the hardware (which the last request otherwise
* would have done).
*/
commit_rqs_fn *commit_rqs;
- /*
- * Reserve budget before queue request, once .queue_rq is
+ /**
+ * @get_budget: Reserve budget before queue request, once .queue_rq is
* run, it is driver's responsibility to release the
* reserved budget. Also we have to handle failure case
* of .get_budget for avoiding I/O deadlock.
*/
get_budget_fn *get_budget;
+ /**
+ * @put_budget: Release the reserved budget.
+ */
put_budget_fn *put_budget;
- /*
- * Called on request timeout
+ /**
+ * @timeout: Called on request timeout.
*/
timeout_fn *timeout;
- /*
- * Called to poll for completion of a specific tag.
+ /**
+ * @poll: Called to poll for completion of a specific tag.
*/
poll_fn *poll;
+ /**
+ * @complete: Mark the request as complete.
+ */
complete_fn *complete;
- /*
- * Called when the block layer side of a hardware queue has been
- * set up, allowing the driver to allocate/init matching structures.
- * Ditto for exit/teardown.
+ /**
+ * @init_hctx: Called when the block layer side of a hardware queue has
+ * been set up, allowing the driver to allocate/init matching
+ * structures.
*/
init_hctx_fn *init_hctx;
+ /**
+ * @exit_hctx: Ditto for exit/teardown.
+ */
exit_hctx_fn *exit_hctx;
- /*
- * Called for every command allocated by the block layer to allow
- * the driver to set up driver specific data.
+ /**
+ * @init_request: Called for every command allocated by the block layer
+ * to allow the driver to set up driver specific data.
*
* Tag greater than or equal to queue_depth is for setting up
* flush request.
- *
- * Ditto for exit/teardown.
*/
init_request_fn *init_request;
+ /**
+ * @exit_request: Ditto for exit/teardown.
+ */
exit_request_fn *exit_request;
- /* Called from inside blk_get_request() */
+
+ /**
+ * @initialize_rq_fn: Called from inside blk_get_request().
+ */
void (*initialize_rq_fn)(struct request *rq);
- /*
- * Called before freeing one request which isn't completed yet,
- * and usually for freeing the driver private data
+ /**
+ * @cleanup_rq: Called before freeing one request which isn't completed
+ * yet, and usually for freeing the driver private data.
*/
cleanup_rq_fn *cleanup_rq;
- /*
- * If set, returns whether or not this queue currently is busy
+ /**
+ * @busy: If set, returns whether or not this queue currently is busy.
*/
busy_fn *busy;
+ /**
+ * @map_queues: This allows drivers to specify their own queue mapping by
+ * overriding the setup-time function that builds the mq_map.
+ */
map_queues_fn *map_queues;
#ifdef CONFIG_BLK_DEBUG_FS
- /*
- * Used by the debugfs implementation to show driver-specific
+ /**
+ * @show_rq: Used by the debugfs implementation to show driver-specific
* information about a request.
*/
void (*show_rq)(struct seq_file *m, struct request *rq);
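
For a driver, the structures documented above come together as one blk_mq_ops table plus one blk_mq_tag_set handed to blk_mq_alloc_tag_set() and blk_mq_init_queue(). A hedged sketch with invented example_* names, no real hardware behind it, and error handling trimmed so the field usage stays visible.

#include <linux/blk-mq.h>
#include <linux/module.h>
#include <linux/err.h>

struct example_cmd {
	int something;			/* per-request driver data (cmd_size) */
};

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	/* a real driver would issue the request to hardware here */
	blk_mq_end_request(rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
};

static struct blk_mq_tag_set example_tag_set = {
	.ops		= &example_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.numa_node	= NUMA_NO_NODE,
	.cmd_size	= sizeof(struct example_cmd),
	.flags		= BLK_MQ_F_SHOULD_MERGE,
};

static int example_init(void)
{
	struct request_queue *q;
	int ret;

	ret = blk_mq_alloc_tag_set(&example_tag_set);
	if (ret)
		return ret;

	q = blk_mq_init_queue(&example_tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&example_tag_set);
		return PTR_ERR(q);
	}
	/* q would normally be attached to a gendisk here */
	return 0;
}
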
@@ -262,7 +424,6 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_free_request(struct request *rq);
-bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
bool blk_mq_queue_inflight(struct request_queue *q);
@@ -301,9 +462,25 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
+/**
+ * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
+ * @rq: target request.
+ */
+static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
+{
+ return READ_ONCE(rq->state);
+}
+
+static inline int blk_mq_request_started(struct request *rq)
+{
+ return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
+}
+
+static inline int blk_mq_request_completed(struct request *rq)
+{
+ return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
+}
-int blk_mq_request_started(struct request *rq);
-int blk_mq_request_completed(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
@@ -324,7 +501,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
@@ -343,14 +520,29 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q);
unsigned int blk_mq_rq_cpu(struct request *rq);
-/*
+/**
+ * blk_mq_rq_from_pdu - cast a PDU to a request
+ * @pdu: the PDU (Protocol Data Unit) to be cast
+ *
+ * Return: request
+ *
* Driver command data is immediately after the request. So subtract request
- * size to get back to the original request, add request size to get the PDU.
+ * size to get back to the original request.
*/
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
return pdu - sizeof(struct request);
}
+
+/**
+ * blk_mq_rq_to_pdu - cast a request to a PDU
+ * @rq: the request to be cast
+ *
+ * Return: pointer to the PDU
+ *
+ * Driver command data is immediately after the request. So add the request
+ * size to get the PDU.
+ */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
return rq + 1;
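
Putting the two helpers together: a driver that set cmd_size = sizeof(struct example_pdu) in its tag set can hop between a request and its PDU in either direction. The example_pdu and example_complete names are invented for the sketch.

struct example_pdu {
	u32 opcode;		/* whatever the driver stores per request */
};

static void example_complete(struct request *rq)
{
	struct example_pdu *pdu = blk_mq_rq_to_pdu(rq);	/* == rq + 1 */

	/* the inverse mapping recovers the request from the PDU */
	WARN_ON(blk_mq_rq_from_pdu(pdu) != rq);
}
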
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d688b96d1d63..70254ae11769 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -153,10 +153,10 @@ struct bio {
unsigned short bi_write_hint;
blk_status_t bi_status;
u8 bi_partno;
+ atomic_t __bi_remaining;
struct bvec_iter bi_iter;
- atomic_t __bi_remaining;
bio_end_io_t *bi_end_io;
void *bi_private;
@@ -290,6 +290,12 @@ enum req_opf {
REQ_OP_ZONE_RESET_ALL = 8,
/* write the zero filled sector many times */
REQ_OP_WRITE_ZEROES = 9,
+ /* Open a zone */
+ REQ_OP_ZONE_OPEN = 10,
+ /* Close a zone */
+ REQ_OP_ZONE_CLOSE = 11,
+ /* Transition a zone to full */
+ REQ_OP_ZONE_FINISH = 12,
/* SCSI passthrough using struct scsi_request */
REQ_OP_SCSI_IN = 32,
@@ -371,6 +377,7 @@ enum stat_group {
STAT_READ,
STAT_WRITE,
STAT_DISCARD,
+ STAT_FLUSH,
NR_STAT_GROUPS
};
@@ -417,6 +424,25 @@ static inline bool op_is_discard(unsigned int op)
return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}
+/*
+ * Check if a bio or request operation is a zone management operation, with
+ * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
+ * due to its different handling in the block layer and device response in
+ * case of command failure.
+ */
+static inline bool op_is_zone_mgmt(enum req_opf op)
+{
+ switch (op & REQ_OP_MASK) {
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
+ return true;
+ default:
+ return false;
+ }
+}
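
The new REQ_OP_ZONE_OPEN/CLOSE/FINISH opcodes are all issued through the same zone-management path, and op_is_zone_mgmt() is the predicate used to recognize them (reset-all excepted). A hedged in-kernel sketch; blkdev_zone_mgmt() is the helper introduced later in this series, and the example_* wrappers are invented.

static bool example_is_zone_mgmt_bio(struct bio *bio)
{
	/* true for RESET/OPEN/CLOSE/FINISH, false for RESET_ALL */
	return op_is_zone_mgmt(bio_op(bio));
}

static int example_finish_zone(struct block_device *bdev, sector_t zone_start,
			       sector_t zone_sectors)
{
	/* REQ_OP_ZONE_OPEN and _CLOSE are issued the same way */
	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_FINISH, zone_start,
				zone_sectors, GFP_KERNEL);
}
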
+
static inline int op_stat_group(unsigned int op)
{
if (op_is_discard(op))
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f3ea78b0c91c..397bb9bc230b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -349,25 +349,25 @@ struct queue_limits {
enum blk_zoned_model zoned;
};
+typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
+ void *data);
+
#ifdef CONFIG_BLK_DEV_ZONED
-/*
- * Maximum number of zones to report with a single report zones command.
- */
-#define BLK_ZONED_REPORT_MAX_ZONES 8192U
+#define BLK_ALL_ZONES ((unsigned int)-1)
+int blkdev_report_zones(struct block_device *bdev, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
extern unsigned int blkdev_nr_zones(struct block_device *bdev);
-extern int blkdev_report_zones(struct block_device *bdev,
- sector_t sector, struct blk_zone *zones,
- unsigned int *nr_zones);
-extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
- sector_t nr_sectors, gfp_t gfp_mask);
+extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
+ sector_t sectors, sector_t nr_sectors,
+ gfp_t gfp_mask);
extern int blk_revalidate_disk_zones(struct gendisk *disk);
extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
-extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
+extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
@@ -388,9 +388,9 @@ static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
return -ENOTTY;
}
-static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
- fmode_t mode, unsigned int cmd,
- unsigned long arg)
+static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
+ fmode_t mode, unsigned int cmd,
+ unsigned long arg)
{
return -ENOTTY;
}
@@ -411,7 +411,6 @@ struct request_queue {
/* sw queues */
struct blk_mq_ctx __percpu *queue_ctx;
- unsigned int nr_queues;
unsigned int queue_depth;
@@ -1709,7 +1708,7 @@ struct block_device_operations {
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
int (*report_zones)(struct gendisk *, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones);
+ unsigned int nr_zones, report_zones_cb cb, void *data);
struct module *owner;
const struct pr_ops *pr_ops;
};
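
report_zones() is now callback driven: the caller hands blkdev_report_zones() a report_zones_cb that is invoked once per zone instead of filling a caller-supplied array (device-mapper follows the same pattern below). A hedged sketch of a caller counting sequential-write-required zones; the example_* names are invented.

#include <linux/blkdev.h>

static int example_count_seq_zone(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	unsigned int *nr_seq = data;

	if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
		(*nr_seq)++;
	return 0;		/* a non-zero return stops the iteration */
}

static int example_count_seq_zones(struct block_device *bdev,
				   unsigned int *nr_seq)
{
	int ret;

	*nr_seq = 0;
	/* BLK_ALL_ZONES asks for every zone starting at sector 0 */
	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				  example_count_seq_zone, nr_seq);
	return ret < 0 ? ret : 0;
}
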
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 399ad8632356..475668c69dbc 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -17,6 +17,7 @@
struct dm_dev;
struct dm_target;
struct dm_table;
+struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
@@ -93,9 +94,9 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
-typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
- struct blk_zone *zones,
- unsigned int *nr_zones);
+typedef int (*dm_report_zones_fn) (struct dm_target *ti,
+ struct dm_report_zones_args *args,
+ unsigned int nr_zones);
/*
* These iteration functions are typically used to check (and combine)
@@ -422,10 +423,23 @@ struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
-void dm_remap_zone_report(struct dm_target *ti, sector_t start,
- struct blk_zone *zones, unsigned int *nr_zones);
union map_info *dm_get_rq_mapinfo(struct request *rq);
+#ifdef CONFIG_BLK_DEV_ZONED
+struct dm_report_zones_args {
+ struct dm_target *tgt;
+ sector_t next_sector;
+
+ void *orig_data;
+ report_zones_cb orig_cb;
+ unsigned int zone_idx;
+
+ /* must be filled by ->report_zones before calling dm_report_zones_cb */
+ sector_t start;
+};
+int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data);
+#endif /* CONFIG_BLK_DEV_ZONED */
+
/*
* Device mapper functions to parse and create devices specified by the
* parameter "dm-mod.create="
@@ -594,9 +608,6 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
*/
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
-#define dm_array_too_big(fixed, obj, num) \
- ((num) > (UINT_MAX - (fixed)) / (obj))
-
/*
* Sector offset taken relative to the start of the target instead of
* relative to the start of the device.
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e0d909d35763..dde6dc4492a0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -148,8 +148,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* File is opened with O_PATH; almost nothing can be done with it */
#define FMODE_PATH ((__force fmode_t)0x4000)
-/* File needs atomic accesses to f_pos */
-#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
/* Write access to underlying fs */
#define FMODE_WRITER ((__force fmode_t)0x10000)
/* Has read method(s) */
@@ -2632,8 +2630,6 @@ extern void bd_finish_claiming(struct block_device *bdev,
extern void bd_abort_claiming(struct block_device *bdev,
struct block_device *whole, void *holder);
extern void blkdev_put(struct block_device *bdev, fmode_t mode);
-extern int __blkdev_reread_part(struct block_device *bdev);
-extern int blkdev_reread_part(struct block_device *bdev);
#ifdef CONFIG_SYSFS
extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
@@ -2703,8 +2699,6 @@ extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
#ifdef CONFIG_BLOCK
-extern void check_disk_size_change(struct gendisk *disk,
- struct block_device *bdev, bool verbose);
extern int revalidate_disk(struct gendisk *);
extern int check_disk_change(struct block_device *);
extern int __invalidate_device(struct block_device *, bool);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 8b5330dd5ac0..8bb63027e4d6 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -621,9 +621,10 @@ extern void blk_invalidate_devt(dev_t devt);
extern dev_t blk_lookup_devt(const char *name, int partno);
extern char *disk_name (struct gendisk *hd, int partno, char *buf);
+int bdev_disk_changed(struct block_device *bdev, bool invalidate);
+int blk_add_partitions(struct gendisk *disk, struct block_device *bdev);
+int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev);
extern int disk_expand_part_tbl(struct gendisk *disk, int target);
-extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
-extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
int partno, sector_t start,
sector_t len, int flags,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 207e7ee764ce..d3bbfddf616a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -484,6 +484,7 @@ enum hsm_task_states {
};
enum ata_completion_errors {
+ AC_ERR_OK = 0, /* no error */
AC_ERR_DEV = (1 << 0), /* device reported error */
AC_ERR_HSM = (1 << 1), /* host state machine violation */
AC_ERR_TIMEOUT = (1 << 2), /* timeout */
@@ -891,9 +892,9 @@ struct ata_port_operations {
/*
* Command execution
*/
- int (*qc_defer)(struct ata_queued_cmd *qc);
- int (*check_atapi_dma)(struct ata_queued_cmd *qc);
- void (*qc_prep)(struct ata_queued_cmd *qc);
+ int (*qc_defer)(struct ata_queued_cmd *qc);
+ int (*check_atapi_dma)(struct ata_queued_cmd *qc);
+ enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
@@ -1161,7 +1162,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode);
extern const char *ata_mode_string(unsigned long xfer_mask);
extern unsigned long ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
@@ -1893,9 +1894,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops;
.sg_tablesize = LIBATA_MAX_PRD, \
.dma_boundary = ATA_DMA_BOUNDARY
-extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
-extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index fc0b4b19c900..5a4623fc586b 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -22,6 +22,7 @@
#define SNOR_MFR_INTEL CFI_MFR_INTEL
#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */
#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */
+#define SNOR_MFR_ISSI CFI_MFR_PMC
#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
#define SNOR_MFR_SPANSION CFI_MFR_AMD
#define SNOR_MFR_SST CFI_MFR_SST
@@ -133,7 +134,7 @@
#define SR_E_ERR BIT(5)
#define SR_P_ERR BIT(6)
-#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
+#define SR1_QUAD_EN_BIT6 BIT(6)
/* Enhanced Volatile Configuration Register bits */
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
@@ -144,10 +145,8 @@
#define FSR_P_ERR BIT(4) /* Program operation status */
#define FSR_PT_ERR BIT(1) /* Protection error bit */
-/* Configuration Register bits. */
-#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
-
/* Status Register 2 bits. */
+#define SR2_QUAD_EN_BIT1 BIT(1)
#define SR2_QUAD_EN_BIT7 BIT(7)
/* Supported SPI protocols */
@@ -243,6 +242,9 @@ enum spi_nor_option_flags {
SNOR_F_4B_OPCODES = BIT(6),
SNOR_F_HAS_4BAIT = BIT(7),
SNOR_F_HAS_LOCK = BIT(8),
+ SNOR_F_HAS_16BIT_SR = BIT(9),
+ SNOR_F_NO_READ_CR = BIT(10),
+
};
/**
@@ -466,6 +468,34 @@ enum spi_nor_pp_command_index {
struct spi_nor;
/**
+ * struct spi_nor_controller_ops - SPI NOR controller driver specific
+ * operations.
+ * @prepare: [OPTIONAL] do some preparations for the
+ * read/write/erase/lock/unlock operations.
+ * @unprepare: [OPTIONAL] do some post work after the
+ * read/write/erase/lock/unlock operations.
+ * @read_reg: read out the register.
+ * @write_reg: write data to the register.
+ * @read: read data from the SPI NOR.
+ * @write: write data to the SPI NOR.
+ * @erase: erase a sector of the SPI NOR at the offset @offs; if
+ * not provided by the driver, spi-nor will send the erase
+ * opcode via write_reg().
+ */
+struct spi_nor_controller_ops {
+ int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+ void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+ int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len);
+ int (*write_reg)(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len);
+
+ ssize_t (*read)(struct spi_nor *nor, loff_t from, size_t len, u8 *buf);
+ ssize_t (*write)(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf);
+ int (*erase)(struct spi_nor *nor, loff_t offs);
+};
+
+/**
* struct spi_nor_locking_ops - SPI NOR locking methods
* @lock: lock a region of the SPI NOR.
* @unlock: unlock a region of the SPI NOR.
@@ -549,19 +579,7 @@ struct flash_info;
* @read_proto: the SPI protocol for read operations
* @write_proto: the SPI protocol for write operations
* @reg_proto the SPI protocol for read_reg/write_reg/erase operations
- * @prepare: [OPTIONAL] do some preparations for the
- * read/write/erase/lock/unlock operations
- * @unprepare: [OPTIONAL] do some post work after the
- * read/write/erase/lock/unlock operations
- * @read_reg: [DRIVER-SPECIFIC] read out the register
- * @write_reg: [DRIVER-SPECIFIC] write data to the register
- * @read: [DRIVER-SPECIFIC] read data from the SPI NOR
- * @write: [DRIVER-SPECIFIC] write data to the SPI NOR
- * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
- * at the offset @offs; if not provided by the driver,
- * spi-nor will send the erase opcode via write_reg()
- * @clear_sr_bp: [FLASH-SPECIFIC] clears the Block Protection Bits from
- * the SPI NOR Status Register.
+ * @controller_ops: SPI NOR controller driver specific operations.
* @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings.
* The structure includes legacy flash parameters and
* settings that can be overwritten by the spi_nor_fixups
@@ -588,18 +606,8 @@ struct spi_nor {
bool sst_write_second;
u32 flags;
- int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
- void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
- int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
- int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
-
- ssize_t (*read)(struct spi_nor *nor, loff_t from,
- size_t len, u_char *read_buf);
- ssize_t (*write)(struct spi_nor *nor, loff_t to,
- size_t len, const u_char *write_buf);
- int (*erase)(struct spi_nor *nor, loff_t offs);
+ const struct spi_nor_controller_ops *controller_ops;
- int (*clear_sr_bp)(struct spi_nor *nor);
struct spi_nor_flash_parameter params;
void *priv;
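
With the per-device function pointers folded into one const ops table, a
controller driver now supplies something like the sketch below (the my_*
functions are hypothetical stubs; a real driver performs the actual
transfers):

#include <linux/errno.h>
#include <linux/mtd/spi-nor.h>

static int my_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len)
{
	return -EOPNOTSUPP;	/* stub */
}

static int my_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
			size_t len)
{
	return -EOPNOTSUPP;	/* stub */
}

static ssize_t my_read(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
{
	return -EOPNOTSUPP;	/* stub */
}

static ssize_t my_write(struct spi_nor *nor, loff_t to, size_t len,
			const u8 *buf)
{
	return -EOPNOTSUPP;	/* stub */
}

static const struct spi_nor_controller_ops my_controller_ops = {
	.read_reg	= my_read_reg,
	.write_reg	= my_write_reg,
	.read		= my_read,
	.write		= my_write,
};

/* At probe time, instead of filling nor->read_reg and friends: */
/*	nor->controller_ops = &my_controller_ops;		  */
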
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
index 067c9fea64fe..e8c30b39bb27 100644
--- a/include/linux/nvme-fc.h
+++ b/include/linux/nvme-fc.h
@@ -4,33 +4,60 @@
*/
/*
- * This file contains definitions relative to FC-NVME r1.14 (16-020vB).
- * The fcnvme_lsdesc_cr_assoc_cmd struct reflects expected r1.16 content.
+ * This file contains definitions relative to FC-NVME-2 r1.06
+ * (T11-2019-00210-v001).
*/
#ifndef _NVME_FC_H
#define _NVME_FC_H 1
+#include <uapi/scsi/fc/fc_fs.h>
-#define NVME_CMD_SCSI_ID 0xFD
+#define NVME_CMD_FORMAT_ID 0xFD
#define NVME_CMD_FC_ID FC_TYPE_NVME
/* FC-NVME Cmd IU Flags */
-#define FCNVME_CMD_FLAGS_DIRMASK 0x03
-#define FCNVME_CMD_FLAGS_WRITE 0x01
-#define FCNVME_CMD_FLAGS_READ 0x02
+enum {
+ FCNVME_CMD_FLAGS_DIRMASK = 0x03,
+ FCNVME_CMD_FLAGS_WRITE = (1 << 0),
+ FCNVME_CMD_FLAGS_READ = (1 << 1),
+
+ FCNVME_CMD_FLAGS_PICWP = (1 << 2),
+};
+
+enum {
+ FCNVME_CMD_CAT_MASK = 0x0F,
+ FCNVME_CMD_CAT_ADMINQ = 0x01,
+ FCNVME_CMD_CAT_CSSMASK = 0x07,
+ FCNVME_CMD_CAT_CSSFLAG = 0x08,
+};
+
+static inline __u8 fccmnd_set_cat_admin(__u8 rsv_cat)
+{
+ return (rsv_cat & ~FCNVME_CMD_CAT_MASK) | FCNVME_CMD_CAT_ADMINQ;
+}
+
+static inline __u8 fccmnd_set_cat_css(__u8 rsv_cat, __u8 css)
+{
+ return (rsv_cat & ~FCNVME_CMD_CAT_MASK) | FCNVME_CMD_CAT_CSSFLAG |
+ (css & FCNVME_CMD_CAT_CSSMASK);
+}
struct nvme_fc_cmd_iu {
- __u8 scsi_id;
+ __u8 format_id;
__u8 fc_id;
__be16 iu_len;
- __u8 rsvd4[3];
+ __u8 rsvd4[2];
+ __u8 rsv_cat;
__u8 flags;
__be64 connection_id;
__be32 csn;
__be32 data_len;
struct nvme_command sqe;
- __be32 rsvd88[2];
+ __u8 dps;
+ __u8 lbads;
+ __be16 ms;
+ __be32 rsvd92;
};
#define NVME_FC_SIZEOF_ZEROS_RSP 12
@@ -38,11 +65,12 @@ struct nvme_fc_cmd_iu {
enum {
FCNVME_SC_SUCCESS = 0,
FCNVME_SC_INVALID_FIELD = 1,
- FCNVME_SC_INVALID_CONNID = 2,
+ /* reserved 2 */
+ FCNVME_SC_ILL_CONN_PARAMS = 3,
};
struct nvme_fc_ersp_iu {
- __u8 status_code;
+ __u8 ersp_result;
__u8 rsvd1;
__be16 iu_len;
__be32 rsn;
@@ -53,14 +81,44 @@ struct nvme_fc_ersp_iu {
};
-/* FC-NVME Link Services */
+#define FCNVME_NVME_SR_OPCODE 0x01
+
+struct nvme_fc_nvme_sr_iu {
+ __u8 fc_id;
+ __u8 opcode;
+ __u8 rsvd2;
+ __u8 retry_rctl;
+ __be32 rsvd4;
+};
+
+
+enum {
+ FCNVME_SRSTAT_ACC = 0x0,
+ FCNVME_SRSTAT_INV_FCID = 0x1,
+ /* reserved 0x2 */
+ FCNVME_SRSTAT_LOGICAL_ERR = 0x3,
+ FCNVME_SRSTAT_INV_QUALIF = 0x4,
+ FCNVME_SRSTAT_UNABL2PERFORM = 0x9,
+};
+
+struct nvme_fc_nvme_sr_rsp_iu {
+ __u8 fc_id;
+ __u8 opcode;
+ __u8 rsvd2;
+ __u8 status;
+ __be32 rsvd4;
+};
+
+
+/* FC-NVME Link Services - LS cmd values (w0 bits 31:24) */
enum {
FCNVME_LS_RSVD = 0,
FCNVME_LS_RJT = 1,
FCNVME_LS_ACC = 2,
- FCNVME_LS_CREATE_ASSOCIATION = 3,
- FCNVME_LS_CREATE_CONNECTION = 4,
- FCNVME_LS_DISCONNECT = 5,
+ FCNVME_LS_CREATE_ASSOCIATION = 3, /* Create Association */
+ FCNVME_LS_CREATE_CONNECTION = 4, /* Create I/O Connection */
+ FCNVME_LS_DISCONNECT_ASSOC = 5, /* Disconnect Association */
+ FCNVME_LS_DISCONNECT_CONN = 6, /* Disconnect Connection */
};
/* FC-NVME Link Service Descriptors */
@@ -117,14 +175,17 @@ enum fcnvme_ls_rjt_reason {
FCNVME_RJT_RC_UNSUP = 0x0b,
/* command not supported */
- FCNVME_RJT_RC_INPROG = 0x0e,
- /* command already in progress */
-
FCNVME_RJT_RC_INV_ASSOC = 0x40,
- /* Invalid Association ID*/
+ /* Invalid Association ID */
FCNVME_RJT_RC_INV_CONN = 0x41,
- /* Invalid Connection ID*/
+ /* Invalid Connection ID */
+
+ FCNVME_RJT_RC_INV_PARAM = 0x42,
+ /* Invalid Parameters */
+
+ FCNVME_RJT_RC_INSUF_RES = 0x43,
+ /* Insufficient Resources */
FCNVME_RJT_RC_VENDOR = 0xff,
/* vendor specific error */
@@ -138,14 +199,32 @@ enum fcnvme_ls_rjt_explan {
FCNVME_RJT_EXP_OXID_RXID = 0x17,
/* invalid OX_ID-RX_ID combination */
- FCNVME_RJT_EXP_INSUF_RES = 0x29,
- /* insufficient resources */
-
FCNVME_RJT_EXP_UNAB_DATA = 0x2a,
/* unable to supply requested data */
FCNVME_RJT_EXP_INV_LEN = 0x2d,
/* Invalid payload length */
+
+ FCNVME_RJT_EXP_INV_ERSP_RAT = 0x40,
+ /* Invalid NVMe_ERSP Ratio */
+
+ FCNVME_RJT_EXP_INV_CTLR_ID = 0x41,
+ /* Invalid Controller ID */
+
+ FCNVME_RJT_EXP_INV_QUEUE_ID = 0x42,
+ /* Invalid Queue ID */
+
+ FCNVME_RJT_EXP_INV_SQSIZE = 0x43,
+ /* Invalid Submission Queue Size */
+
+ FCNVME_RJT_EXP_INV_HOSTID = 0x44,
+ /* Invalid HOST ID */
+
+ FCNVME_RJT_EXP_INV_HOSTNQN = 0x45,
+ /* Invalid HOSTNQN */
+
+ FCNVME_RJT_EXP_INV_SUBNQN = 0x46,
+ /* Invalid SUBNQN */
};
/* FCNVME_LSDESC_RJT */
@@ -209,21 +288,11 @@ struct fcnvme_lsdesc_cr_conn_cmd {
__be32 rsvd52;
};
-/* Disconnect Scope Values */
-enum {
- FCNVME_DISCONN_ASSOCIATION = 0,
- FCNVME_DISCONN_CONNECTION = 1,
-};
-
/* FCNVME_LSDESC_DISCONN_CMD */
struct fcnvme_lsdesc_disconn_cmd {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
- u8 rsvd8[3];
- /* note: scope is really a 1 bit field */
- u8 scope; /* FCNVME_DISCONN_xxx */
- __be32 rsvd12;
- __be64 id;
+ __be32 rsvd8[4];
};
/* FCNVME_LSDESC_CONN_ID */
@@ -242,9 +311,14 @@ struct fcnvme_lsdesc_assoc_id {
/* r_ctl values */
enum {
- FCNVME_RS_RCTL_DATA = 1,
- FCNVME_RS_RCTL_XFER_RDY = 5,
- FCNVME_RS_RCTL_RSP = 8,
+ FCNVME_RS_RCTL_CMND = 0x6,
+ FCNVME_RS_RCTL_DATA = 0x1,
+ FCNVME_RS_RCTL_CONF = 0x3,
+ FCNVME_RS_RCTL_SR = 0x9,
+ FCNVME_RS_RCTL_XFER_RDY = 0x5,
+ FCNVME_RS_RCTL_RSP = 0x7,
+ FCNVME_RS_RCTL_ERSP = 0x8,
+ FCNVME_RS_RCTL_SR_RSP = 0xA,
};
@@ -264,7 +338,10 @@ struct fcnvme_ls_acc_hdr {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_rqst rqst;
- /* Followed by cmd-specific ACC descriptors, see next definitions */
+ /*
+ * Followed by cmd-specific ACCEPT descriptors, see xxx_acc
+ * definitions below
+ */
};
/* FCNVME_LS_CREATE_ASSOCIATION */
@@ -302,25 +379,39 @@ struct fcnvme_ls_cr_conn_acc {
struct fcnvme_lsdesc_conn_id connectid;
};
-/* FCNVME_LS_DISCONNECT */
-struct fcnvme_ls_disconnect_rqst {
+/* FCNVME_LS_DISCONNECT_ASSOC */
+struct fcnvme_ls_disconnect_assoc_rqst {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_assoc_id associd;
struct fcnvme_lsdesc_disconn_cmd discon_cmd;
};
-struct fcnvme_ls_disconnect_acc {
+struct fcnvme_ls_disconnect_assoc_acc {
+ struct fcnvme_ls_acc_hdr hdr;
+};
+
+
+/* FCNVME_LS_DISCONNECT_CONN */
+struct fcnvme_ls_disconnect_conn_rqst {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_assoc_id associd;
+ struct fcnvme_lsdesc_disconn_cmd connectid;
+};
+
+struct fcnvme_ls_disconnect_conn_acc {
struct fcnvme_ls_acc_hdr hdr;
};
/*
- * Yet to be defined in FC-NVME:
+ * Default R_A_TOV is pulled in from fc_fs.h but needs conversion
+ * from ms to seconds for our use.
*/
-#define NVME_FC_CONNECT_TIMEOUT_SEC 2 /* 2 seconds */
-#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */
-#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */
+#define FC_TWO_TIMES_R_A_TOV (2 * (FC_DEF_R_A_TOV / 1000))
+#define NVME_FC_LS_TIMEOUT_SEC FC_TWO_TIMES_R_A_TOV
+#define NVME_FC_TGTOP_TIMEOUT_SEC FC_TWO_TIMES_R_A_TOV
/*
* TRADDR string must be of form "nn-<16hexdigits>:pn-<16hexdigits>"
@@ -328,6 +419,7 @@ struct fcnvme_ls_disconnect_acc {
* infront of the <16hexdigits>. Without is considered the "min" string
* and with is considered the "max" string. The hexdigits may be upper
* or lower case.
+ * Note: FC-NVME-2 standard requires a "0x" prefix.
*/
#define NVME_FC_TRADDR_NNLEN 3 /* "?n-" */
#define NVME_FC_TRADDR_OXNNLEN 5 /* "?n-0x" */
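
The new rsv_cat byte of the command IU is filled through the helpers above.
A small sketch, assuming the caller owns the IU (fill_cmd_iu_category() is
hypothetical, and command-set index 0 is only an example value):

#include <linux/types.h>
#include <linux/nvme-fc.h>

static void fill_cmd_iu_category(struct nvme_fc_cmd_iu *iu, bool is_admin)
{
	iu->format_id = NVME_CMD_FORMAT_ID;
	iu->fc_id = NVME_CMD_FC_ID;

	if (is_admin)
		iu->rsv_cat = fccmnd_set_cat_admin(iu->rsv_cat);
	else
		iu->rsv_cat = fccmnd_set_cat_css(iu->rsv_cat, 0);
}
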
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index f61d6906e59d..3d5189f46cb1 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -107,8 +107,22 @@ enum {
NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
- NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
+ NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
+ NVME_REG_BPINFO = 0x0040, /* Boot Partition Information */
+ NVME_REG_BPRSEL = 0x0044, /* Boot Partition Read Select */
+ NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
+ * Location
+ */
+ NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
+ NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
+ NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
+ NVME_REG_PMREBS = 0x0e0c, /* Persistent Memory Region Elasticity
+ * Buffer Size
+ */
+ NVME_REG_PMRSWTP = 0x0e10, /* Persistent Memory Region Sustained
+ * Write Throughput
+ */
NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */
};
@@ -295,6 +309,14 @@ enum {
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
+ NVME_CTRL_CTRATT_128_ID = 1 << 0,
+ NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1,
+ NVME_CTRL_CTRATT_NVM_SETS = 1 << 2,
+ NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3,
+ NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4,
+ NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
+ NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
+ NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
};
struct nvme_lbaf {
@@ -352,6 +374,9 @@ enum {
NVME_ID_CNS_NS_PRESENT = 0x11,
NVME_ID_CNS_CTRL_NS_LIST = 0x12,
NVME_ID_CNS_CTRL_LIST = 0x13,
+ NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15,
+ NVME_ID_CNS_NS_GRANULARITY = 0x16,
+ NVME_ID_CNS_UUID_LIST = 0x17,
};
enum {
@@ -409,7 +434,8 @@ struct nvme_smart_log {
__u8 avail_spare;
__u8 spare_thresh;
__u8 percent_used;
- __u8 rsvd6[26];
+ __u8 endu_grp_crit_warn_sumry;
+ __u8 rsvd7[25];
__u8 data_units_read[16];
__u8 data_units_written[16];
__u8 host_reads[16];
@@ -423,7 +449,11 @@ struct nvme_smart_log {
__le32 warning_temp_time;
__le32 critical_comp_time;
__le16 temp_sensor[8];
- __u8 rsvd216[296];
+ __le32 thm_temp1_trans_count;
+ __le32 thm_temp2_trans_count;
+ __le32 thm_temp1_total_time;
+ __le32 thm_temp2_total_time;
+ __u8 rsvd232[280];
};
struct nvme_fw_slot_info_log {
@@ -440,6 +470,7 @@ enum {
NVME_CMD_EFFECTS_NIC = 1 << 3,
NVME_CMD_EFFECTS_CCC = 1 << 4,
NVME_CMD_EFFECTS_CSE_MASK = 3 << 16,
+ NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
};
struct nvme_effects_log {
@@ -563,6 +594,7 @@ enum nvme_opcode {
nvme_cmd_compare = 0x05,
nvme_cmd_write_zeroes = 0x08,
nvme_cmd_dsm = 0x09,
+ nvme_cmd_verify = 0x0c,
nvme_cmd_resv_register = 0x0d,
nvme_cmd_resv_report = 0x0e,
nvme_cmd_resv_acquire = 0x11,
@@ -772,6 +804,12 @@ struct nvme_write_zeroes_cmd {
/* Features */
+enum {
+ NVME_TEMP_THRESH_MASK = 0xffff,
+ NVME_TEMP_THRESH_SELECT_SHIFT = 16,
+ NVME_TEMP_THRESH_TYPE_UNDER = 0x100000,
+};
+
struct nvme_feat_auto_pst {
__le64 entries[32];
};
@@ -806,10 +844,14 @@ enum nvme_admin_opcode {
nvme_admin_ns_mgmt = 0x0d,
nvme_admin_activate_fw = 0x10,
nvme_admin_download_fw = 0x11,
+ nvme_admin_dev_self_test = 0x14,
nvme_admin_ns_attach = 0x15,
nvme_admin_keep_alive = 0x18,
nvme_admin_directive_send = 0x19,
nvme_admin_directive_recv = 0x1a,
+ nvme_admin_virtual_mgmt = 0x1c,
+ nvme_admin_nvme_mi_send = 0x1d,
+ nvme_admin_nvme_mi_recv = 0x1e,
nvme_admin_dbbuf = 0x7C,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
@@ -873,6 +915,7 @@ enum {
NVME_FEAT_PLM_CONFIG = 0x13,
NVME_FEAT_PLM_WINDOW = 0x14,
NVME_FEAT_HOST_BEHAVIOR = 0x16,
+ NVME_FEAT_SANITIZE = 0x17,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
@@ -883,6 +926,10 @@ enum {
NVME_LOG_FW_SLOT = 0x03,
NVME_LOG_CHANGED_NS = 0x04,
NVME_LOG_CMD_EFFECTS = 0x05,
+ NVME_LOG_DEVICE_SELF_TEST = 0x06,
+ NVME_LOG_TELEMETRY_HOST = 0x07,
+ NVME_LOG_TELEMETRY_CTRL = 0x08,
+ NVME_LOG_ENDURANCE_GROUP = 0x09,
NVME_LOG_ANA = 0x0c,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
@@ -1290,7 +1337,11 @@ enum {
NVME_SC_SGL_INVALID_OFFSET = 0x16,
NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
+ NVME_SC_SANITIZE_FAILED = 0x1C,
+ NVME_SC_SANITIZE_IN_PROGRESS = 0x1D,
+
NVME_SC_NS_WRITE_PROTECTED = 0x20,
+ NVME_SC_CMD_INTERRUPTED = 0x21,
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
@@ -1328,6 +1379,8 @@ enum {
NVME_SC_NS_NOT_ATTACHED = 0x11a,
NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
NVME_SC_CTRL_LIST_INVALID = 0x11c,
+ NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
+ NVME_SC_PMR_SAN_PROHIBITED = 0x123,
/*
* I/O Command Set Specific - NVM commands:
@@ -1368,6 +1421,7 @@ enum {
NVME_SC_ANA_INACCESSIBLE = 0x302,
NVME_SC_ANA_TRANSITION = 0x303,
NVME_SC_HOST_PATH_ERROR = 0x370,
+ NVME_SC_HOST_ABORTED_CMD = 0x371,
NVME_SC_CRD = 0x1800,
NVME_SC_DNR = 0x4000,
diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/intel-spi.h
index ebb4f332588b..7f53a5c6f35e 100644
--- a/include/linux/platform_data/intel-spi.h
+++ b/include/linux/platform_data/intel-spi.h
@@ -13,6 +13,7 @@ enum intel_spi_type {
INTEL_SPI_BYT = 1,
INTEL_SPI_LPT,
INTEL_SPI_BXT,
+ INTEL_SPI_CNL,
};
/**
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index a986ac12a848..e40d019c3d9d 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -216,15 +216,6 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
*/
bool sbitmap_any_bit_set(const struct sbitmap *sb);
-/**
- * sbitmap_any_bit_clear() - Check for an unset bit in a &struct
- * sbitmap.
- * @sb: Bitmap to check.
- *
- * Return: true if any bit in the bitmap is clear, false otherwise.
- */
-bool sbitmap_any_bit_clear(const struct sbitmap *sb);
-
#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 67a1d86981a9..6666e25606b7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1468,6 +1468,7 @@ extern struct pid *cad_pid;
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */
+#define PF_IO_WORKER 0x20000000 /* Task is an IO worker */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index 53c28d750a45..1ac0d712a9c3 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -42,6 +42,7 @@ static inline bool is_sed_ioctl(unsigned int cmd)
case IOC_OPAL_PSID_REVERT_TPR:
case IOC_OPAL_MBR_DONE:
case IOC_OPAL_WRITE_SHADOW_MBR:
+ case IOC_OPAL_GENERIC_TABLE_RW:
return true;
}
return false;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 64a395c7f689..8688f7adfda7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4169,12 +4169,18 @@ static inline void skb_ext_reset(struct sk_buff *skb)
skb->active_extensions = 0;
}
}
+
+static inline bool skb_has_extensions(struct sk_buff *skb)
+{
+ return unlikely(skb->active_extensions);
+}
#else
static inline void skb_ext_put(struct sk_buff *skb) {}
static inline void skb_ext_reset(struct sk_buff *skb) {}
static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
+static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
#endif /* CONFIG_SKB_EXTENSIONS */
static inline void nf_reset_ct(struct sk_buff *skb)
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 4049d9755cf1..09c32a21555b 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -392,6 +392,9 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
extern int __sys_sendto(int fd, void __user *buff, size_t len,
unsigned int flags, struct sockaddr __user *addr,
int addr_len);
+extern int __sys_accept4_file(struct file *file, unsigned file_flags,
+ struct sockaddr __user *upeer_sockaddr,
+ int __user *upeer_addrlen, int flags);
extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_socket(int family, int type, int protocol);
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 53c0ea9ec9df..0d6e949ba315 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -21,6 +21,7 @@
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/fs.h>
+#include <linux/highmem.h>
#include <crypto/hash_info.h>
#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
@@ -67,6 +68,8 @@ struct tpm_class_ops {
u8 (*status) (struct tpm_chip *chip);
void (*update_timeouts)(struct tpm_chip *chip,
unsigned long *timeout_cap);
+ void (*update_durations)(struct tpm_chip *chip,
+ unsigned long *duration_cap);
int (*go_idle)(struct tpm_chip *chip);
int (*cmd_ready)(struct tpm_chip *chip);
int (*request_locality)(struct tpm_chip *chip, int loc);
@@ -161,6 +164,235 @@ struct tpm_chip {
int locality;
};
+#define TPM_HEADER_SIZE 10
+
+enum tpm2_const {
+ TPM2_PLATFORM_PCR = 24,
+ TPM2_PCR_SELECT_MIN = ((TPM2_PLATFORM_PCR + 7) / 8),
+};
+
+enum tpm2_timeouts {
+ TPM2_TIMEOUT_A = 750,
+ TPM2_TIMEOUT_B = 2000,
+ TPM2_TIMEOUT_C = 200,
+ TPM2_TIMEOUT_D = 30,
+ TPM2_DURATION_SHORT = 20,
+ TPM2_DURATION_MEDIUM = 750,
+ TPM2_DURATION_LONG = 2000,
+ TPM2_DURATION_LONG_LONG = 300000,
+ TPM2_DURATION_DEFAULT = 120000,
+};
+
+enum tpm2_structures {
+ TPM2_ST_NO_SESSIONS = 0x8001,
+ TPM2_ST_SESSIONS = 0x8002,
+};
+
+/* Indicates from what layer of the software stack the error comes from */
+#define TSS2_RC_LAYER_SHIFT 16
+#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
+
+enum tpm2_return_codes {
+ TPM2_RC_SUCCESS = 0x0000,
+ TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
+ TPM2_RC_HANDLE = 0x008B,
+ TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
+ TPM2_RC_FAILURE = 0x0101,
+ TPM2_RC_DISABLED = 0x0120,
+ TPM2_RC_COMMAND_CODE = 0x0143,
+ TPM2_RC_TESTING = 0x090A, /* RC_WARN */
+ TPM2_RC_REFERENCE_H0 = 0x0910,
+ TPM2_RC_RETRY = 0x0922,
+};
+
+enum tpm2_command_codes {
+ TPM2_CC_FIRST = 0x011F,
+ TPM2_CC_HIERARCHY_CONTROL = 0x0121,
+ TPM2_CC_HIERARCHY_CHANGE_AUTH = 0x0129,
+ TPM2_CC_CREATE_PRIMARY = 0x0131,
+ TPM2_CC_SEQUENCE_COMPLETE = 0x013E,
+ TPM2_CC_SELF_TEST = 0x0143,
+ TPM2_CC_STARTUP = 0x0144,
+ TPM2_CC_SHUTDOWN = 0x0145,
+ TPM2_CC_NV_READ = 0x014E,
+ TPM2_CC_CREATE = 0x0153,
+ TPM2_CC_LOAD = 0x0157,
+ TPM2_CC_SEQUENCE_UPDATE = 0x015C,
+ TPM2_CC_UNSEAL = 0x015E,
+ TPM2_CC_CONTEXT_LOAD = 0x0161,
+ TPM2_CC_CONTEXT_SAVE = 0x0162,
+ TPM2_CC_FLUSH_CONTEXT = 0x0165,
+ TPM2_CC_VERIFY_SIGNATURE = 0x0177,
+ TPM2_CC_GET_CAPABILITY = 0x017A,
+ TPM2_CC_GET_RANDOM = 0x017B,
+ TPM2_CC_PCR_READ = 0x017E,
+ TPM2_CC_PCR_EXTEND = 0x0182,
+ TPM2_CC_EVENT_SEQUENCE_COMPLETE = 0x0185,
+ TPM2_CC_HASH_SEQUENCE_START = 0x0186,
+ TPM2_CC_CREATE_LOADED = 0x0191,
+ TPM2_CC_LAST = 0x0193, /* Spec 1.36 */
+};
+
+enum tpm2_permanent_handles {
+ TPM2_RS_PW = 0x40000009,
+};
+
+enum tpm2_capabilities {
+ TPM2_CAP_HANDLES = 1,
+ TPM2_CAP_COMMANDS = 2,
+ TPM2_CAP_PCRS = 5,
+ TPM2_CAP_TPM_PROPERTIES = 6,
+};
+
+enum tpm2_properties {
+ TPM_PT_TOTAL_COMMANDS = 0x0129,
+};
+
+enum tpm2_startup_types {
+ TPM2_SU_CLEAR = 0x0000,
+ TPM2_SU_STATE = 0x0001,
+};
+
+enum tpm2_cc_attrs {
+ TPM2_CC_ATTR_CHANDLES = 25,
+ TPM2_CC_ATTR_RHANDLE = 28,
+};
+
+#define TPM_VID_INTEL 0x8086
+#define TPM_VID_WINBOND 0x1050
+#define TPM_VID_STM 0x104A
+
+enum tpm_chip_flags {
+ TPM_CHIP_FLAG_TPM2 = BIT(1),
+ TPM_CHIP_FLAG_IRQ = BIT(2),
+ TPM_CHIP_FLAG_VIRTUAL = BIT(3),
+ TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
+ TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
+ TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = BIT(6),
+};
+
+#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
+
+struct tpm_header {
+ __be16 tag;
+ __be32 length;
+ union {
+ __be32 ordinal;
+ __be32 return_code;
+ };
+} __packed;
+
+/* A string buffer type for constructing TPM commands. This is based on the
+ * ideas of string buffer code in security/keys/trusted.h but is heap based
+ * in order to keep the stack usage minimal.
+ */
+
+enum tpm_buf_flags {
+ TPM_BUF_OVERFLOW = BIT(0),
+};
+
+struct tpm_buf {
+ unsigned int flags;
+ u8 *data;
+};
+
+enum tpm2_object_attributes {
+ TPM2_OA_USER_WITH_AUTH = BIT(6),
+};
+
+enum tpm2_session_attributes {
+ TPM2_SA_CONTINUE_SESSION = BIT(0),
+};
+
+struct tpm2_hash {
+ unsigned int crypto_id;
+ unsigned int tpm_id;
+};
+
+static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
+ head->tag = cpu_to_be16(tag);
+ head->length = cpu_to_be32(sizeof(*head));
+ head->ordinal = cpu_to_be32(ordinal);
+}
+
+static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+ buf->data = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!buf->data)
+ return -ENOMEM;
+
+ buf->flags = 0;
+ tpm_buf_reset(buf, tag, ordinal);
+ return 0;
+}
+
+static inline void tpm_buf_destroy(struct tpm_buf *buf)
+{
+ free_page((unsigned long)buf->data);
+}
+
+static inline u32 tpm_buf_length(struct tpm_buf *buf)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
+ return be32_to_cpu(head->length);
+}
+
+static inline u16 tpm_buf_tag(struct tpm_buf *buf)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
+ return be16_to_cpu(head->tag);
+}
+
+static inline void tpm_buf_append(struct tpm_buf *buf,
+ const unsigned char *new_data,
+ unsigned int new_len)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ u32 len = tpm_buf_length(buf);
+
+ /* Return silently if overflow has already happened. */
+ if (buf->flags & TPM_BUF_OVERFLOW)
+ return;
+
+ if ((len + new_len) > PAGE_SIZE) {
+ WARN(1, "tpm_buf: overflow\n");
+ buf->flags |= TPM_BUF_OVERFLOW;
+ return;
+ }
+
+ memcpy(&buf->data[len], new_data, new_len);
+ head->length = cpu_to_be32(len + new_len);
+}
+
+static inline void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value)
+{
+ tpm_buf_append(buf, &value, 1);
+}
+
+static inline void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value)
+{
+ __be16 value2 = cpu_to_be16(value);
+
+ tpm_buf_append(buf, (u8 *) &value2, 2);
+}
+
+static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
+{
+ __be32 value2 = cpu_to_be32(value);
+
+ tpm_buf_append(buf, (u8 *) &value2, 4);
+}
+
+static inline u32 tpm2_rc_value(u32 rc)
+{
+ return (rc & BIT(7)) ? rc & 0xff : rc;
+}
+
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
extern int tpm_is_tpm2(struct tpm_chip *chip);
@@ -170,12 +402,6 @@ extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digests);
extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen);
extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
-extern int tpm_seal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options);
-extern int tpm_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options);
extern struct tpm_chip *tpm_default_chip(void);
#else
static inline int tpm_is_tpm2(struct tpm_chip *chip)
@@ -204,18 +430,6 @@ static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max)
return -ENODEV;
}
-static inline int tpm_seal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- return -ENODEV;
-}
-static inline int tpm_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- return -ENODEV;
-}
static inline struct tpm_chip *tpm_default_chip(void)
{
return NULL;
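
The tpm_buf helpers moved into this header are the building blocks for TPM
commands. A hedged sketch of building and sending a TPM2_CC_GET_RANDOM
command with them (example_tpm2_get_random() is hypothetical and response
parsing is omitted; in-kernel users wanting random bytes would normally just
call tpm_get_random()):

#include <linux/tpm.h>

static int example_tpm2_get_random(struct tpm_chip *chip, u16 num_bytes)
{
	struct tpm_buf buf;
	int rc;

	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
	if (rc)
		return rc;

	/* TPM2_CC_GET_RANDOM takes a single u16 parameter: bytesRequested. */
	tpm_buf_append_u16(&buf, num_bytes);

	rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
	tpm_buf_destroy(&buf);
	return rc;
}
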
diff --git a/include/net/tls.h b/include/net/tls.h
index 794e297483ea..f4ad831eaa02 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -356,6 +356,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
new file mode 100644
index 000000000000..72a4d0174b02
--- /dev/null
+++ b/include/trace/events/io_uring.h
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM io_uring
+
+#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IO_URING_H
+
+#include <linux/tracepoint.h>
+
+struct io_wq_work;
+
+/**
+ * io_uring_create - called after a new io_uring context was prepared
+ *
+ * @fd: corresponding file descriptor
+ * @ctx: pointer to a ring context structure
+ * @sq_entries: actual SQ size
+ * @cq_entries: actual CQ size
+ * @flags: SQ ring flags, provided to io_uring_setup(2)
+ *
+ * Allows to trace io_uring creation and provide pointer to a context, that can
+ * be used later to find correlated events.
+ */
+TRACE_EVENT(io_uring_create,
+
+ TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),
+
+ TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),
+
+ TP_STRUCT__entry (
+ __field( int, fd )
+ __field( void *, ctx )
+ __field( u32, sq_entries )
+ __field( u32, cq_entries )
+ __field( u32, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->fd = fd;
+ __entry->ctx = ctx;
+ __entry->sq_entries = sq_entries;
+ __entry->cq_entries = cq_entries;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("ring %p, fd %d sq size %d, cq size %d, flags %d",
+ __entry->ctx, __entry->fd, __entry->sq_entries,
+ __entry->cq_entries, __entry->flags)
+);
+
+/**
+ * io_uring_register - called after a buffer/file/eventfd was successfully
+ * registered for a ring
+ *
+ * @ctx: pointer to a ring context structure
+ * @opcode: describes which operation to perform
+ * @nr_user_files: number of registered files
+ * @nr_user_bufs: number of registered buffers
+ * @cq_ev_fd: whether an eventfd was registered or not
+ * @ret: return code
+ *
+ * Allows to trace fixed files/buffers/eventfds, that could be registered to
+ * avoid an overhead of getting references to them for every operation. This
+ * event, together with io_uring_file_get, can provide a full picture of how
+ * much overhead one can reduce via fixing.
+ */
+TRACE_EVENT(io_uring_register,
+
+ TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
+ unsigned nr_bufs, bool eventfd, long ret),
+
+ TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( unsigned, opcode )
+ __field( unsigned, nr_files )
+ __field( unsigned, nr_bufs )
+ __field( bool, eventfd )
+ __field( long, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->opcode = opcode;
+ __entry->nr_files = nr_files;
+ __entry->nr_bufs = nr_bufs;
+ __entry->eventfd = eventfd;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
+ "eventfd %d, ret %ld",
+ __entry->ctx, __entry->opcode, __entry->nr_files,
+ __entry->nr_bufs, __entry->eventfd, __entry->ret)
+);
+
+/**
+ * io_uring_file_get - called before getting references to an SQE file
+ *
+ * @ctx: pointer to a ring context structure
+ * @fd: SQE file descriptor
+ *
+ * Allows to trace out how often an SQE file reference is obtained, which can
+ * help figuring out if it makes sense to use fixed files, or check that fixed
+ * files are used correctly.
+ */
+TRACE_EVENT(io_uring_file_get,
+
+ TP_PROTO(void *ctx, int fd),
+
+ TP_ARGS(ctx, fd),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( int, fd )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->fd = fd;
+ ),
+
+ TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
+);
+
+/**
+ * io_uring_queue_async_work - called before submitting a new async work
+ *
+ * @ctx: pointer to a ring context structure
+ * @hashed: type of workqueue, hashed or normal
+ * @req: pointer to a submitted request
+ * @work: pointer to a submitted io_wq_work
+ *
+ * Allows to trace asynchronous work submission.
+ */
+TRACE_EVENT(io_uring_queue_async_work,
+
+ TP_PROTO(void *ctx, int rw, void * req, struct io_wq_work *work,
+ unsigned int flags),
+
+ TP_ARGS(ctx, rw, req, work, flags),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( int, rw )
+ __field( void *, req )
+ __field( struct io_wq_work *, work )
+ __field( unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->rw = rw;
+ __entry->req = req;
+ __entry->work = work;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("ring %p, request %p, flags %d, %s queue, work %p",
+ __entry->ctx, __entry->req, __entry->flags,
+ __entry->rw ? "hashed" : "normal", __entry->work)
+);
+
+/**
+ * io_uring_defer_list - called before the io_uring work added into defer_list
+ *
+ * @ctx: pointer to a ring context structure
+ * @req: pointer to a deferred request
+ * @shadow: whether request is shadow or not
+ *
+ * Allows to track deferred requests, to get an insight about what requests are
+ * not started immediately.
+ */
+TRACE_EVENT(io_uring_defer,
+
+ TP_PROTO(void *ctx, void *req, bool shadow),
+
+ TP_ARGS(ctx, req, shadow),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( bool, shadow )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->req = req;
+ __entry->shadow = shadow;
+ ),
+
+ TP_printk("ring %p, request %p%s", __entry->ctx, __entry->req,
+ __entry->shadow ? ", shadow": "")
+);
+
+/**
+ * io_uring_link - called before the io_uring request added into link_list of
+ * another request
+ *
+ * @ctx: pointer to a ring context structure
+ * @req: pointer to a linked request
+ * @target_req: pointer to a previous request, that would contain @req
+ *
+ * Allows to track linked requests, to understand dependencies between requests
+ * and how does it influence their execution flow.
+ */
+TRACE_EVENT(io_uring_link,
+
+ TP_PROTO(void *ctx, void *req, void *target_req),
+
+ TP_ARGS(ctx, req, target_req),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( void *, target_req )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->req = req;
+ __entry->target_req = target_req;
+ ),
+
+ TP_printk("ring %p, request %p linked after %p",
+ __entry->ctx, __entry->req, __entry->target_req)
+);
+
+/**
+ * io_uring_cqring_wait - called before start waiting for an available CQE
+ *
+ * @ctx: pointer to a ring context structure
+ * @min_events: minimal number of events to wait for
+ *
+ * Allows to track waiting for CQE, so that we can e.g. troubleshoot
+ * situations when an application waits for an event that never comes.
+ */
+TRACE_EVENT(io_uring_cqring_wait,
+
+ TP_PROTO(void *ctx, int min_events),
+
+ TP_ARGS(ctx, min_events),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( int, min_events )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->min_events = min_events;
+ ),
+
+ TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
+);
+
+/**
+ * io_uring_fail_link - called before failing a linked request
+ *
+ * @req: request, which links were cancelled
+ * @link: cancelled link
+ *
+ * Allows to track linked requests cancellation, to see not only that some work
+ * was cancelled, but also which request was the reason.
+ */
+TRACE_EVENT(io_uring_fail_link,
+
+ TP_PROTO(void *req, void *link),
+
+ TP_ARGS(req, link),
+
+ TP_STRUCT__entry (
+ __field( void *, req )
+ __field( void *, link )
+ ),
+
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->link = link;
+ ),
+
+ TP_printk("request %p, link %p", __entry->req, __entry->link)
+);
+
+/**
+ * io_uring_complete - called when completing an SQE
+ *
+ * @ctx: pointer to a ring context structure
+ * @user_data: user data associated with the request
+ * @res: result of the request
+ *
+ */
+TRACE_EVENT(io_uring_complete,
+
+ TP_PROTO(void *ctx, u64 user_data, long res),
+
+ TP_ARGS(ctx, user_data, res),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( u64, user_data )
+ __field( long, res )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->user_data = user_data;
+ __entry->res = res;
+ ),
+
+ TP_printk("ring %p, user_data 0x%llx, result %ld",
+ __entry->ctx, (unsigned long long)__entry->user_data,
+ __entry->res)
+);
+
+
+/**
+ * io_uring_submit_sqe - called before submitting one SQE
+ *
+ * @ctx: pointer to a ring context structure
+ * @user_data: user data associated with the request
+ * @force_nonblock: whether the context is blocking or not
+ * @sq_thread: true if sq_thread has submitted this SQE
+ *
+ * Allows to track SQE submitting, to understand what was the source of it, SQ
+ * thread or io_uring_enter call.
+ */
+TRACE_EVENT(io_uring_submit_sqe,
+
+ TP_PROTO(void *ctx, u64 user_data, bool force_nonblock, bool sq_thread),
+
+ TP_ARGS(ctx, user_data, force_nonblock, sq_thread),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( u64, user_data )
+ __field( bool, force_nonblock )
+ __field( bool, sq_thread )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->user_data = user_data;
+ __entry->force_nonblock = force_nonblock;
+ __entry->sq_thread = sq_thread;
+ ),
+
+ TP_printk("ring %p, user data 0x%llx, non block %d, sq_thread %d",
+ __entry->ctx, (unsigned long long) __entry->user_data,
+ __entry->force_nonblock, __entry->sq_thread)
+);
+
+#endif /* _TRACE_IO_URING_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
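
Each TRACE_EVENT() above generates a trace_io_uring_*() helper for the core
to call. A sketch of such a call site (report_ring_created() and its argument
names are hypothetical; fs/io_uring.c uses its own local variables):

#include <linux/types.h>
#include <trace/events/io_uring.h>

static inline void report_ring_created(int fd, void *ctx, u32 sq_entries,
					u32 cq_entries, u32 flags)
{
	trace_io_uring_create(fd, ctx, sq_entries, cq_entries, flags);
}
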
diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
index b048694070e2..37342a13c9cb 100644
--- a/include/trace/events/wbt.h
+++ b/include/trace/events/wbt.h
@@ -33,7 +33,8 @@ TRACE_EVENT(wbt_stat,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->rmean = stat[0].mean;
__entry->rmin = stat[0].min;
__entry->rmax = stat[0].max;
@@ -67,7 +68,8 @@ TRACE_EVENT(wbt_lat,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->lat = div_u64(lat, 1000);
),
@@ -103,7 +105,8 @@ TRACE_EVENT(wbt_step,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->msg = msg;
__entry->step = step;
__entry->window = div_u64(window, 1000);
@@ -138,7 +141,8 @@ TRACE_EVENT(wbt_timer,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->status = status;
__entry->step = step;
__entry->inflight = inflight;
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index 498eec813494..0cdef67135f0 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -120,9 +120,11 @@ struct blk_zone_report {
};
/**
- * struct blk_zone_range - BLKRESETZONE ioctl request
- * @sector: starting sector of the first zone to issue reset write pointer
- * @nr_sectors: Total number of sectors of 1 or more zones to reset
+ * struct blk_zone_range - BLKRESETZONE/BLKOPENZONE/
+ * BLKCLOSEZONE/BLKFINISHZONE ioctl
+ * requests
+ * @sector: Starting sector of the first zone to operate on.
+ * @nr_sectors: Total number of sectors of all zones to operate on.
*/
struct blk_zone_range {
__u64 sector;
@@ -139,10 +141,19 @@ struct blk_zone_range {
* sector range. The sector range must be zone aligned.
* @BLKGETZONESZ: Get the device zone size in number of 512 B sectors.
* @BLKGETNRZONES: Get the total number of zones of the device.
+ * @BLKOPENZONE: Open the zones in the specified sector range.
+ * The 512 B sector range must be zone aligned.
+ * @BLKCLOSEZONE: Close the zones in the specified sector range.
+ * The 512 B sector range must be zone aligned.
+ * @BLKFINISHZONE: Mark the zones as full in the specified sector range.
+ * The 512 B sector range must be zone aligned.
*/
#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report)
#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range)
#define BLKGETZONESZ _IOR(0x12, 132, __u32)
#define BLKGETNRZONES _IOR(0x12, 133, __u32)
+#define BLKOPENZONE _IOW(0x12, 134, struct blk_zone_range)
+#define BLKCLOSEZONE _IOW(0x12, 135, struct blk_zone_range)
+#define BLKFINISHZONE _IOW(0x12, 136, struct blk_zone_range)
#endif /* _UAPI_BLKZONED_H */
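
The new ioctls reuse struct blk_zone_range. A userspace sketch that
explicitly opens one zone (the helper name and device path are placeholders;
the 512 B sector range must be zone aligned):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

static int open_one_zone(const char *dev, __u64 zone_start, __u64 zone_sectors)
{
	struct blk_zone_range range = {
		.sector		= zone_start,
		.nr_sectors	= zone_sectors,
	};
	int ret, fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;

	ret = ioctl(fd, BLKOPENZONE, &range);
	if (ret < 0)
		perror("BLKOPENZONE");

	close(fd);
	return ret;
}
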
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index 1d338357df8a..1f97b33c840e 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -58,7 +58,7 @@
* Valid hint values for F_{GET,SET}_RW_HINT. 0 is "not set", or can be
* used to clear any hints previously set.
*/
-#define RWF_WRITE_LIFE_NOT_SET 0
+#define RWH_WRITE_LIFE_NOT_SET 0
#define RWH_WRITE_LIFE_NONE 1
#define RWH_WRITE_LIFE_SHORT 2
#define RWH_WRITE_LIFE_MEDIUM 3
@@ -66,6 +66,13 @@
#define RWH_WRITE_LIFE_EXTREME 5
/*
+ * The original spelling is retained from the first versions of the
+ * patch set that introduced the feature; see commit
+ * v4.13-rc1~212^2~51.
+ */
+#define RWF_WRITE_LIFE_NOT_SET RWH_WRITE_LIFE_NOT_SET
+
+/*
* Types of directory notifications that may be requested.
*/
#define DN_ACCESS 0x00000001 /* File accessed */
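
With the constants above, setting a write lifetime hint from userspace looks
like the sketch below (set_short_write_lifetime() is hypothetical; the
fallback defines simply mirror the values in this header for older libc
headers):

#include <fcntl.h>
#include <stdint.h>

#ifndef F_SET_RW_HINT
#define F_SET_RW_HINT		(1024 + 12)	/* F_LINUX_SPECIFIC_BASE + 12 */
#endif
#ifndef RWH_WRITE_LIFE_SHORT
#define RWH_WRITE_LIFE_SHORT	2
#endif

/* Tag writes through fd as short-lived; RWH_WRITE_LIFE_NOT_SET (or the
 * legacy RWF_ spelling, kept as an alias) clears the hint again. */
static int set_short_write_lifetime(int fd)
{
	uint64_t hint = RWH_WRITE_LIFE_SHORT;

	return fcntl(fd, F_SET_RW_HINT, &hint);
}
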
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index ea57526a5b89..2a1569211d87 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -19,7 +19,10 @@ struct io_uring_sqe {
__u8 flags; /* IOSQE_ flags */
__u16 ioprio; /* ioprio for the request */
__s32 fd; /* file descriptor to do IO on */
- __u64 off; /* offset into file */
+ union {
+ __u64 off; /* offset into file */
+ __u64 addr2;
+ };
__u64 addr; /* pointer to buffer or iovecs */
__u32 len; /* buffer size or number of iovecs */
union {
@@ -29,6 +32,8 @@ struct io_uring_sqe {
__u32 sync_range_flags;
__u32 msg_flags;
__u32 timeout_flags;
+ __u32 accept_flags;
+ __u32 cancel_flags;
};
__u64 user_data; /* data to be passed back at completion time */
union {
@@ -50,6 +55,7 @@ struct io_uring_sqe {
#define IORING_SETUP_IOPOLL (1U << 0) /* io_context is polled */
#define IORING_SETUP_SQPOLL (1U << 1) /* SQ poll thread */
#define IORING_SETUP_SQ_AFF (1U << 2) /* sq_thread_cpu is valid */
+#define IORING_SETUP_CQSIZE (1U << 3) /* app defines CQ size */
#define IORING_OP_NOP 0
#define IORING_OP_READV 1
@@ -63,6 +69,10 @@ struct io_uring_sqe {
#define IORING_OP_SENDMSG 9
#define IORING_OP_RECVMSG 10
#define IORING_OP_TIMEOUT 11
+#define IORING_OP_TIMEOUT_REMOVE 12
+#define IORING_OP_ACCEPT 13
+#define IORING_OP_ASYNC_CANCEL 14
+#define IORING_OP_LINK_TIMEOUT 15
/*
* sqe->fsync_flags
@@ -70,6 +80,11 @@ struct io_uring_sqe {
#define IORING_FSYNC_DATASYNC (1U << 0)
/*
+ * sqe->timeout_flags
+ */
+#define IORING_TIMEOUT_ABS (1U << 0)
+
+/*
* IO completion data structure (Completion Queue Entry)
*/
struct io_uring_cqe {
@@ -140,6 +155,7 @@ struct io_uring_params {
* io_uring_params->features flags
*/
#define IORING_FEAT_SINGLE_MMAP (1U << 0)
+#define IORING_FEAT_NODROP (1U << 1)
/*
* io_uring_register(2) opcodes and arguments
@@ -150,5 +166,11 @@ struct io_uring_params {
#define IORING_UNREGISTER_FILES 3
#define IORING_REGISTER_EVENTFD 4
#define IORING_UNREGISTER_EVENTFD 5
+#define IORING_REGISTER_FILES_UPDATE 6
+
+struct io_uring_files_update {
+ __u32 offset;
+ __s32 *fds;
+};
#endif
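
The new IORING_REGISTER_FILES_UPDATE opcode takes struct io_uring_files_update
through io_uring_register(2). A hedged userspace sketch that swaps a single
registered file (update_one_file() and the fd values are hypothetical; an fd
of -1 in the array clears that slot):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

#ifndef __NR_io_uring_register
#define __NR_io_uring_register	427
#endif

static int update_one_file(int ring_fd, unsigned int slot, int new_fd)
{
	__s32 fds[1] = { new_fd };
	struct io_uring_files_update up;

	memset(&up, 0, sizeof(up));
	up.offset = slot;
	up.fds = fds;

	/* nr_args is the number of entries in the fds array. */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES_UPDATE, &up, 1);
}
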
diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h
index c6d035fa1b6c..6f5af1a84213 100644
--- a/include/uapi/linux/sed-opal.h
+++ b/include/uapi/linux/sed-opal.h
@@ -113,6 +113,25 @@ struct opal_shadow_mbr {
__u64 size;
};
+/* Opal table operations */
+enum opal_table_ops {
+ OPAL_READ_TABLE,
+ OPAL_WRITE_TABLE,
+};
+
+#define OPAL_UID_LENGTH 8
+struct opal_read_write_table {
+ struct opal_key key;
+ const __u64 data;
+ const __u8 table_uid[OPAL_UID_LENGTH];
+ __u64 offset;
+ __u64 size;
+#define OPAL_TABLE_READ (1 << OPAL_READ_TABLE)
+#define OPAL_TABLE_WRITE (1 << OPAL_WRITE_TABLE)
+ __u64 flags;
+ __u64 priv;
+};
+
#define IOC_OPAL_SAVE _IOW('p', 220, struct opal_lock_unlock)
#define IOC_OPAL_LOCK_UNLOCK _IOW('p', 221, struct opal_lock_unlock)
#define IOC_OPAL_TAKE_OWNERSHIP _IOW('p', 222, struct opal_key)
@@ -128,5 +147,6 @@ struct opal_shadow_mbr {
#define IOC_OPAL_PSID_REVERT_TPR _IOW('p', 232, struct opal_key)
#define IOC_OPAL_MBR_DONE _IOW('p', 233, struct opal_mbr_done)
#define IOC_OPAL_WRITE_SHADOW_MBR _IOW('p', 234, struct opal_shadow_mbr)
+#define IOC_OPAL_GENERIC_TABLE_RW _IOW('p', 235, struct opal_read_write_table)
#endif /* _UAPI_SED_OPAL_H */
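
IOC_OPAL_GENERIC_TABLE_RW drives struct opal_read_write_table from userspace.
A hedged sketch of a table read (the UID bytes shown are only a placeholder,
and rw.key, which must carry the authority credential, is left zeroed here):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/sed-opal.h>

static int opal_table_read(int fd, void *buf, uint64_t offset, uint64_t len)
{
	struct opal_read_write_table rw = {
		/* Placeholder UID; use the UID of the table you need. */
		.table_uid	= { 0x00, 0x00, 0x08, 0x04,
				    0x00, 0x00, 0x00, 0x01 },
		.data		= (uintptr_t)buf,
		.offset		= offset,
		.size		= len,
		.flags		= OPAL_TABLE_READ,
	};

	/* rw.key must be filled with the admin authority's password. */
	return ioctl(fd, IOC_OPAL_GENERIC_TABLE_RW, &rw);
}
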
diff --git a/init/Kconfig b/init/Kconfig
index b4daad2bac23..4d8d145c41d2 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1548,6 +1548,7 @@ config AIO
config IO_URING
bool "Enable IO uring support" if EXPERT
select ANON_INODES
+ select IO_WQ
default y
help
This option enables support for the io_uring interface, enabling
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index ba635209ae9a..5b9da0954a27 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -678,8 +678,10 @@ bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
down_write(&bpf_devs_lock);
if (!offdevs_inited) {
err = rhashtable_init(&offdevs, &offdevs_params);
- if (err)
+ if (err) {
+ up_write(&bpf_devs_lock);
return ERR_PTR(err);
+ }
offdevs_inited = true;
}
up_write(&bpf_devs_lock);
diff --git a/kernel/fork.c b/kernel/fork.c
index 55af6931c6ec..13b38794efb5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1708,11 +1708,11 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
/*
* Poll support for process exit notification.
*/
-static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts)
+static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
{
struct task_struct *task;
struct pid *pid = file->private_data;
- int poll_flags = 0;
+ __poll_t poll_flags = 0;
poll_wait(file, &pid->wait_pidfd, pts);
@@ -1724,7 +1724,7 @@ static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts)
* group, then poll(2) should block, similar to the wait(2) family.
*/
if (!task || (task->exit_state && thread_group_empty(task)))
- poll_flags = POLLIN | POLLRDNORM;
+ poll_flags = EPOLLIN | EPOLLRDNORM;
rcu_read_unlock();
return poll_flags;
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 04e83fdfbe80..a45cba7df0ae 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -814,6 +814,8 @@ EXPORT_SYMBOL_GPL(freq_qos_update_request);
*/
int freq_qos_remove_request(struct freq_qos_request *req)
{
+ int ret;
+
if (!req)
return -EINVAL;
@@ -821,7 +823,11 @@ int freq_qos_remove_request(struct freq_qos_request *req)
"%s() called for unknown object\n", __func__))
return -EINVAL;
- return freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ req->qos = NULL;
+ req->type = 0;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(freq_qos_remove_request);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 44123b4d14e8..80b60ca7767f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -16,6 +16,7 @@
#include <asm/tlb.h>
#include "../workqueue_internal.h"
+#include "../../fs/io-wq.h"
#include "../smpboot.h"
#include "pelt.h"
@@ -4112,9 +4113,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
* we disable preemption to avoid it calling schedule() again
* in the possible wakeup of a kworker.
*/
- if (tsk->flags & PF_WQ_WORKER) {
+ if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
preempt_disable();
- wq_worker_sleeping(tsk);
+ if (tsk->flags & PF_WQ_WORKER)
+ wq_worker_sleeping(tsk);
+ else
+ io_wq_worker_sleeping(tsk);
preempt_enable_no_resched();
}
@@ -4131,8 +4135,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
static void sched_update_worker(struct task_struct *tsk)
{
- if (tsk->flags & PF_WQ_WORKER)
- wq_worker_running(tsk);
+ if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
+ if (tsk->flags & PF_WQ_WORKER)
+ wq_worker_running(tsk);
+ else
+ io_wq_worker_running(tsk);
+ }
}
asmlinkage __visible void __sched schedule(void)
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 969e5400a615..33feec8989f1 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -236,23 +236,6 @@ bool sbitmap_any_bit_set(const struct sbitmap *sb)
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
-bool sbitmap_any_bit_clear(const struct sbitmap *sb)
-{
- unsigned int i;
-
- for (i = 0; i < sb->map_nr; i++) {
- const struct sbitmap_word *word = &sb->map[i];
- unsigned long mask = word->word & ~word->cleared;
- unsigned long ret;
-
- ret = find_first_zero_bit(&mask, word->depth);
- if (ret < word->depth)
- return true;
- }
- return false;
-}
-EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
-
static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
unsigned int i, weight = 0;
diff --git a/mm/ksm.c b/mm/ksm.c
index dbee2eb4dd05..7905934cd3ad 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -885,13 +885,13 @@ static int remove_stable_node(struct stable_node *stable_node)
return 0;
}
- if (WARN_ON_ONCE(page_mapped(page))) {
- /*
- * This should not happen: but if it does, just refuse to let
- * merge_across_nodes be switched - there is no need to panic.
- */
- err = -EBUSY;
- } else {
+ /*
+ * Page could be still mapped if this races with __mmput() running in
+ * between ksm_exit() and exit_mmap(). Just refuse to let
+ * merge_across_nodes/max_page_sharing be switched.
+ */
+ err = -EBUSY;
+ if (!page_mapped(page)) {
/*
* The stable node did not yet appear stale to get_ksm_page(),
* since that allows for an unmapped ksm page to be recognized
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 3b62a9ff8ea0..f307bd82d750 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -331,7 +331,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
unsigned long end_pfn)
{
for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
- if (unlikely(!pfn_valid(start_pfn)))
+ if (unlikely(!pfn_to_online_page(start_pfn)))
continue;
if (unlikely(pfn_to_nid(start_pfn) != nid))
@@ -356,7 +356,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
/* pfn is the end pfn of a memory section. */
pfn = end_pfn - 1;
for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
- if (unlikely(!pfn_valid(pfn)))
+ if (unlikely(!pfn_to_online_page(pfn)))
continue;
if (unlikely(pfn_to_nid(pfn) != nid))
@@ -415,7 +415,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
*/
pfn = zone_start_pfn;
for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
- if (unlikely(!pfn_valid(pfn)))
+ if (unlikely(!pfn_to_online_page(pfn)))
continue;
if (page_zone(pfn_to_page(pfn)) != zone)
@@ -471,6 +471,16 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
struct pglist_data *pgdat = zone->zone_pgdat;
unsigned long flags;
+#ifdef CONFIG_ZONE_DEVICE
+ /*
+ * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
+ * we will not try to shrink the zones - which is okay as
+ * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
+ */
+ if (zone_idx(zone) == ZONE_DEVICE)
+ return;
+#endif
+
pgdat_resize_lock(zone->zone_pgdat, &flags);
shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
update_pgdat_span(pgdat);
diff --git a/net/atm/clip.c b/net/atm/clip.c
index a7972da7235d..294cb9efe3d3 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -89,7 +89,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
struct clip_vcc **walk;
if (!entry) {
- pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
+ pr_err("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
return;
}
netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
@@ -109,10 +109,10 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
error = neigh_update(entry->neigh, NULL, NUD_NONE,
NEIGH_UPDATE_F_ADMIN, 0);
if (error)
- pr_crit("neigh_update failed with %d\n", error);
+ pr_err("neigh_update failed with %d\n", error);
goto out;
}
- pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
+ pr_err("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
out:
netif_tx_unlock_bh(entry->neigh->dev);
}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 865ba6ca16eb..ae3bcb1540ec 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -923,21 +923,23 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
"rx-%u", index);
if (error)
- return error;
+ goto err;
dev_hold(queue->dev);
if (dev->sysfs_rx_queue_group) {
error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
- if (error) {
- kobject_put(kobj);
- return error;
- }
+ if (error)
+ goto err;
}
kobject_uevent(kobj, KOBJ_ADD);
return error;
+
+err:
+ kobject_put(kobj);
+ return error;
}
#endif /* CONFIG_SYSFS */
@@ -1461,21 +1463,22 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
"tx-%u", index);
if (error)
- return error;
+ goto err;
dev_hold(queue->dev);
#ifdef CONFIG_BQL
error = sysfs_create_group(kobj, &dql_group);
- if (error) {
- kobject_put(kobj);
- return error;
- }
+ if (error)
+ goto err;
#endif
kobject_uevent(kobj, KOBJ_ADD);
-
return 0;
+
+err:
+ kobject_put(kobj);
+ return error;
}
#endif /* CONFIG_SYSFS */
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index c81cd80114d9..e4ec575c1fba 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2195,6 +2195,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_MAC]) {
struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
+ if (ivm->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_mac)
err = ops->ndo_set_vf_mac(dev, ivm->vf,
@@ -2206,6 +2208,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_VLAN]) {
struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
+ if (ivv->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_vlan)
err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
@@ -2238,6 +2242,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (len == 0)
return -EINVAL;
+ if (ivvl[0]->vf >= INT_MAX)
+ return -EINVAL;
err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
ivvl[0]->qos, ivvl[0]->vlan_proto);
if (err < 0)
@@ -2248,6 +2254,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
struct ifla_vf_info ivf;
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_get_vf_config)
err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
@@ -2266,6 +2274,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_RATE]) {
struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_rate)
err = ops->ndo_set_vf_rate(dev, ivt->vf,
@@ -2278,6 +2288,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_SPOOFCHK]) {
struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
+ if (ivs->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_spoofchk)
err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
@@ -2289,6 +2301,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_LINK_STATE]) {
struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
+ if (ivl->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_link_state)
err = ops->ndo_set_vf_link_state(dev, ivl->vf,
@@ -2302,6 +2316,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
err = -EOPNOTSUPP;
ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
+ if (ivrssq_en->vf >= INT_MAX)
+ return -EINVAL;
if (ops->ndo_set_vf_rss_query_en)
err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
ivrssq_en->setting);
@@ -2312,6 +2328,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_TRUST]) {
struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_trust)
err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
@@ -2322,15 +2340,18 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_IB_NODE_GUID]) {
struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
if (!ops->ndo_set_vf_guid)
return -EOPNOTSUPP;
-
return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
}
if (tb[IFLA_VF_IB_PORT_GUID]) {
struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
if (!ops->ndo_set_vf_guid)
return -EOPNOTSUPP;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 59ded25acd04..0902cb32bbad 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -1037,7 +1037,7 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_fib_multipath_hash_policy,
.extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
+ .extra2 = &two,
},
#endif
{
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1d58ce829dca..447defbfccdd 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1297,6 +1297,27 @@ out:
#define UDP_SKB_IS_STATELESS 0x80000000
+/* all head states (dst, sk, nf conntrack) except skb extensions are
+ * cleared by udp_rcv().
+ *
+ * We need to preserve secpath, if present, to eventually process
+ * IP_CMSG_PASSSEC at recvmsg() time.
+ *
+ * Other extensions can be cleared.
+ */
+static bool udp_try_make_stateless(struct sk_buff *skb)
+{
+ if (!skb_has_extensions(skb))
+ return true;
+
+ if (!secpath_exists(skb)) {
+ skb_ext_reset(skb);
+ return true;
+ }
+
+ return false;
+}
+
static void udp_set_dev_scratch(struct sk_buff *skb)
{
struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
@@ -1308,11 +1329,7 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
scratch->is_linear = !skb_is_nonlinear(skb);
#endif
- /* all head states execept sp (dst, sk, nf) are always cleared by
- * udp_rcv() and we need to preserve secpath, if present, to eventually
- * process IP_CMSG_PASSSEC at recvmsg() time
- */
- if (likely(!skb_sec_path(skb)))
+ if (udp_try_make_stateless(skb))
scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 264c292e7dcc..79fc012dd2ca 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -363,8 +363,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
break;
case IPV6_TRANSPARENT:
- if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) &&
- !ns_capable(net->user_ns, CAP_NET_RAW)) {
+ if (valbool && !ns_capable(net->user_ns, CAP_NET_RAW) &&
+ !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
retv = -EPERM;
break;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e60bf8e7dd1a..3f83ea851ebf 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -634,7 +634,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
* Router Reachability Probe MUST be rate-limited
* to no more than one per minute.
*/
- if (fib6_nh->fib_nh_gw_family)
+ if (!fib6_nh->fib_nh_gw_family)
return;
nh_gw = &fib6_nh->fib_nh_gw6;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index cdfaa79382a2..b5bc631b96b7 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -43,7 +43,7 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
int err = -EINVAL;
int rem;
- if (!nla || !n)
+ if (!nla)
return NULL;
keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL);
@@ -170,6 +170,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
}
parm = nla_data(pattr);
+ if (!parm->nkeys) {
+ NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
+ return -EINVAL;
+ }
ksize = parm->nkeys * sizeof(struct tc_pedit_key);
if (nla_len(pattr) < sizeof(*parm) + ksize) {
NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid");
@@ -183,12 +187,6 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
index = parm->index;
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
- if (!parm->nkeys) {
- tcf_idr_cleanup(tn, index);
- NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
- ret = -EINVAL;
- goto out_free;
- }
ret = tcf_idr_create(tn, index, est, a,
&act_pedit_ops, bind, false);
if (ret) {
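Hoisting the nkeys check ahead of tcf_idr_check_alloc() means a zero-key request is rejected before an action index is reserved, so the tcf_idr_cleanup() unwind path disappears. The same validate-then-allocate shape, reduced to a stand-alone sketch with made-up helpers:

#include <stdio.h>

struct request { unsigned int nkeys; };

static int  reserve_index(void)  { return 42; }	/* pretend IDR slot */
static void release_index(int i) { (void)i; }

static int setup(const struct request *req)
{
	int idx;

	if (!req->nkeys)	/* validate first: nothing to unwind on failure */
		return -22;	/* -EINVAL */

	idx = reserve_index();	/* only now commit a resource */
	printf("configured %u keys at index %d\n", req->nkeys, idx);
	release_index(idx);
	return 0;
}

int main(void)
{
	struct request bad = { .nkeys = 0 }, good = { .nkeys = 3 };

	printf("bad:  %d\n", setup(&bad));
	printf("good: %d\n", setup(&good));
	return 0;
}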
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 2f83a79f76aa..d55669e14741 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -135,6 +135,10 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
if (opt_len < 0)
return opt_len;
opts_len += opt_len;
+ if (opts_len > IP_TUNNEL_OPTS_MAX) {
+ NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
+ return -EINVAL;
+ }
if (dst) {
dst_len -= opt_len;
dst += opt_len;
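The new bound is applied to the running opts_len inside the nest-walking loop, so an over-long option chain is refused before anything more is copied toward dst. The pattern as a stand-alone sketch (the limit and sizes are illustrative, not the real IP_TUNNEL_OPTS_MAX):

#include <stdio.h>

#define OPTS_MAX 255	/* stand-in for IP_TUNNEL_OPTS_MAX */

static int copy_opts(const int *lens, int n)
{
	int opts_len = 0;
	int i;

	for (i = 0; i < n; i++) {
		opts_len += lens[i];
		if (opts_len > OPTS_MAX)	/* check while accumulating, not afterwards */
			return -22;		/* -EINVAL */
		/* ... lens[i] bytes of this option would be copied here ... */
	}
	return opts_len;
}

int main(void)
{
	int ok[]  = { 60, 60, 60 };
	int bad[] = { 120, 120, 120 };

	printf("ok:  %d\n", copy_opts(ok, 3));
	printf("bad: %d\n", copy_opts(bad, 3));
	return 0;
}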
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 7cd68628c637..c609373c8661 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -922,7 +922,7 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
}
/* Verify priority mapping uses valid tcs */
- for (i = 0; i < TC_BITMASK + 1; i++) {
+ for (i = 0; i <= TC_BITMASK; i++) {
if (qopt->prio_tc_map[i] >= qopt->num_tc) {
NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
return -EINVAL;
@@ -1347,6 +1347,26 @@ out:
return err;
}
+static int taprio_mqprio_cmp(const struct net_device *dev,
+ const struct tc_mqprio_qopt *mqprio)
+{
+ int i;
+
+ if (!mqprio || mqprio->num_tc != dev->num_tc)
+ return -1;
+
+ for (i = 0; i < mqprio->num_tc; i++)
+ if (dev->tc_to_txq[i].count != mqprio->count[i] ||
+ dev->tc_to_txq[i].offset != mqprio->offset[i])
+ return -1;
+
+ for (i = 0; i <= TC_BITMASK; i++)
+ if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
+ return -1;
+
+ return 0;
+}
+
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
@@ -1398,6 +1418,10 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
admin = rcu_dereference(q->admin_sched);
rcu_read_unlock();
+ /* no changes - no new mqprio settings */
+ if (!taprio_mqprio_cmp(dev, mqprio))
+ mqprio = NULL;
+
if (mqprio && (oper || admin)) {
NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
err = -ENOTSUPP;
@@ -1455,7 +1479,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
mqprio->offset[i]);
/* Always use supplied priority mappings */
- for (i = 0; i < TC_BITMASK + 1; i++)
+ for (i = 0; i <= TC_BITMASK; i++)
netdev_set_prio_tc_map(dev, i,
mqprio->prio_tc_map[i]);
}
diff --git a/net/socket.c b/net/socket.c
index 6a9ab7a8b1d2..17bc1eee198a 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -404,6 +404,7 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
sock->file = file;
file->private_data = sock;
+ stream_open(SOCK_INODE(sock), file);
return file;
}
EXPORT_SYMBOL(sock_alloc_file);
@@ -1690,24 +1691,13 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
return __sys_listen(fd, backlog);
}
-/*
- * For accept, we attempt to create a new socket, set up the link
- * with the client, wake up the client, then return the new
- * connected fd. We collect the address of the connector in kernel
- * space and move it to user at the very end. This is unclean because
- * we open the socket then return an error.
- *
- * 1003.1g adds the ability to recvmsg() to query connection pending
- * status to recvmsg. We need to add that support in a way thats
- * clean when we restructure accept also.
- */
-
-int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
- int __user *upeer_addrlen, int flags)
+int __sys_accept4_file(struct file *file, unsigned file_flags,
+ struct sockaddr __user *upeer_sockaddr,
+ int __user *upeer_addrlen, int flags)
{
struct socket *sock, *newsock;
struct file *newfile;
- int err, len, newfd, fput_needed;
+ int err, len, newfd;
struct sockaddr_storage address;
if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
@@ -1716,14 +1706,14 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ sock = sock_from_file(file, &err);
if (!sock)
goto out;
err = -ENFILE;
newsock = sock_alloc();
if (!newsock)
- goto out_put;
+ goto out;
newsock->type = sock->type;
newsock->ops = sock->ops;
@@ -1738,20 +1728,21 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
if (unlikely(newfd < 0)) {
err = newfd;
sock_release(newsock);
- goto out_put;
+ goto out;
}
newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
if (IS_ERR(newfile)) {
err = PTR_ERR(newfile);
put_unused_fd(newfd);
- goto out_put;
+ goto out;
}
err = security_socket_accept(sock, newsock);
if (err)
goto out_fd;
- err = sock->ops->accept(sock, newsock, sock->file->f_flags, false);
+ err = sock->ops->accept(sock, newsock, sock->file->f_flags | file_flags,
+ false);
if (err < 0)
goto out_fd;
@@ -1772,15 +1763,42 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
fd_install(newfd, newfile);
err = newfd;
-
-out_put:
- fput_light(sock->file, fput_needed);
out:
return err;
out_fd:
fput(newfile);
put_unused_fd(newfd);
- goto out_put;
+ goto out;
+
+}
+
+/*
+ * For accept, we attempt to create a new socket, set up the link
+ * with the client, wake up the client, then return the new
+ * connected fd. We collect the address of the connector in kernel
+ * space and move it to user at the very end. This is unclean because
+ * we open the socket then return an error.
+ *
+ * 1003.1g adds the ability to recvmsg() to query connection pending
+ * status to recvmsg. We need to add that support in a way that's
+ * clean when we restructure accept also.
+ */
+
+int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
+ int __user *upeer_addrlen, int flags)
+{
+ int ret = -EBADF;
+ struct fd f;
+
+ f = fdget(fd);
+ if (f.file) {
+ ret = __sys_accept4_file(f.file, 0, upeer_sockaddr,
+ upeer_addrlen, flags);
+ if (f.flags)
+ fput(f.file);
+ }
+
+ return ret;
}
SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
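With this split, __sys_accept4() is only a thin wrapper: it resolves the descriptor with fdget() and passes the struct file to __sys_accept4_file(), which a caller such as io_uring can invoke with a file it already holds. The generic shape of that fdget pattern, sketched with a made-up helper (the hunk above open-codes the equivalent of fdput() as "if (f.flags) fput(f.file)"):

/* kernel-context sketch; do_something_with() is hypothetical */
static long operate_on_fd(int fd)
{
	struct fd f = fdget(fd);
	long ret = -EBADF;

	if (f.file) {
		ret = do_something_with(f.file);
		fdput(f);	/* drops the reference only if fdget() actually took one */
	}
	return ret;
}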
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 0775ae40fcfb..f874cc0da45d 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -908,6 +908,7 @@ static int __init tls_register(void)
{
tls_sw_proto_ops = inet_stream_ops;
tls_sw_proto_ops.splice_read = tls_sw_splice_read;
+ tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked;
tls_device_init();
tcp_register_ulp(&tcp_tls_ulp_ops);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 446f23c1f3ce..319735d5c084 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1204,6 +1204,17 @@ sendpage_end:
return copied ? copied : ret;
}
+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
+{
+ if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
+ MSG_NO_SHARED_FRAGS))
+ return -ENOTSUPP;
+
+ return tls_sw_do_sendpage(sk, page, offset, size, flags);
+}
+
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
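tls_sw_sendpage_locked() becomes reachable through the sendpage_locked hook that tls_main.c (above) now fills in, i.e. for callers that already hold the socket lock. Roughly, and assuming the in-tree kernel_sendpage_locked() helper keeps its usual shape, the dispatch looks like this (sketch, not a verbatim copy of net/socket.c):

int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
			   size_t size, int flags)
{
	struct socket *sock = sk->sk_socket;

	if (sock->ops->sendpage_locked)		/* tls_sw_sendpage_locked for a kTLS socket */
		return sock->ops->sendpage_locked(sk, page, offset, size,
						  flags);

	return sock_no_sendpage_locked(sk, page, offset, size, flags);
}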
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 9cef54064f60..074f27538f55 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -28,5 +28,5 @@ obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += keyctl_pkey.o
# Key types
#
obj-$(CONFIG_BIG_KEYS) += big_key.o
-obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+obj-$(CONFIG_TRUSTED_KEYS) += trusted-keys/
obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
diff --git a/security/keys/trusted-keys/Makefile b/security/keys/trusted-keys/Makefile
new file mode 100644
index 000000000000..7b73cebbb378
--- /dev/null
+++ b/security/keys/trusted-keys/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for trusted keys
+#
+
+obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+trusted-y += trusted_tpm1.o
+trusted-y += trusted_tpm2.o
diff --git a/security/keys/trusted.c b/security/keys/trusted-keys/trusted_tpm1.c
index 1fbd77816610..d2c5ec1e040b 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted-keys/trusted_tpm1.c
@@ -27,7 +27,7 @@
#include <linux/tpm.h>
#include <linux/tpm_command.h>
-#include <keys/trusted.h>
+#include <keys/trusted_tpm.h>
static const char hmac_alg[] = "hmac(sha1)";
static const char hash_alg[] = "sha1";
@@ -406,13 +406,10 @@ static int osap(struct tpm_buf *tb, struct osapsess *s,
if (ret != TPM_NONCE_SIZE)
return ret;
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_COMMAND);
- store32(tb, TPM_OSAP_SIZE);
- store32(tb, TPM_ORD_OSAP);
- store16(tb, type);
- store32(tb, handle);
- storebytes(tb, ononce, TPM_NONCE_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_OSAP);
+ tpm_buf_append_u16(tb, type);
+ tpm_buf_append_u32(tb, handle);
+ tpm_buf_append(tb, ononce, TPM_NONCE_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
@@ -437,10 +434,7 @@ int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
if (!chip)
return -ENODEV;
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_COMMAND);
- store32(tb, TPM_OIAP_SIZE);
- store32(tb, TPM_ORD_OIAP);
+ tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_OIAP);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
return ret;
@@ -535,20 +529,17 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
goto out;
/* build and send the TPM request packet */
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
- store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen);
- store32(tb, TPM_ORD_SEAL);
- store32(tb, keyhandle);
- storebytes(tb, td->encauth, SHA1_DIGEST_SIZE);
- store32(tb, pcrinfosize);
- storebytes(tb, pcrinfo, pcrinfosize);
- store32(tb, datalen);
- storebytes(tb, data, datalen);
- store32(tb, sess.handle);
- storebytes(tb, td->nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_SEAL);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append(tb, td->encauth, SHA1_DIGEST_SIZE);
+ tpm_buf_append_u32(tb, pcrinfosize);
+ tpm_buf_append(tb, pcrinfo, pcrinfosize);
+ tpm_buf_append_u32(tb, datalen);
+ tpm_buf_append(tb, data, datalen);
+ tpm_buf_append_u32(tb, sess.handle);
+ tpm_buf_append(tb, td->nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, td->pubauth, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
@@ -594,7 +585,6 @@ static int tpm_unseal(struct tpm_buf *tb,
uint32_t authhandle2 = 0;
unsigned char cont = 0;
uint32_t ordinal;
- uint32_t keyhndl;
int ret;
/* sessions for unsealing key and data */
@@ -610,7 +600,6 @@ static int tpm_unseal(struct tpm_buf *tb,
}
ordinal = htonl(TPM_ORD_UNSEAL);
- keyhndl = htonl(SRKHANDLE);
ret = tpm_get_random(chip, nonceodd, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE) {
pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
@@ -628,20 +617,17 @@ static int tpm_unseal(struct tpm_buf *tb,
return ret;
/* build and send TPM request packet */
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_AUTH2_COMMAND);
- store32(tb, TPM_UNSEAL_SIZE + bloblen);
- store32(tb, TPM_ORD_UNSEAL);
- store32(tb, keyhandle);
- storebytes(tb, blob, bloblen);
- store32(tb, authhandle1);
- storebytes(tb, nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, authdata1, SHA1_DIGEST_SIZE);
- store32(tb, authhandle2);
- storebytes(tb, nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, authdata2, SHA1_DIGEST_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH2_COMMAND, TPM_ORD_UNSEAL);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append(tb, blob, bloblen);
+ tpm_buf_append_u32(tb, authhandle1);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata1, SHA1_DIGEST_SIZE);
+ tpm_buf_append_u32(tb, authhandle2);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata2, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0) {
@@ -670,23 +656,23 @@ static int tpm_unseal(struct tpm_buf *tb,
static int key_seal(struct trusted_key_payload *p,
struct trusted_key_options *o)
{
- struct tpm_buf *tb;
+ struct tpm_buf tb;
int ret;
- tb = kzalloc(sizeof *tb, GFP_KERNEL);
- if (!tb)
- return -ENOMEM;
+ ret = tpm_buf_init(&tb, 0, 0);
+ if (ret)
+ return ret;
/* include migratable flag at end of sealed key */
p->key[p->key_len] = p->migratable;
- ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth,
+ ret = tpm_seal(&tb, o->keytype, o->keyhandle, o->keyauth,
p->key, p->key_len + 1, p->blob, &p->blob_len,
o->blobauth, o->pcrinfo, o->pcrinfo_len);
if (ret < 0)
pr_info("trusted_key: srkseal failed (%d)\n", ret);
- kzfree(tb);
+ tpm_buf_destroy(&tb);
return ret;
}
@@ -696,14 +682,14 @@ static int key_seal(struct trusted_key_payload *p,
static int key_unseal(struct trusted_key_payload *p,
struct trusted_key_options *o)
{
- struct tpm_buf *tb;
+ struct tpm_buf tb;
int ret;
- tb = kzalloc(sizeof *tb, GFP_KERNEL);
- if (!tb)
- return -ENOMEM;
+ ret = tpm_buf_init(&tb, 0, 0);
+ if (ret)
+ return ret;
- ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
+ ret = tpm_unseal(&tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
o->blobauth, p->key, &p->key_len);
if (ret < 0)
pr_info("trusted_key: srkunseal failed (%d)\n", ret);
@@ -711,7 +697,7 @@ static int key_unseal(struct trusted_key_payload *p,
/* pull migratable flag out of sealed key */
p->migratable = p->key[--p->key_len];
- kzfree(tb);
+ tpm_buf_destroy(&tb);
return ret;
}
@@ -1016,7 +1002,7 @@ static int trusted_instantiate(struct key *key,
switch (key_cmd) {
case Opt_load:
if (tpm2)
- ret = tpm_unseal_trusted(chip, payload, options);
+ ret = tpm2_unseal_trusted(chip, payload, options);
else
ret = key_unseal(payload, options);
dump_payload(payload);
@@ -1032,7 +1018,7 @@ static int trusted_instantiate(struct key *key,
goto out;
}
if (tpm2)
- ret = tpm_seal_trusted(chip, payload, options);
+ ret = tpm2_seal_trusted(chip, payload, options);
else
ret = key_seal(payload, options);
if (ret < 0)
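The churn in this file is mechanical: the old INIT_BUF()/store*() macros, which required the caller to precompute the total command size (TPM_SEAL_SIZE and friends), give way to the shared struct tpm_buf helpers, which track the length and patch the header themselves. The basic shape of the new API as used above (fragment only; the ordinal is illustrative):

	struct tpm_buf buf;
	int rc;

	rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_OSAP);
	if (rc)
		return rc;

	tpm_buf_append_u16(&buf, type);		/* the header length field is updated automatically */
	tpm_buf_append_u32(&buf, handle);
	tpm_buf_append(&buf, nonce, TPM_NONCE_SIZE);

	rc = trusted_tpm_send(buf.data, MAX_BUF_SIZE);	/* real command length sits in the header */
	tpm_buf_destroy(&buf);				/* frees the page backing buf.data */
	return rc;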
diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
new file mode 100644
index 000000000000..a9810ac2776f
--- /dev/null
+++ b/security/keys/trusted-keys/trusted_tpm2.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ */
+
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/tpm.h>
+#include <linux/tpm_command.h>
+
+#include <keys/trusted-type.h>
+#include <keys/trusted_tpm.h>
+
+static struct tpm2_hash tpm2_hash_map[] = {
+ {HASH_ALGO_SHA1, TPM_ALG_SHA1},
+ {HASH_ALGO_SHA256, TPM_ALG_SHA256},
+ {HASH_ALGO_SHA384, TPM_ALG_SHA384},
+ {HASH_ALGO_SHA512, TPM_ALG_SHA512},
+ {HASH_ALGO_SM3_256, TPM_ALG_SM3_256},
+};
+
+/**
+ * tpm2_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
+ *
+ * @buf: an allocated tpm_buf instance
+ * @session_handle: session handle
+ * @nonce: the session nonce, may be NULL if not used
+ * @nonce_len: the session nonce length, may be 0 if not used
+ * @attributes: the session attributes
+ * @hmac: the session HMAC or password, may be NULL if not used
+ * @hmac_len: the session HMAC or password length, may be 0 if not used
+ */
+static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
+ const u8 *nonce, u16 nonce_len,
+ u8 attributes,
+ const u8 *hmac, u16 hmac_len)
+{
+ tpm_buf_append_u32(buf, 9 + nonce_len + hmac_len);
+ tpm_buf_append_u32(buf, session_handle);
+ tpm_buf_append_u16(buf, nonce_len);
+
+ if (nonce && nonce_len)
+ tpm_buf_append(buf, nonce, nonce_len);
+
+ tpm_buf_append_u8(buf, attributes);
+ tpm_buf_append_u16(buf, hmac_len);
+
+ if (hmac && hmac_len)
+ tpm_buf_append(buf, hmac, hmac_len);
+}
+
+/**
+ * tpm2_seal_trusted() - seal the payload of a trusted key
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: < 0 on error and 0 on success.
+ */
+int tpm2_seal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ unsigned int blob_len;
+ struct tpm_buf buf;
+ u32 hash;
+ int i;
+ int rc;
+
+ for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) {
+ if (options->hash == tpm2_hash_map[i].crypto_id) {
+ hash = tpm2_hash_map[i].tpm_id;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(tpm2_hash_map))
+ return -EINVAL;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, options->keyhandle);
+ tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ 0 /* session_attributes */,
+ options->keyauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ /* sensitive */
+ tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
+
+ tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
+ tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
+ tpm_buf_append_u16(&buf, payload->key_len + 1);
+ tpm_buf_append(&buf, payload->key, payload->key_len);
+ tpm_buf_append_u8(&buf, payload->migratable);
+
+ /* public */
+ tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
+ tpm_buf_append_u16(&buf, TPM_ALG_KEYEDHASH);
+ tpm_buf_append_u16(&buf, hash);
+
+ /* policy */
+ if (options->policydigest_len) {
+ tpm_buf_append_u32(&buf, 0);
+ tpm_buf_append_u16(&buf, options->policydigest_len);
+ tpm_buf_append(&buf, options->policydigest,
+ options->policydigest_len);
+ } else {
+ tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
+ tpm_buf_append_u16(&buf, 0);
+ }
+
+ /* public parameters */
+ tpm_buf_append_u16(&buf, TPM_ALG_NULL);
+ tpm_buf_append_u16(&buf, 0);
+
+ /* outside info */
+ tpm_buf_append_u16(&buf, 0);
+
+ /* creation PCR */
+ tpm_buf_append_u32(&buf, 0);
+
+ if (buf.flags & TPM_BUF_OVERFLOW) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
+ if (rc)
+ goto out;
+
+ blob_len = be32_to_cpup((__be32 *) &buf.data[TPM_HEADER_SIZE]);
+ if (blob_len > MAX_BLOB_SIZE) {
+ rc = -E2BIG;
+ goto out;
+ }
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 4 + blob_len) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ memcpy(payload->blob, &buf.data[TPM_HEADER_SIZE + 4], blob_len);
+ payload->blob_len = blob_len;
+
+out:
+ tpm_buf_destroy(&buf);
+
+ if (rc > 0) {
+ if (tpm2_rc_value(rc) == TPM2_RC_HASH)
+ rc = -EINVAL;
+ else
+ rc = -EPERM;
+ }
+
+ return rc;
+}
+
+/**
+ * tpm2_load_cmd() - execute a TPM2_Load command
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: returned blob handle
+ *
+ * Return: 0 on success.
+ * -E2BIG on wrong payload size.
+ * -EPERM on tpm error status.
+ * < 0 error from tpm_send.
+ */
+static int tpm2_load_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 *blob_handle)
+{
+ struct tpm_buf buf;
+ unsigned int private_len;
+ unsigned int public_len;
+ unsigned int blob_len;
+ int rc;
+
+ private_len = be16_to_cpup((__be16 *) &payload->blob[0]);
+ if (private_len > (payload->blob_len - 2))
+ return -E2BIG;
+
+ public_len = be16_to_cpup((__be16 *) &payload->blob[2 + private_len]);
+ blob_len = private_len + public_len + 4;
+ if (blob_len > payload->blob_len)
+ return -E2BIG;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_LOAD);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, options->keyhandle);
+ tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ 0 /* session_attributes */,
+ options->keyauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ tpm_buf_append(&buf, payload->blob, blob_len);
+
+ if (buf.flags & TPM_BUF_OVERFLOW) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
+ if (!rc)
+ *blob_handle = be32_to_cpup(
+ (__be32 *) &buf.data[TPM_HEADER_SIZE]);
+
+out:
+ tpm_buf_destroy(&buf);
+
+ if (rc > 0)
+ rc = -EPERM;
+
+ return rc;
+}
+
+/**
+ * tpm2_unseal_cmd() - execute a TPM2_Unseal command
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: blob handle
+ *
+ * Return: 0 on success
+ * -EPERM on tpm error status
+ * < 0 error from tpm_send
+ */
+static int tpm2_unseal_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 blob_handle)
+{
+ struct tpm_buf buf;
+ u16 data_len;
+ u8 *data;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, blob_handle);
+ tpm2_buf_append_auth(&buf,
+ options->policyhandle ?
+ options->policyhandle : TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ TPM2_SA_CONTINUE_SESSION,
+ options->blobauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
+ if (rc > 0)
+ rc = -EPERM;
+
+ if (!rc) {
+ data_len = be16_to_cpup(
+ (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
+ if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 6 + data_len) {
+ rc = -EFAULT;
+ goto out;
+ }
+ data = &buf.data[TPM_HEADER_SIZE + 6];
+
+ memcpy(payload->key, data, data_len - 1);
+ payload->key_len = data_len - 1;
+ payload->migratable = data[data_len - 1];
+ }
+
+out:
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+/**
+ * tpm2_unseal_trusted() - unseal the payload of a trusted key
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: Same as with tpm_send.
+ */
+int tpm2_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ u32 blob_handle;
+ int rc;
+
+ rc = tpm2_load_cmd(chip, payload, options, &blob_handle);
+ if (rc)
+ return rc;
+
+ rc = tpm2_unseal_cmd(chip, payload, options, blob_handle);
+
+ return rc;
+}
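tpm2_seal_trusted() and tpm2_unseal_trusted() are driven from user space through the trusted key type, so the new file is exercised with add_key(2)/keyctl rather than called directly. A hedged usage example (link with -lkeyutils; the 0x81000001 parent handle is a placeholder for whatever persistent key the platform provides):

#include <keyutils.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* ask the kernel to generate a 32-byte key and seal it under the TPM2 parent */
	const char *payload = "new 32 keyhandle=0x81000001";
	key_serial_t key;

	key = add_key("trusted", "kmk", payload, strlen(payload),
		      KEY_SPEC_USER_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}
	printf("trusted key id: %d\n", key);
	return 0;
}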
diff --git a/tools/gpio/Build b/tools/gpio/Build
index 620c1937d957..4141f35837db 100644
--- a/tools/gpio/Build
+++ b/tools/gpio/Build
@@ -1,3 +1,4 @@
+gpio-utils-y += gpio-utils.o
lsgpio-y += lsgpio.o gpio-utils.o
gpio-hammer-y += gpio-hammer.o gpio-utils.o
gpio-event-mon-y += gpio-event-mon.o gpio-utils.o
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
index 1178d302757e..6080de58861f 100644
--- a/tools/gpio/Makefile
+++ b/tools/gpio/Makefile
@@ -35,11 +35,15 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h
prepare: $(OUTPUT)include/linux/gpio.h
+GPIO_UTILS_IN := $(OUTPUT)gpio-utils-in.o
+$(GPIO_UTILS_IN): prepare FORCE
+ $(Q)$(MAKE) $(build)=gpio-utils
+
#
# lsgpio
#
LSGPIO_IN := $(OUTPUT)lsgpio-in.o
-$(LSGPIO_IN): prepare FORCE
+$(LSGPIO_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
$(Q)$(MAKE) $(build)=lsgpio
$(OUTPUT)lsgpio: $(LSGPIO_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
@@ -48,7 +52,7 @@ $(OUTPUT)lsgpio: $(LSGPIO_IN)
# gpio-hammer
#
GPIO_HAMMER_IN := $(OUTPUT)gpio-hammer-in.o
-$(GPIO_HAMMER_IN): prepare FORCE
+$(GPIO_HAMMER_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
$(Q)$(MAKE) $(build)=gpio-hammer
$(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
@@ -57,7 +61,7 @@ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
# gpio-event-mon
#
GPIO_EVENT_MON_IN := $(OUTPUT)gpio-event-mon-in.o
-$(GPIO_EVENT_MON_IN): prepare FORCE
+$(GPIO_EVENT_MON_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
$(Q)$(MAKE) $(build)=gpio-event-mon
$(OUTPUT)gpio-event-mon: $(GPIO_EVENT_MON_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h
index 1d338357df8a..1f97b33c840e 100644
--- a/tools/include/uapi/linux/fcntl.h
+++ b/tools/include/uapi/linux/fcntl.h
@@ -58,7 +58,7 @@
* Valid hint values for F_{GET,SET}_RW_HINT. 0 is "not set", or can be
* used to clear any hints previously set.
*/
-#define RWF_WRITE_LIFE_NOT_SET 0
+#define RWH_WRITE_LIFE_NOT_SET 0
#define RWH_WRITE_LIFE_NONE 1
#define RWH_WRITE_LIFE_SHORT 2
#define RWH_WRITE_LIFE_MEDIUM 3
@@ -66,6 +66,13 @@
#define RWH_WRITE_LIFE_EXTREME 5
/*
+ * The originally introduced spelling has been retained from the first
+ * versions of the patch set that introduced the feature; see commit
+ * v4.13-rc1~212^2~51.
+ */
+#define RWF_WRITE_LIFE_NOT_SET RWH_WRITE_LIFE_NOT_SET
+
+/*
* Types of directory notifications that may be requested.
*/
#define DN_ACCESS 0x00000001 /* File accessed */
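The RWH_WRITE_LIFE_* values are what user space hands (by pointer) to fcntl(F_SET_RW_HINT); the compatibility define only keeps the old RWF_ spelling of the "not set" value compiling. A small usage sketch, assuming a libc recent enough to expose F_SET_RW_HINT (otherwise the constants come from <linux/fcntl.h>):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int fd = open("scratch.dat", O_CREAT | O_WRONLY, 0644);
	uint64_t hint = RWH_WRITE_LIFE_SHORT;	/* this data is expected to be overwritten soon */

	if (fd < 0)
		return 1;
	if (fcntl(fd, F_SET_RW_HINT, &hint) < 0)	/* F_SET_RW_HINT takes a pointer to a u64 */
		perror("F_SET_RW_HINT");
	return 0;
}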