Diffstat (limited to 'drivers/scsi/sd.c')
-rw-r--r--  drivers/scsi/sd.c  36
1 file changed, 33 insertions(+), 3 deletions(-)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d175c5c5ccf8..a028ab3322a9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -231,11 +231,15 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr,
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
+	bool v;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

-	sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
+	if (kstrtobool(buf, &v))
+		return -EINVAL;
+
+	sdp->manage_start_stop = v;

	return count;
}
@@ -253,6 +257,7 @@ static ssize_t
allow_restart_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
+	bool v;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

@@ -262,7 +267,10 @@ allow_restart_store(struct device *dev, struct device_attribute *attr,
	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

-	sdp->allow_restart = simple_strtoul(buf, NULL, 10);
+	if (kstrtobool(buf, &v))
+		return -EINVAL;
+
+	sdp->allow_restart = v;

	return count;
}
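
Both store handlers in the hunks above switch from simple_strtoul() to kstrtobool(). The practical difference is input validation: strtoul-style parsing silently maps non-numeric input to 0 and stores it, while kstrtobool() rejects anything it does not recognize, so the sysfs write can fail cleanly with -EINVAL. The standalone userspace sketch below (not kernel code) re-implements the token set that kstrtobool() is documented to accept (y/Y/1, n/N/0, on/off) purely for illustration, and contrasts it with strtoul():

/* Sketch only: sketch_strtobool() is an illustrative stand-in, not the kernel helper. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int sketch_strtobool(const char *s, bool *res)
{
	switch (s[0]) {
	case 'y': case 'Y': case '1':
		*res = true;
		return 0;
	case 'n': case 'N': case '0':
		*res = false;
		return 0;
	case 'o': case 'O':
		/* "on" -> true, "off" -> false */
		if (s[1] == 'n' || s[1] == 'N') {
			*res = true;
			return 0;
		}
		if (s[1] == 'f' || s[1] == 'F') {
			*res = false;
			return 0;
		}
		return -EINVAL;
	default:
		return -EINVAL;
	}
}

int main(void)
{
	const char *inputs[] = { "1", "0", "on", "off", "enable", "42" };
	bool v;

	for (size_t i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
		unsigned long old = strtoul(inputs[i], NULL, 10);	/* silently 0 for garbage */
		int ret = sketch_strtobool(inputs[i], &v);		/* -EINVAL for garbage   */

		printf("%-7s strtoul=%lu  strtobool=%s\n", inputs[i], old,
		       ret ? "-EINVAL" : (v ? "true" : "false"));
	}
	return 0;
}
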
@@ -906,6 +914,26 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
	else
		sdkp->zeroing_mode = SD_ZERO_WRITE;

+	if (sdkp->max_ws_blocks &&
+	    sdkp->physical_block_size > logical_block_size) {
+		/*
+		 * Reporting a maximum number of blocks that is not aligned
+		 * on the device physical size would cause a large write same
+		 * request to be split into physically unaligned chunks by
+		 * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
+		 * even if the caller of these functions took care to align the
+		 * large request. So make sure the maximum reported is aligned
+		 * to the device physical block size. This is only an optional
+		 * optimization for regular disks, but this is mandatory to
+		 * avoid failure of large write same requests directed at
+		 * sequential write required zones of host-managed ZBC disks.
+		 */
+		sdkp->max_ws_blocks =
+			round_down(sdkp->max_ws_blocks,
+				   bytes_to_logical(sdkp->device,
+						    sdkp->physical_block_size));
+	}
+
out:
	blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
					 (logical_block_size >> 9));
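
The hunk above rounds max_ws_blocks, which is counted in logical blocks, down to a multiple of the physical block size, so that requests split by __blkdev_issue_write_zeroes() or __blkdev_issue_write_same() never end on a physically unaligned boundary. The userspace sketch below works the arithmetic for an assumed 512-byte-logical / 4096-byte-physical disk and an assumed maximum of 0xFFFFFFFF logical blocks; round_down_to() and bytes_to_logical_blocks() are illustrative stand-ins for the kernel's round_down() and the bytes_to_logical() helper in sd.h (the kernel macro additionally expects a power-of-two granularity, which holds here since 4096/512 = 8):

#include <stdio.h>
#include <stdint.h>

/* Round x down to the nearest multiple of y (modulo form used for clarity). */
static uint64_t round_down_to(uint64_t x, uint64_t y)
{
	return x - (x % y);
}

/* Convert a byte count to a number of logical blocks. */
static uint64_t bytes_to_logical_blocks(uint64_t bytes, uint32_t logical_block_size)
{
	return bytes / logical_block_size;
}

int main(void)
{
	uint32_t logical_block_size = 512;
	uint32_t physical_block_size = 4096;
	uint64_t max_ws_blocks = 0xFFFFFFFFULL;	/* assumed, unaligned maximum */

	uint64_t granularity = bytes_to_logical_blocks(physical_block_size,
						       logical_block_size);
	uint64_t aligned = round_down_to(max_ws_blocks, granularity);

	printf("granularity: %llu logical blocks per physical block\n",
	       (unsigned long long)granularity);
	printf("max_ws_blocks: %llu -> %llu (a multiple of %llu)\n",
	       (unsigned long long)max_ws_blocks,
	       (unsigned long long)aligned,
	       (unsigned long long)granularity);
	return 0;
}
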
@@ -1284,6 +1312,7 @@ static int sd_init_command(struct scsi_cmnd *cmd)
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
	struct request *rq = SCpnt->request;
+	u8 *cmnd;

	if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
		sd_zbc_write_unlock_zone(SCpnt);
@@ -1292,9 +1321,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
		__free_page(rq->special_vec.bv_page);

	if (SCpnt->cmnd != scsi_req(rq)->cmd) {
-		mempool_free(SCpnt->cmnd, sd_cdb_pool);
+		cmnd = SCpnt->cmnd;
		SCpnt->cmnd = NULL;
		SCpnt->cmd_len = 0;
+		mempool_free(cmnd, sd_cdb_pool);
	}
}
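
The last two hunks reorder sd_uninit_command() so that the command's CDB pointer and length are cleared before the buffer goes back to sd_cdb_pool rather than after. The likely intent (an inference; the diff itself does not say) is to avoid any window in which SCpnt->cmnd still points at memory the mempool may already have handed to another command. A generic userspace sketch of that save/detach/free ordering, with invented names (struct cmd_sketch, release_cdb()):

#include <stdlib.h>
#include <stdint.h>

struct cmd_sketch {
	uint8_t *cmnd;		/* CDB buffer, possibly pool-allocated */
	unsigned short cmd_len;
};

static void release_cdb(struct cmd_sketch *cmd)
{
	uint8_t *cdb = cmd->cmnd;	/* 1. save the pointer           */

	cmd->cmnd = NULL;		/* 2. detach it from the command */
	cmd->cmd_len = 0;
	free(cdb);			/* 3. only now give it back      */
}

int main(void)
{
	struct cmd_sketch cmd = { .cmnd = malloc(32), .cmd_len = 32 };

	release_cdb(&cmd);
	return 0;
}
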