Diffstat (limited to 'drivers/s390/block')
-rw-r--r--  drivers/s390/block/Kconfig            |   7
-rw-r--r--  drivers/s390/block/Makefile           |   3
-rw-r--r--  drivers/s390/block/dasd.c             | 115
-rw-r--r--  drivers/s390/block/dasd_alias.c       |   3
-rw-r--r--  drivers/s390/block/dasd_devmap.c      |  77
-rw-r--r--  drivers/s390/block/dasd_eckd.c        | 310
-rw-r--r--  drivers/s390/block/dasd_eckd.h        |   1
-rw-r--r--  drivers/s390/block/dcssblk.c          |   2
-rw-r--r--  drivers/s390/block/scm_blk.c          | 268
-rw-r--r--  drivers/s390/block/scm_blk.h          |  64
-rw-r--r--  drivers/s390/block/scm_blk_cluster.c  | 255
-rw-r--r--  drivers/s390/block/xpram.c            |   2
12 files changed, 375 insertions(+), 732 deletions(-)
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 0acb8c2f9475..31f014b57bfc 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -82,10 +82,3 @@ config SCM_BLOCK
To compile this driver as a module, choose M here: the
module will be called scm_block.
-
-config SCM_BLOCK_CLUSTER_WRITE
- def_bool y
- prompt "SCM force cluster writes"
- depends on SCM_BLOCK
- help
- Force writes to Storage Class Memory (SCM) to be in done in clusters.
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index c2f4e673e031..b64e2b32c753 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -19,7 +19,4 @@ obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o
scm_block-objs := scm_drv.o scm_blk.o
-ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
-scm_block-objs += scm_blk_cluster.o
-endif
obj-$(CONFIG_SCM_BLOCK) += scm_block.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 6fb3fd5efc11..670ac0a4ef49 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1965,8 +1965,12 @@ static int __dasd_device_is_unusable(struct dasd_device *device,
{
int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
- if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
- /* dasd is being set offline. */
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+ !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /*
+ * dasd is being set offline,
+ * but it is not a safe offline where we have to allow I/O
+ */
return 1;
}
if (device->stopped) {
@@ -2672,7 +2676,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
*/
if (basedev->state < DASD_STATE_READY) {
while ((req = blk_fetch_request(block->request_queue)))
- __blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, BLK_STS_IOERR);
return;
}
@@ -2692,7 +2696,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
"Rejecting write request %p",
req);
blk_start_request(req);
- __blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, BLK_STS_IOERR);
continue;
}
if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
@@ -2702,7 +2706,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
"Rejecting failfast request %p",
req);
blk_start_request(req);
- __blk_end_request_all(req, -ETIMEDOUT);
+ __blk_end_request_all(req, BLK_STS_TIMEOUT);
continue;
}
cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -2734,7 +2738,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
"on request %p",
PTR_ERR(cqr), req);
blk_start_request(req);
- __blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, BLK_STS_IOERR);
continue;
}
/*
@@ -2755,21 +2759,29 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
struct request *req;
int status;
- int error = 0;
+ blk_status_t error = BLK_STS_OK;
req = (struct request *) cqr->callback_data;
dasd_profile_end(cqr->block, cqr, req);
+
status = cqr->block->base->discipline->free_cp(cqr, req);
if (status < 0)
- error = status;
+ error = errno_to_blk_status(status);
else if (status == 0) {
- if (cqr->intrc == -EPERM)
- error = -EBADE;
- else if (cqr->intrc == -ENOLINK ||
- cqr->intrc == -ETIMEDOUT)
- error = cqr->intrc;
- else
- error = -EIO;
+ switch (cqr->intrc) {
+ case -EPERM:
+ error = BLK_STS_NEXUS;
+ break;
+ case -ENOLINK:
+ error = BLK_STS_TRANSPORT;
+ break;
+ case -ETIMEDOUT:
+ error = BLK_STS_TIMEOUT;
+ break;
+ default:
+ error = BLK_STS_IOERR;
+ break;
+ }
}
__blk_end_request_all(req, error);
}
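
The hunk above switches DASD request completion from negative errno values to blk_status_t. As an aside, the mapping can be read as a small standalone helper; the following sketch only restates the hunk, and the helper name dasd_intrc_to_blk_status() is hypothetical, not part of the patch.

/*
 * Sketch only: the cqr->intrc to blk_status_t mapping used in
 * __dasd_cleanup_cqr() above, pulled out into a helper for readability.
 */
static blk_status_t dasd_intrc_to_blk_status(int intrc)
{
	switch (intrc) {
	case -EPERM:
		return BLK_STS_NEXUS;		/* previously mapped to -EBADE */
	case -ENOLINK:
		return BLK_STS_TRANSPORT;	/* previously passed through as -ENOLINK */
	case -ETIMEDOUT:
		return BLK_STS_TIMEOUT;		/* previously passed through as -ETIMEDOUT */
	default:
		return BLK_STS_IOERR;		/* previously -EIO */
	}
}
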
@@ -3190,7 +3202,7 @@ static void dasd_flush_request_queue(struct dasd_block *block)
spin_lock_irq(&block->request_queue_lock);
while ((req = blk_fetch_request(block->request_queue)))
- __blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, BLK_STS_IOERR);
spin_unlock_irq(&block->request_queue_lock);
}
@@ -3562,57 +3574,69 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
else
pr_warn("%s: The DASD cannot be set offline while it is in use\n",
dev_name(&cdev->dev));
- clear_bit(DASD_FLAG_OFFLINE, &device->flags);
- goto out_busy;
+ rc = -EBUSY;
+ goto out_err;
}
}
- if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
- /*
- * safe offline already running
- * could only be called by normal offline so safe_offline flag
- * needs to be removed to run normal offline and kill all I/O
- */
- if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags))
- /* Already doing normal offline processing */
- goto out_busy;
- else
- clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
- } else {
- if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
- /* Already doing offline processing */
- goto out_busy;
+ /*
+ * Test if the offline processing is already running and exit if so.
+ * If a safe offline is being processed, this can only be a normal
+ * offline that should be allowed to overtake the safe offline and
+ * cancel any I/O we no longer want to wait for
+ */
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
+ &device->flags);
+ } else {
+ rc = -EBUSY;
+ goto out_err;
+ }
}
-
set_bit(DASD_FLAG_OFFLINE, &device->flags);
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
/*
- * if safe_offline called set safe_offline_running flag and
+ * if safe_offline is called set safe_offline_running flag and
* clear safe_offline so that a call to normal offline
* can overrun safe_offline processing
*/
if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
!test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /* need to unlock here to wait for outstanding I/O */
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
/*
* If we want to set the device safe offline all IO operations
* should be finished before continuing the offline process
* so sync bdev first and then wait for our queues to become
* empty
*/
- /* sync blockdev and partitions */
if (device->block) {
rc = fsync_bdev(device->block->bdev);
if (rc != 0)
goto interrupted;
}
- /* schedule device tasklet and wait for completion */
dasd_schedule_device_bh(device);
rc = wait_event_interruptible(shutdown_waitq,
_wait_for_empty_queues(device));
if (rc != 0)
goto interrupted;
+
+ /*
+ * check if a normal offline process overtook the offline
+ * processing; in this case simply do nothing besides returning
+ * that we got interrupted
+ * otherwise mark safe offline as no longer running and
+ * continue with normal offline
+ */
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ rc = -ERESTARTSYS;
+ goto out_err;
+ }
+ clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
}
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
dasd_set_target_state(device, DASD_STATE_NEW);
/* dasd_delete_device destroys the device reference. */
@@ -3624,22 +3648,18 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
*/
if (block)
dasd_free_block(block);
+
return 0;
interrupted:
/* interrupted by signal */
- clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
clear_bit(DASD_FLAG_OFFLINE, &device->flags);
- dasd_put_device(device);
-
- return rc;
-
-out_busy:
+out_err:
dasd_put_device(device);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
-
- return -EBUSY;
+ return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
@@ -3901,7 +3921,6 @@ EXPORT_SYMBOL(dasd_schedule_requeue);
int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
struct dasd_device *device = dasd_device_from_cdev(cdev);
- int rc;
if (IS_ERR(device))
return PTR_ERR(device);
@@ -3910,7 +3929,7 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
set_bit(DASD_FLAG_SUSPENDED, &device->flags);
if (device->discipline->freeze)
- rc = device->discipline->freeze(device);
+ device->discipline->freeze(device);
/* disallow new I/O */
dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 1e560188dd13..0e0e622eadc3 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -754,7 +754,6 @@ static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
struct alias_pav_group *pavgroup;
struct dasd_device *device, *temp;
struct dasd_eckd_private *private;
- int rc;
unsigned long flags;
LIST_HEAD(active);
@@ -785,7 +784,7 @@ static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
device = list_first_entry(&active, struct dasd_device,
alias_list);
spin_unlock_irqrestore(&lcu->lock, flags);
- rc = dasd_flush_device_queue(device);
+ dasd_flush_device_queue(device);
spin_lock_irqsave(&lcu->lock, flags);
/*
* only move device around if it wasn't moved away while we
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 1164b51d09f3..779dce069cc5 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -150,7 +150,7 @@ static int __init dasd_busid(char *str, int *id0, int *id1, int *devno)
/* Old style 0xXXXX or XXXX */
if (!kstrtouint(str, 16, &val)) {
*id0 = *id1 = 0;
- if (val < 0 || val > 0xffff)
+ if (val > 0xffff)
return -EINVAL;
*devno = val;
return 0;
@@ -315,45 +315,58 @@ static int __init dasd_parse_range(const char *range)
char *features_str = NULL;
char *from_str = NULL;
char *to_str = NULL;
- size_t len = strlen(range) + 1;
- char tmp[len];
+ int rc = 0;
+ char *tmp;
- strlcpy(tmp, range, len);
+ tmp = kstrdup(range, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
- if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str))
- goto out_err;
+ if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str)) {
+ rc = -EINVAL;
+ goto out;
+ }
- if (dasd_busid(from_str, &from_id0, &from_id1, &from))
- goto out_err;
+ if (dasd_busid(from_str, &from_id0, &from_id1, &from)) {
+ rc = -EINVAL;
+ goto out;
+ }
to = from;
to_id0 = from_id0;
to_id1 = from_id1;
if (to_str) {
- if (dasd_busid(to_str, &to_id0, &to_id1, &to))
- goto out_err;
+ if (dasd_busid(to_str, &to_id0, &to_id1, &to)) {
+ rc = -EINVAL;
+ goto out;
+ }
if (from_id0 != to_id0 || from_id1 != to_id1 || from > to) {
pr_err("%s is not a valid device range\n", range);
- goto out_err;
+ rc = -EINVAL;
+ goto out;
}
}
features = dasd_feature_list(features_str);
- if (features < 0)
- goto out_err;
+ if (features < 0) {
+ rc = -EINVAL;
+ goto out;
+ }
/* each device in dasd= parameter should be set initially online */
features |= DASD_FEATURE_INITIAL_ONLINE;
while (from <= to) {
sprintf(bus_id, "%01x.%01x.%04x", from_id0, from_id1, from++);
devmap = dasd_add_busid(bus_id, features);
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
+ if (IS_ERR(devmap)) {
+ rc = PTR_ERR(devmap);
+ goto out;
+ }
}
- return 0;
+out:
+ kfree(tmp);
-out_err:
- return -EINVAL;
+ return rc;
}
/*
@@ -735,13 +748,22 @@ static ssize_t
dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
- int ro_flag;
+ struct dasd_device *device;
+ int ro_flag = 0;
devmap = dasd_find_busid(dev_name(dev));
- if (!IS_ERR(devmap))
- ro_flag = (devmap->features & DASD_FEATURE_READONLY) != 0;
- else
- ro_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_READONLY) != 0;
+ if (IS_ERR(devmap))
+ goto out;
+
+ ro_flag = !!(devmap->features & DASD_FEATURE_READONLY);
+
+ spin_lock(&dasd_devmap_lock);
+ device = devmap->device;
+ if (device)
+ ro_flag |= test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+ spin_unlock(&dasd_devmap_lock);
+
+out:
return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n");
}
@@ -764,7 +786,7 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
- return PTR_ERR(device);
+ return count;
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
@@ -928,11 +950,14 @@ dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
{
struct ccw_device *cdev = to_ccwdev(dev);
struct dasd_device *device;
+ unsigned long flags;
int rc;
- device = dasd_device_from_cdev(cdev);
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ device = dasd_device_from_cdev_locked(cdev);
if (IS_ERR(device)) {
rc = PTR_ERR(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
goto out;
}
@@ -940,12 +965,14 @@ dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/* Already doing offline processing */
dasd_put_device(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
rc = -EBUSY;
goto out;
}
set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
dasd_put_device(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
rc = ccw_device_set_offline(cdev);
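
The dasd_safe_offline_store() change above moves the device lookup and flag manipulation under the ccw device lock, so setting DASD_FLAG_SAFE_OFFLINE cannot race with a concurrently starting offline. A condensed sketch of that locking pattern, simplified from the hunk and with error handling omitted, is:

/* Sketch: flag manipulation done entirely under the ccw device lock. */
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
device = dasd_device_from_cdev_locked(cdev);
if (!IS_ERR(device)) {
	set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
	dasd_put_device(device);
}
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
rc = ccw_device_set_offline(cdev);
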
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 122456e4db89..c3e5ad641b0b 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -213,10 +213,8 @@ static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
geo->head |= head;
}
-static int
-check_XRC (struct ccw1 *de_ccw,
- struct DE_eckd_data *data,
- struct dasd_device *device)
+static int check_XRC(struct ccw1 *ccw, struct DE_eckd_data *data,
+ struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int rc;
@@ -224,7 +222,7 @@ check_XRC (struct ccw1 *de_ccw,
if (!private->rdc_data.facilities.XRC_supported)
return 0;
- /* switch on System Time Stamp - needed for XRC Support */
+ /* switch on System Time Stamp - needed for XRC Support */
data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
@@ -233,24 +231,30 @@ check_XRC (struct ccw1 *de_ccw,
if (rc == -EOPNOTSUPP || rc == -EACCES)
rc = 0;
- de_ccw->count = sizeof(struct DE_eckd_data);
- de_ccw->flags |= CCW_FLAG_SLI;
+ if (ccw) {
+ ccw->count = sizeof(struct DE_eckd_data);
+ ccw->flags |= CCW_FLAG_SLI;
+ }
+
return rc;
}
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
- unsigned int totrk, int cmd, struct dasd_device *device)
+ unsigned int totrk, int cmd, struct dasd_device *device,
+ int blksize)
{
struct dasd_eckd_private *private = device->private;
- u32 begcyl, endcyl;
u16 heads, beghead, endhead;
+ u32 begcyl, endcyl;
int rc = 0;
- ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
- ccw->flags = 0;
- ccw->count = 16;
- ccw->cda = (__u32) __pa(data);
+ if (ccw) {
+ ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
+ ccw->flags = 0;
+ ccw->count = 16;
+ ccw->cda = (__u32)__pa(data);
+ }
memset(data, 0, sizeof(struct DE_eckd_data));
switch (cmd) {
@@ -269,18 +273,24 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
data->mask.perm = 0x1;
data->attributes.operation = DASD_BYPASS_CACHE;
break;
+ case DASD_ECKD_CCW_READ_TRACK:
+ case DASD_ECKD_CCW_READ_TRACK_DATA:
+ data->mask.perm = 0x1;
+ data->attributes.operation = private->attrib.operation;
+ data->blk_size = 0;
+ break;
case DASD_ECKD_CCW_WRITE:
case DASD_ECKD_CCW_WRITE_MT:
case DASD_ECKD_CCW_WRITE_KD:
case DASD_ECKD_CCW_WRITE_KD_MT:
data->mask.perm = 0x02;
data->attributes.operation = private->attrib.operation;
- rc = check_XRC (ccw, data, device);
+ rc = check_XRC(ccw, data, device);
break;
case DASD_ECKD_CCW_WRITE_CKD:
case DASD_ECKD_CCW_WRITE_CKD_MT:
data->attributes.operation = DASD_BYPASS_CACHE;
- rc = check_XRC (ccw, data, device);
+ rc = check_XRC(ccw, data, device);
break;
case DASD_ECKD_CCW_ERASE:
case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
@@ -288,7 +298,18 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
data->mask.perm = 0x3;
data->mask.auth = 0x1;
data->attributes.operation = DASD_BYPASS_CACHE;
- rc = check_XRC (ccw, data, device);
+ rc = check_XRC(ccw, data, device);
+ break;
+ case DASD_ECKD_CCW_WRITE_FULL_TRACK:
+ data->mask.perm = 0x03;
+ data->attributes.operation = private->attrib.operation;
+ data->blk_size = 0;
+ break;
+ case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+ data->mask.perm = 0x02;
+ data->attributes.operation = private->attrib.operation;
+ data->blk_size = blksize;
+ rc = check_XRC(ccw, data, device);
break;
default:
dev_err(&device->cdev->dev,
@@ -325,36 +346,26 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
return rc;
}
-static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
- struct dasd_device *device)
-{
- struct dasd_eckd_private *private = device->private;
- int rc;
-
- if (!private->rdc_data.facilities.XRC_supported)
- return 0;
-
- /* switch on System Time Stamp - needed for XRC Support */
- pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid' */
- pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
- pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
-
- rc = get_phys_clock(&pfxdata->define_extent.ep_sys_time);
- /* Ignore return code if sync clock is switched off. */
- if (rc == -EOPNOTSUPP || rc == -EACCES)
- rc = 0;
- return rc;
-}
-static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
- unsigned int rec_on_trk, int count, int cmd,
- struct dasd_device *device, unsigned int reclen,
- unsigned int tlf)
+static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
+ unsigned int trk, unsigned int rec_on_trk,
+ int count, int cmd, struct dasd_device *device,
+ unsigned int reclen, unsigned int tlf)
{
struct dasd_eckd_private *private = device->private;
int sector;
int dn, d;
+ if (ccw) {
+ ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
+ ccw->flags = 0;
+ if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
+ ccw->count = 22;
+ else
+ ccw->count = 20;
+ ccw->cda = (__u32)__pa(data);
+ }
+
memset(data, 0, sizeof(*data));
sector = 0;
if (rec_on_trk) {
@@ -481,14 +492,12 @@ static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
unsigned int trk, unsigned int totrk, int cmd,
struct dasd_device *basedev, struct dasd_device *startdev,
- unsigned char format, unsigned int rec_on_trk, int count,
+ unsigned int format, unsigned int rec_on_trk, int count,
unsigned int blksize, unsigned int tlf)
{
struct dasd_eckd_private *basepriv, *startpriv;
- struct DE_eckd_data *dedata;
struct LRE_eckd_data *lredata;
- u32 begcyl, endcyl;
- u16 heads, beghead, endhead;
+ struct DE_eckd_data *dedata;
int rc = 0;
basepriv = basedev->private;
@@ -527,98 +536,19 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
pfxdata->validity.hyper_pav = 1;
}
- /* define extend data (mostly)*/
- switch (cmd) {
- case DASD_ECKD_CCW_READ_HOME_ADDRESS:
- case DASD_ECKD_CCW_READ_RECORD_ZERO:
- case DASD_ECKD_CCW_READ:
- case DASD_ECKD_CCW_READ_MT:
- case DASD_ECKD_CCW_READ_CKD:
- case DASD_ECKD_CCW_READ_CKD_MT:
- case DASD_ECKD_CCW_READ_KD:
- case DASD_ECKD_CCW_READ_KD_MT:
- dedata->mask.perm = 0x1;
- dedata->attributes.operation = basepriv->attrib.operation;
- break;
- case DASD_ECKD_CCW_READ_COUNT:
- dedata->mask.perm = 0x1;
- dedata->attributes.operation = DASD_BYPASS_CACHE;
- break;
- case DASD_ECKD_CCW_READ_TRACK:
- case DASD_ECKD_CCW_READ_TRACK_DATA:
- dedata->mask.perm = 0x1;
- dedata->attributes.operation = basepriv->attrib.operation;
- dedata->blk_size = 0;
- break;
- case DASD_ECKD_CCW_WRITE:
- case DASD_ECKD_CCW_WRITE_MT:
- case DASD_ECKD_CCW_WRITE_KD:
- case DASD_ECKD_CCW_WRITE_KD_MT:
- dedata->mask.perm = 0x02;
- dedata->attributes.operation = basepriv->attrib.operation;
- rc = check_XRC_on_prefix(pfxdata, basedev);
- break;
- case DASD_ECKD_CCW_WRITE_CKD:
- case DASD_ECKD_CCW_WRITE_CKD_MT:
- dedata->attributes.operation = DASD_BYPASS_CACHE;
- rc = check_XRC_on_prefix(pfxdata, basedev);
- break;
- case DASD_ECKD_CCW_ERASE:
- case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
- case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
- dedata->mask.perm = 0x3;
- dedata->mask.auth = 0x1;
- dedata->attributes.operation = DASD_BYPASS_CACHE;
- rc = check_XRC_on_prefix(pfxdata, basedev);
- break;
- case DASD_ECKD_CCW_WRITE_FULL_TRACK:
- dedata->mask.perm = 0x03;
- dedata->attributes.operation = basepriv->attrib.operation;
- dedata->blk_size = 0;
- break;
- case DASD_ECKD_CCW_WRITE_TRACK_DATA:
- dedata->mask.perm = 0x02;
- dedata->attributes.operation = basepriv->attrib.operation;
- dedata->blk_size = blksize;
- rc = check_XRC_on_prefix(pfxdata, basedev);
- break;
- default:
- DBF_DEV_EVENT(DBF_ERR, basedev,
- "PFX LRE unknown opcode 0x%x", cmd);
- BUG();
- return -EINVAL;
- }
-
- dedata->attributes.mode = 0x3; /* ECKD */
-
- if ((basepriv->rdc_data.cu_type == 0x2105 ||
- basepriv->rdc_data.cu_type == 0x2107 ||
- basepriv->rdc_data.cu_type == 0x1750)
- && !(basepriv->uses_cdl && trk < 2))
- dedata->ga_extended |= 0x40; /* Regular Data Format Mode */
-
- heads = basepriv->rdc_data.trk_per_cyl;
- begcyl = trk / heads;
- beghead = trk % heads;
- endcyl = totrk / heads;
- endhead = totrk % heads;
-
- /* check for sequential prestage - enhance cylinder range */
- if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
- dedata->attributes.operation == DASD_SEQ_ACCESS) {
-
- if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
- endcyl += basepriv->attrib.nr_cyl;
- else
- endcyl = (basepriv->real_cyl - 1);
- }
+ rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
- set_ch_t(&dedata->beg_ext, begcyl, beghead);
- set_ch_t(&dedata->end_ext, endcyl, endhead);
+ /*
+ * For some commands the System Time Stamp is set in the define extent
+ * data when XRC is supported. The validity of the time stamp must be
+ * reflected in the prefix data as well.
+ */
+ if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
+ pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
if (format == 1) {
- fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
- basedev, blksize, tlf);
+ locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
+ basedev, blksize, tlf);
}
return rc;
@@ -1887,7 +1817,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
ccw = cqr->cpaddr;
/* Define extent for the first 3 tracks. */
define_extent(ccw++, cqr->data, 0, 2,
- DASD_ECKD_CCW_READ_COUNT, device);
+ DASD_ECKD_CCW_READ_COUNT, device, 0);
LO_data = cqr->data + sizeof(struct DE_eckd_data);
/* Locate record for the first 4 records on track 0. */
ccw[-1].flags |= CCW_FLAG_CC;
@@ -2266,7 +2196,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
count, 0, 0);
} else {
define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
- DASD_ECKD_CCW_READ_COUNT, startdev);
+ DASD_ECKD_CCW_READ_COUNT, startdev, 0);
data += sizeof(struct DE_eckd_data);
ccw[-1].flags |= CCW_FLAG_CC;
@@ -2420,7 +2350,7 @@ dasd_eckd_build_format(struct dasd_device *base,
} else {
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
- DASD_ECKD_CCW_WRITE_CKD, startdev);
+ DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
/* grant subsystem permission to format R0 */
if (r0_perm)
((struct DE_eckd_data *) data)
@@ -2444,7 +2374,7 @@ dasd_eckd_build_format(struct dasd_device *base,
} else {
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
- DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev);
+ DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
data += sizeof(struct DE_eckd_data);
}
ccw[-1].flags |= CCW_FLAG_CC;
@@ -2463,7 +2393,7 @@ dasd_eckd_build_format(struct dasd_device *base,
} else {
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
- DASD_ECKD_CCW_WRITE_CKD, startdev);
+ DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
data += sizeof(struct DE_eckd_data);
}
ccw[-1].flags |= CCW_FLAG_CC;
@@ -3187,7 +3117,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
sizeof(struct PFX_eckd_data));
} else {
if (define_extent(ccw++, cqr->data, first_trk,
- last_trk, cmd, basedev) == -EAGAIN) {
+ last_trk, cmd, basedev, 0) == -EAGAIN) {
/* Clock not in sync and XRC is enabled.
* Try again later.
*/
@@ -3509,12 +3439,19 @@ static int prepare_itcw(struct itcw *itcw,
dedata->mask.perm = 0x02;
dedata->attributes.operation = basepriv->attrib.operation;
dedata->blk_size = blksize;
- rc = check_XRC_on_prefix(&pfxdata, basedev);
+ rc = check_XRC(NULL, dedata, basedev);
dedata->ga_extended |= 0x42;
lredata->operation.orientation = 0x0;
lredata->operation.operation = 0x3F;
lredata->extended_operation = 0x23;
lredata->auxiliary.check_bytes = 0x2;
+ /*
+ * If XRC is supported the System Time Stamp is set. The
+ * validity of the time stamp must be reflected in the prefix
+ * data as well.
+ */
+ if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
+ pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
pfx_cmd = DASD_ECKD_CCW_PFX;
break;
case DASD_ECKD_CCW_READ_COUNT_MT:
@@ -3842,25 +3779,28 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
return cqr;
}
-static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
- struct dasd_block *block,
- struct request *req)
+static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
+ struct dasd_block *block,
+ struct request *req)
{
- unsigned long *idaws;
+ sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
+ unsigned int seg_len, len_to_track_end;
+ unsigned int cidaw, cplength, datasize;
+ sector_t first_trk, last_trk, sectors;
+ struct dasd_eckd_private *base_priv;
struct dasd_device *basedev;
- struct dasd_ccw_req *cqr;
- struct ccw1 *ccw;
struct req_iterator iter;
+ struct dasd_ccw_req *cqr;
+ unsigned int first_offs;
+ unsigned int trkcount;
+ unsigned long *idaws;
+ unsigned int size;
+ unsigned char cmd;
struct bio_vec bv;
+ struct ccw1 *ccw;
+ int use_prefix;
+ void *data;
char *dst;
- unsigned char cmd;
- unsigned int trkcount;
- unsigned int seg_len, len_to_track_end;
- unsigned int first_offs;
- unsigned int cidaw, cplength, datasize;
- sector_t first_trk, last_trk, sectors;
- sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
- unsigned int pfx_datasize;
/*
	 * raw track access needs to be a multiple of 64k and on a 64k boundary
@@ -3878,8 +3818,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
DBF_DEV_EVENT(DBF_ERR, basedev,
"raw write not track aligned (%lu,%lu) req %p",
start_padding_sectors, end_padding_sectors, req);
- cqr = ERR_PTR(-EINVAL);
- goto out;
+ return ERR_PTR(-EINVAL);
}
first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
@@ -3892,10 +3831,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
cmd = DASD_ECKD_CCW_READ_TRACK;
else if (rq_data_dir(req) == WRITE)
cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
- else {
- cqr = ERR_PTR(-EINVAL);
- goto out;
- }
+ else
+ return ERR_PTR(-EINVAL);
/*
* Raw track based I/O needs IDAWs for each page,
@@ -3903,38 +3840,46 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
*/
cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
- /* 1x prefix + one read/write ccw per track */
- cplength = 1 + trkcount;
-
/*
- * struct PFX_eckd_data has up to 2 byte as extended parameter
- * this is needed for write full track and has to be mentioned
- * separately
- * add 8 instead of 2 to keep 8 byte boundary
+ * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
+ * of extended parameter. This is needed for write full track.
*/
- pfx_datasize = sizeof(struct PFX_eckd_data) + 8;
+ base_priv = basedev->private;
+ use_prefix = base_priv->features.feature[8] & 0x01;
+ if (use_prefix) {
+ cplength = 1 + trkcount;
+ size = sizeof(struct PFX_eckd_data) + 2;
+ } else {
+ cplength = 2 + trkcount;
+ size = sizeof(struct DE_eckd_data) +
+ sizeof(struct LRE_eckd_data) + 2;
+ }
+ size = ALIGN(size, 8);
- datasize = pfx_datasize + cidaw * sizeof(unsigned long long);
+ datasize = size + cidaw * sizeof(unsigned long long);
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
datasize, startdev);
if (IS_ERR(cqr))
- goto out;
+ return cqr;
+
ccw = cqr->cpaddr;
+ data = cqr->data;
- if (prefix_LRE(ccw++, cqr->data, first_trk, last_trk, cmd,
- basedev, startdev, 1 /* format */, first_offs + 1,
- trkcount, 0, 0) == -EAGAIN) {
- /* Clock not in sync and XRC is enabled.
- * Try again later.
- */
- dasd_sfree_request(cqr, startdev);
- cqr = ERR_PTR(-EAGAIN);
- goto out;
+ if (use_prefix) {
+ prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
+ startdev, 1, first_offs + 1, trkcount, 0, 0);
+ } else {
+ define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
+ ccw[-1].flags |= CCW_FLAG_CC;
+
+ data += sizeof(struct DE_eckd_data);
+ locate_record_ext(ccw++, data, first_trk, first_offs + 1,
+ trkcount, cmd, basedev, 0, 0);
}
- idaws = (unsigned long *)(cqr->data + pfx_datasize);
+ idaws = (unsigned long *)(cqr->data + size);
len_to_track_end = 0;
if (start_padding_sectors) {
ccw[-1].flags |= CCW_FLAG_CC;
@@ -3984,9 +3929,6 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
- if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
- cqr = NULL;
-out:
return cqr;
}
@@ -4096,7 +4038,7 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
private->count++;
if ((base->features & DASD_FEATURE_USERAW))
- cqr = dasd_raw_build_cp(startdev, block, req);
+ cqr = dasd_eckd_build_cp_raw(startdev, block, req);
else
cqr = dasd_eckd_build_cp(startdev, block, req);
if (IS_ERR(cqr))
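
The raw-track path above (renamed to dasd_eckd_build_cp_raw()) no longer assumes the control unit supports the Prefix command: it checks the feature bit and otherwise builds a Define Extent CCW followed by a Locate Record Extended CCW. A reduced sketch of that selection, taken from the hunk with IDAW setup and error handling left out, is shown below; it is not a complete channel program builder.

/*
 * Sketch of the channel program head built in dasd_eckd_build_cp_raw():
 * either one Prefix CCW, or Define Extent plus Locate Record Extended
 * when Prefix is not available.
 */
use_prefix = base_priv->features.feature[8] & 0x01;
if (use_prefix) {
	cplength = 1 + trkcount;
	size = sizeof(struct PFX_eckd_data) + 2;
} else {
	cplength = 2 + trkcount;
	size = sizeof(struct DE_eckd_data) +
	       sizeof(struct LRE_eckd_data) + 2;
}
size = ALIGN(size, 8);	/* keep the following IDAW list 8 byte aligned */

if (use_prefix) {
	prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
		   startdev, 1, first_offs + 1, trkcount, 0, 0);
} else {
	define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
	ccw[-1].flags |= CCW_FLAG_CC;
	data += sizeof(struct DE_eckd_data);
	locate_record_ext(ccw++, data, first_trk, first_offs + 1,
			  trkcount, cmd, basedev, 0, 0);
}
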
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index e2a710c250a5..fb1f537d986a 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -29,6 +29,7 @@
#define DASD_ECKD_CCW_SNID 0x34
#define DASD_ECKD_CCW_RSSD 0x3e
#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
+#define DASD_ECKD_CCW_LOCATE_RECORD_EXT 0x4b
#define DASD_ECKD_CCW_SNSS 0x54
#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63
#define DASD_ECKD_CCW_WRITE_MT 0x85
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 88fa7b3f7a9d..68bae4f6bd88 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -853,7 +853,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
unsigned long source_addr;
unsigned long bytes_done;
- blk_queue_split(q, &bio, q->bio_split);
+ blk_queue_split(q, &bio);
bytes_done = 0;
dev_info = bio->bi_bdev->bd_disk->private_data;
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 152de6817875..0071febac9e6 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -13,6 +13,7 @@
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -42,7 +43,6 @@ static void __scm_free_rq(struct scm_request *scmrq)
struct aob_rq_header *aobrq = to_aobrq(scmrq);
free_page((unsigned long) scmrq->aob);
- __scm_free_rq_cluster(scmrq);
kfree(scmrq->request);
kfree(aobrq);
}
@@ -82,9 +82,6 @@ static int __scm_alloc_rq(void)
if (!scmrq->request)
goto free;
- if (__scm_alloc_rq_cluster(scmrq))
- goto free;
-
INIT_LIST_HEAD(&scmrq->list);
spin_lock_irq(&list_lock);
list_add(&scmrq->list, &inactive_requests);
@@ -114,13 +111,13 @@ static struct scm_request *scm_request_fetch(void)
{
struct scm_request *scmrq = NULL;
- spin_lock(&list_lock);
+ spin_lock_irq(&list_lock);
if (list_empty(&inactive_requests))
goto out;
scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
list_del(&scmrq->list);
out:
- spin_unlock(&list_lock);
+ spin_unlock_irq(&list_lock);
return scmrq;
}
@@ -231,140 +228,133 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
aob->request.data = (u64) aobrq;
scmrq->bdev = bdev;
scmrq->retries = 4;
- scmrq->error = 0;
+ scmrq->error = BLK_STS_OK;
/* We don't use all msbs - place aidaws at the end of the aob page. */
scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
- scm_request_cluster_init(scmrq);
}
-static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
-{
- if (atomic_read(&bdev->queued_reqs)) {
- /* Queue restart is triggered by the next interrupt. */
- return;
- }
- blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
-}
-
-void scm_request_requeue(struct scm_request *scmrq)
+static void scm_request_requeue(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
int i;
- scm_release_cluster(scmrq);
for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
- blk_requeue_request(bdev->rq, scmrq->request[i]);
+ blk_mq_requeue_request(scmrq->request[i], false);
atomic_dec(&bdev->queued_reqs);
scm_request_done(scmrq);
- scm_ensure_queue_restart(bdev);
+ blk_mq_kick_requeue_list(bdev->rq);
}
-void scm_request_finish(struct scm_request *scmrq)
+static void scm_request_finish(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
int i;
- scm_release_cluster(scmrq);
- for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
- blk_end_request_all(scmrq->request[i], scmrq->error);
+ for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
+ if (scmrq->error)
+ blk_mq_end_request(scmrq->request[i], scmrq->error);
+ else
+ blk_mq_complete_request(scmrq->request[i]);
+ }
atomic_dec(&bdev->queued_reqs);
scm_request_done(scmrq);
}
-static int scm_request_start(struct scm_request *scmrq)
+static void scm_request_start(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
- int ret;
atomic_inc(&bdev->queued_reqs);
- if (!scmrq->aob->request.msb_count) {
- scm_request_requeue(scmrq);
- return -EINVAL;
- }
-
- ret = eadm_start_aob(scmrq->aob);
- if (ret) {
+ if (eadm_start_aob(scmrq->aob)) {
SCM_LOG(5, "no subchannel");
scm_request_requeue(scmrq);
}
- return ret;
}
-static void scm_blk_request(struct request_queue *rq)
+struct scm_queue {
+ struct scm_request *scmrq;
+ spinlock_t lock;
+};
+
+static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *qd)
{
- struct scm_device *scmdev = rq->queuedata;
+ struct scm_device *scmdev = hctx->queue->queuedata;
struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
- struct scm_request *scmrq = NULL;
- struct request *req;
+ struct scm_queue *sq = hctx->driver_data;
+ struct request *req = qd->rq;
+ struct scm_request *scmrq;
- while ((req = blk_peek_request(rq))) {
- if (!scm_permit_request(bdev, req))
- goto out;
+ spin_lock(&sq->lock);
+ if (!scm_permit_request(bdev, req)) {
+ spin_unlock(&sq->lock);
+ return BLK_STS_RESOURCE;
+ }
+ scmrq = sq->scmrq;
+ if (!scmrq) {
+ scmrq = scm_request_fetch();
if (!scmrq) {
- scmrq = scm_request_fetch();
- if (!scmrq) {
- SCM_LOG(5, "no request");
- goto out;
- }
- scm_request_init(bdev, scmrq);
+ SCM_LOG(5, "no request");
+ spin_unlock(&sq->lock);
+ return BLK_STS_RESOURCE;
}
- scm_request_set(scmrq, req);
+ scm_request_init(bdev, scmrq);
+ sq->scmrq = scmrq;
+ }
+ scm_request_set(scmrq, req);
- if (!scm_reserve_cluster(scmrq)) {
- SCM_LOG(5, "cluster busy");
- scm_request_set(scmrq, NULL);
- if (scmrq->aob->request.msb_count)
- goto out;
+ if (scm_request_prepare(scmrq)) {
+ SCM_LOG(5, "aidaw alloc failed");
+ scm_request_set(scmrq, NULL);
- scm_request_done(scmrq);
- return;
- }
+ if (scmrq->aob->request.msb_count)
+ scm_request_start(scmrq);
- if (scm_need_cluster_request(scmrq)) {
- if (scmrq->aob->request.msb_count) {
- /* Start cluster requests separately. */
- scm_request_set(scmrq, NULL);
- if (scm_request_start(scmrq))
- return;
- } else {
- atomic_inc(&bdev->queued_reqs);
- blk_start_request(req);
- scm_initiate_cluster_request(scmrq);
- }
- scmrq = NULL;
- continue;
- }
+ sq->scmrq = NULL;
+ spin_unlock(&sq->lock);
+ return BLK_STS_RESOURCE;
+ }
+ blk_mq_start_request(req);
- if (scm_request_prepare(scmrq)) {
- SCM_LOG(5, "aidaw alloc failed");
- scm_request_set(scmrq, NULL);
- goto out;
- }
- blk_start_request(req);
+ if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
+ scm_request_start(scmrq);
+ sq->scmrq = NULL;
+ }
+ spin_unlock(&sq->lock);
+ return BLK_STS_OK;
+}
- if (scmrq->aob->request.msb_count < nr_requests_per_io)
- continue;
+static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int idx)
+{
+ struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);
- if (scm_request_start(scmrq))
- return;
+ if (!qd)
+ return -ENOMEM;
- scmrq = NULL;
- }
-out:
- if (scmrq)
- scm_request_start(scmrq);
- else
- scm_ensure_queue_restart(bdev);
+ spin_lock_init(&qd->lock);
+ hctx->driver_data = qd;
+
+ return 0;
+}
+
+static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
+{
+ struct scm_queue *qd = hctx->driver_data;
+
+ WARN_ON(qd->scmrq);
+ kfree(hctx->driver_data);
+ hctx->driver_data = NULL;
}
static void __scmrq_log_error(struct scm_request *scmrq)
{
struct aob *aob = scmrq->aob;
- if (scmrq->error == -ETIMEDOUT)
+ if (scmrq->error == BLK_STS_TIMEOUT)
SCM_LOG(1, "Request timeout");
else {
SCM_LOG(1, "Request error");
@@ -377,27 +367,12 @@ static void __scmrq_log_error(struct scm_request *scmrq)
scmrq->error);
}
-void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
-{
- struct scm_request *scmrq = data;
- struct scm_blk_dev *bdev = scmrq->bdev;
-
- scmrq->error = error;
- if (error)
- __scmrq_log_error(scmrq);
-
- spin_lock(&bdev->lock);
- list_add_tail(&scmrq->list, &bdev->finished_requests);
- spin_unlock(&bdev->lock);
- tasklet_hi_schedule(&bdev->tasklet);
-}
-
static void scm_blk_handle_error(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
unsigned long flags;
- if (scmrq->error != -EIO)
+ if (scmrq->error != BLK_STS_IOERR)
goto restart;
/* For -EIO the response block is valid. */
@@ -419,54 +394,46 @@ restart:
return;
requeue:
- spin_lock_irqsave(&bdev->rq_lock, flags);
scm_request_requeue(scmrq);
- spin_unlock_irqrestore(&bdev->rq_lock, flags);
}
-static void scm_blk_tasklet(struct scm_blk_dev *bdev)
+void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
{
- struct scm_request *scmrq;
- unsigned long flags;
-
- spin_lock_irqsave(&bdev->lock, flags);
- while (!list_empty(&bdev->finished_requests)) {
- scmrq = list_first_entry(&bdev->finished_requests,
- struct scm_request, list);
- list_del(&scmrq->list);
- spin_unlock_irqrestore(&bdev->lock, flags);
+ struct scm_request *scmrq = data;
- if (scmrq->error && scmrq->retries-- > 0) {
+ scmrq->error = error;
+ if (error) {
+ __scmrq_log_error(scmrq);
+ if (scmrq->retries-- > 0) {
scm_blk_handle_error(scmrq);
-
- /* Request restarted or requeued, handle next. */
- spin_lock_irqsave(&bdev->lock, flags);
- continue;
+ return;
}
+ }
- if (scm_test_cluster_request(scmrq)) {
- scm_cluster_request_irq(scmrq);
- spin_lock_irqsave(&bdev->lock, flags);
- continue;
- }
+ scm_request_finish(scmrq);
+}
- scm_request_finish(scmrq);
- spin_lock_irqsave(&bdev->lock, flags);
- }
- spin_unlock_irqrestore(&bdev->lock, flags);
- /* Look out for more requests. */
- blk_run_queue(bdev->rq);
+static void scm_blk_request_done(struct request *req)
+{
+ blk_mq_end_request(req, 0);
}
static const struct block_device_operations scm_blk_devops = {
.owner = THIS_MODULE,
};
+static const struct blk_mq_ops scm_mq_ops = {
+ .queue_rq = scm_blk_request,
+ .complete = scm_blk_request_done,
+ .init_hctx = scm_blk_init_hctx,
+ .exit_hctx = scm_blk_exit_hctx,
+};
+
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
- struct request_queue *rq;
- int len, ret = -ENOMEM;
unsigned int devindex, nr_max_blk;
+ struct request_queue *rq;
+ int len, ret;
devindex = atomic_inc_return(&nr_devices) - 1;
/* scma..scmz + scmaa..scmzz */
@@ -477,18 +444,23 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
bdev->scmdev = scmdev;
bdev->state = SCM_OPER;
- spin_lock_init(&bdev->rq_lock);
spin_lock_init(&bdev->lock);
- INIT_LIST_HEAD(&bdev->finished_requests);
atomic_set(&bdev->queued_reqs, 0);
- tasklet_init(&bdev->tasklet,
- (void (*)(unsigned long)) scm_blk_tasklet,
- (unsigned long) bdev);
- rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
- if (!rq)
+ bdev->tag_set.ops = &scm_mq_ops;
+ bdev->tag_set.nr_hw_queues = nr_requests;
+ bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
+ bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+
+ ret = blk_mq_alloc_tag_set(&bdev->tag_set);
+ if (ret)
goto out;
+ rq = blk_mq_init_queue(&bdev->tag_set);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto out_tag;
+ }
bdev->rq = rq;
nr_max_blk = min(scmdev->nr_max_block,
(unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
@@ -498,12 +470,12 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
blk_queue_max_segments(rq, nr_max_blk);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
- scm_blk_dev_cluster_setup(bdev);
bdev->gendisk = alloc_disk(SCM_NR_PARTS);
- if (!bdev->gendisk)
+ if (!bdev->gendisk) {
+ ret = -ENOMEM;
goto out_queue;
-
+ }
rq->queuedata = scmdev;
bdev->gendisk->private_data = scmdev;
bdev->gendisk->fops = &scm_blk_devops;
@@ -528,6 +500,8 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
out_queue:
blk_cleanup_queue(rq);
+out_tag:
+ blk_mq_free_tag_set(&bdev->tag_set);
out:
atomic_dec(&nr_devices);
return ret;
@@ -535,9 +509,9 @@ out:
void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
- tasklet_kill(&bdev->tasklet);
del_gendisk(bdev->gendisk);
blk_cleanup_queue(bdev->gendisk->queue);
+ blk_mq_free_tag_set(&bdev->tag_set);
put_disk(bdev->gendisk);
}
@@ -558,7 +532,7 @@ static bool __init scm_blk_params_valid(void)
if (!nr_requests_per_io || nr_requests_per_io > 64)
return false;
- return scm_cluster_size_valid();
+ return true;
}
static int __init scm_blk_init(void)
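
The scm_blk.c changes above replace the legacy request_fn/tasklet model with blk-mq: a per-hctx scm_queue collects requests into one AOB, submission happens in scm_blk_request(), and completion goes through blk_mq_complete_request()/blk_mq_end_request(). A minimal sketch of the setup half, condensed from scm_blk_dev_setup() above (queue limits and most error paths omitted; only blk-mq API calls that appear in the hunk are used), is:

static const struct blk_mq_ops scm_mq_ops = {
	.queue_rq	= scm_blk_request,	/* add request to the current AOB, start it if full */
	.complete	= scm_blk_request_done,
	.init_hctx	= scm_blk_init_hctx,	/* allocate the per-hctx struct scm_queue */
	.exit_hctx	= scm_blk_exit_hctx,
};

/* Sketch: tag set and queue allocation as done in scm_blk_dev_setup(). */
bdev->tag_set.ops = &scm_mq_ops;
bdev->tag_set.nr_hw_queues = nr_requests;
bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

ret = blk_mq_alloc_tag_set(&bdev->tag_set);
if (ret)
	goto out;

rq = blk_mq_init_queue(&bdev->tag_set);
if (IS_ERR(rq)) {
	ret = PTR_ERR(rq);
	goto out_tag;
}
bdev->rq = rq;
rq->queuedata = scmdev;
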
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 09218cdc5129..71288dd9dd7f 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -4,6 +4,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/genhd.h>
#include <linux/list.h>
@@ -14,18 +15,14 @@
#define SCM_QUEUE_DELAY 5
struct scm_blk_dev {
- struct tasklet_struct tasklet;
struct request_queue *rq;
struct gendisk *gendisk;
+ struct blk_mq_tag_set tag_set;
struct scm_device *scmdev;
- spinlock_t rq_lock; /* guard the request queue */
- spinlock_t lock; /* guard the rest of the blockdev */
+ spinlock_t lock;
atomic_t queued_reqs;
enum {SCM_OPER, SCM_WR_PROHIBIT} state;
struct list_head finished_requests;
-#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
- struct list_head cluster_list;
-#endif
};
struct scm_request {
@@ -35,14 +32,7 @@ struct scm_request {
struct aob *aob;
struct list_head list;
u8 retries;
- int error;
-#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
- struct {
- enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
- struct list_head list;
- void **buf;
- } cluster;
-#endif
+ blk_status_t error;
};
#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
@@ -50,57 +40,13 @@ struct scm_request {
int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
void scm_blk_dev_cleanup(struct scm_blk_dev *);
void scm_blk_set_available(struct scm_blk_dev *);
-void scm_blk_irq(struct scm_device *, void *, int);
-
-void scm_request_finish(struct scm_request *);
-void scm_request_requeue(struct scm_request *);
+void scm_blk_irq(struct scm_device *, void *, blk_status_t);
struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
int scm_drv_init(void);
void scm_drv_cleanup(void);
-#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
-void __scm_free_rq_cluster(struct scm_request *);
-int __scm_alloc_rq_cluster(struct scm_request *);
-void scm_request_cluster_init(struct scm_request *);
-bool scm_reserve_cluster(struct scm_request *);
-void scm_release_cluster(struct scm_request *);
-void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
-bool scm_need_cluster_request(struct scm_request *);
-void scm_initiate_cluster_request(struct scm_request *);
-void scm_cluster_request_irq(struct scm_request *);
-bool scm_test_cluster_request(struct scm_request *);
-bool scm_cluster_size_valid(void);
-#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
-static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
-static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
-{
- return 0;
-}
-static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
-static inline bool scm_reserve_cluster(struct scm_request *scmrq)
-{
- return true;
-}
-static inline void scm_release_cluster(struct scm_request *scmrq) {}
-static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
-static inline bool scm_need_cluster_request(struct scm_request *scmrq)
-{
- return false;
-}
-static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
-static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
-static inline bool scm_test_cluster_request(struct scm_request *scmrq)
-{
- return false;
-}
-static inline bool scm_cluster_size_valid(void)
-{
- return true;
-}
-#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
-
extern debug_info_t *scm_debug;
#define SCM_LOG(imp, txt) do { \
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
deleted file mode 100644
index 7497ddde2dd6..000000000000
--- a/drivers/s390/block/scm_blk_cluster.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Block driver for s390 storage class memory.
- *
- * Copyright IBM Corp. 2012
- * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
- */
-
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/genhd.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <asm/eadm.h>
-#include "scm_blk.h"
-
-static unsigned int write_cluster_size = 64;
-module_param(write_cluster_size, uint, S_IRUGO);
-MODULE_PARM_DESC(write_cluster_size,
- "Number of pages used for contiguous writes.");
-
-#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)
-
-void __scm_free_rq_cluster(struct scm_request *scmrq)
-{
- int i;
-
- if (!scmrq->cluster.buf)
- return;
-
- for (i = 0; i < 2 * write_cluster_size; i++)
- free_page((unsigned long) scmrq->cluster.buf[i]);
-
- kfree(scmrq->cluster.buf);
-}
-
-int __scm_alloc_rq_cluster(struct scm_request *scmrq)
-{
- int i;
-
- scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
- GFP_KERNEL);
- if (!scmrq->cluster.buf)
- return -ENOMEM;
-
- for (i = 0; i < 2 * write_cluster_size; i++) {
- scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
- if (!scmrq->cluster.buf[i])
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&scmrq->cluster.list);
- return 0;
-}
-
-void scm_request_cluster_init(struct scm_request *scmrq)
-{
- scmrq->cluster.state = CLUSTER_NONE;
-}
-
-static bool clusters_intersect(struct request *A, struct request *B)
-{
- unsigned long firstA, lastA, firstB, lastB;
-
- firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
- lastA = (((u64) blk_rq_pos(A) << 9) +
- blk_rq_bytes(A) - 1) / CLUSTER_SIZE;
-
- firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
- lastB = (((u64) blk_rq_pos(B) << 9) +
- blk_rq_bytes(B) - 1) / CLUSTER_SIZE;
-
- return (firstB <= lastA && firstA <= lastB);
-}
-
-bool scm_reserve_cluster(struct scm_request *scmrq)
-{
- struct request *req = scmrq->request[scmrq->aob->request.msb_count];
- struct scm_blk_dev *bdev = scmrq->bdev;
- struct scm_request *iter;
- int pos, add = 1;
-
- if (write_cluster_size == 0)
- return true;
-
- spin_lock(&bdev->lock);
- list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
- if (iter == scmrq) {
- /*
- * We don't have to use clusters_intersect here, since
- * cluster requests are always started separately.
- */
- add = 0;
- continue;
- }
- for (pos = 0; pos < iter->aob->request.msb_count; pos++) {
- if (clusters_intersect(req, iter->request[pos]) &&
- (rq_data_dir(req) == WRITE ||
- rq_data_dir(iter->request[pos]) == WRITE)) {
- spin_unlock(&bdev->lock);
- return false;
- }
- }
- }
- if (add)
- list_add(&scmrq->cluster.list, &bdev->cluster_list);
- spin_unlock(&bdev->lock);
-
- return true;
-}
-
-void scm_release_cluster(struct scm_request *scmrq)
-{
- struct scm_blk_dev *bdev = scmrq->bdev;
- unsigned long flags;
-
- if (write_cluster_size == 0)
- return;
-
- spin_lock_irqsave(&bdev->lock, flags);
- list_del(&scmrq->cluster.list);
- spin_unlock_irqrestore(&bdev->lock, flags);
-}
-
-void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
-{
- INIT_LIST_HEAD(&bdev->cluster_list);
- blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
-}
-
-static int scm_prepare_cluster_request(struct scm_request *scmrq)
-{
- struct scm_blk_dev *bdev = scmrq->bdev;
- struct scm_device *scmdev = bdev->gendisk->private_data;
- struct request *req = scmrq->request[0];
- struct msb *msb = &scmrq->aob->msb[0];
- struct req_iterator iter;
- struct aidaw *aidaw;
- struct bio_vec bv;
- int i = 0;
- u64 addr;
-
- switch (scmrq->cluster.state) {
- case CLUSTER_NONE:
- scmrq->cluster.state = CLUSTER_READ;
- /* fall through */
- case CLUSTER_READ:
- msb->bs = MSB_BS_4K;
- msb->oc = MSB_OC_READ;
- msb->flags = MSB_FLAG_IDA;
- msb->blk_count = write_cluster_size;
-
- addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
- msb->scm_addr = round_down(addr, CLUSTER_SIZE);
-
- if (msb->scm_addr !=
- round_down(addr + (u64) blk_rq_bytes(req) - 1,
- CLUSTER_SIZE))
- msb->blk_count = 2 * write_cluster_size;
-
- aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
- if (!aidaw)
- return -ENOMEM;
-
- scmrq->aob->request.msb_count = 1;
- msb->data_addr = (u64) aidaw;
- for (i = 0; i < msb->blk_count; i++) {
- aidaw->data_addr = (u64) scmrq->cluster.buf[i];
- aidaw++;
- }
-
- break;
- case CLUSTER_WRITE:
- aidaw = (void *) msb->data_addr;
- msb->oc = MSB_OC_WRITE;
-
- for (addr = msb->scm_addr;
- addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
- addr += PAGE_SIZE) {
- aidaw->data_addr = (u64) scmrq->cluster.buf[i];
- aidaw++;
- i++;
- }
- rq_for_each_segment(bv, req, iter) {
- aidaw->data_addr = (u64) page_address(bv.bv_page);
- aidaw++;
- i++;
- }
- for (; i < msb->blk_count; i++) {
- aidaw->data_addr = (u64) scmrq->cluster.buf[i];
- aidaw++;
- }
- break;
- }
- return 0;
-}
-
-bool scm_need_cluster_request(struct scm_request *scmrq)
-{
- int pos = scmrq->aob->request.msb_count;
-
- if (rq_data_dir(scmrq->request[pos]) == READ)
- return false;
-
- return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
-}
-
-/* Called with queue lock held. */
-void scm_initiate_cluster_request(struct scm_request *scmrq)
-{
- if (scm_prepare_cluster_request(scmrq))
- goto requeue;
- if (eadm_start_aob(scmrq->aob))
- goto requeue;
- return;
-requeue:
- scm_request_requeue(scmrq);
-}
-
-bool scm_test_cluster_request(struct scm_request *scmrq)
-{
- return scmrq->cluster.state != CLUSTER_NONE;
-}
-
-void scm_cluster_request_irq(struct scm_request *scmrq)
-{
- struct scm_blk_dev *bdev = scmrq->bdev;
- unsigned long flags;
-
- switch (scmrq->cluster.state) {
- case CLUSTER_NONE:
- BUG();
- break;
- case CLUSTER_READ:
- if (scmrq->error) {
- scm_request_finish(scmrq);
- break;
- }
- scmrq->cluster.state = CLUSTER_WRITE;
- spin_lock_irqsave(&bdev->rq_lock, flags);
- scm_initiate_cluster_request(scmrq);
- spin_unlock_irqrestore(&bdev->rq_lock, flags);
- break;
- case CLUSTER_WRITE:
- scm_request_finish(scmrq);
- break;
- }
-}
-
-bool scm_cluster_size_valid(void)
-{
- if (write_cluster_size == 1 || write_cluster_size > 128)
- return false;
-
- return !(write_cluster_size & (write_cluster_size - 1));
-}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index b9d7e755c8a3..a48f0d40c1d2 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -190,7 +190,7 @@ static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
unsigned long page_addr;
unsigned long bytes;
- blk_queue_split(q, &bio, q->bio_split);
+ blk_queue_split(q, &bio);
if ((bio->bi_iter.bi_sector & 7) != 0 ||
(bio->bi_iter.bi_size & 4095) != 0)