Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/Kconfig                |    2
-rw-r--r--  drivers/s390/block/dasd.c                 |  233
-rw-r--r--  drivers/s390/block/dasd_devmap.c          |   72
-rw-r--r--  drivers/s390/block/dasd_diag.c            |   22
-rw-r--r--  drivers/s390/block/dasd_eckd.c            |  968
-rw-r--r--  drivers/s390/block/dasd_eckd.h            |  175
-rw-r--r--  drivers/s390/block/dasd_eer.c             |    1
-rw-r--r--  drivers/s390/block/dasd_fba.c             |   45
-rw-r--r--  drivers/s390/block/dasd_fba.h             |    5
-rw-r--r--  drivers/s390/block/dasd_int.h             |   33
-rw-r--r--  drivers/s390/block/dasd_ioctl.c           |   56
-rw-r--r--  drivers/s390/block/dcssblk.c              |    1
-rw-r--r--  drivers/s390/char/Kconfig                 |   22
-rw-r--r--  drivers/s390/char/Makefile                |    1
-rw-r--r--  drivers/s390/char/fs3270.c                |    2
-rw-r--r--  drivers/s390/char/sclp.c                  |   14
-rw-r--r--  drivers/s390/char/sclp.h                  |   10
-rw-r--r--  drivers/s390/char/sclp_async.c            |  189
-rw-r--r--  drivers/s390/char/sclp_early.c            |    4
-rw-r--r--  drivers/s390/char/sclp_early_core.c       |   20
-rw-r--r--  drivers/s390/char/sclp_sdias.c            |   74
-rw-r--r--  drivers/s390/char/tape_char.c             |    2
-rw-r--r--  drivers/s390/char/zcore.c                 |   26
-rw-r--r--  drivers/s390/cio/Makefile                 |    3
-rw-r--r--  drivers/s390/cio/airq.c                   |   52
-rw-r--r--  drivers/s390/cio/ccwgroup.c               |    6
-rw-r--r--  drivers/s390/cio/ccwreq.c                 |    9
-rw-r--r--  drivers/s390/cio/chsc.c                   |   30
-rw-r--r--  drivers/s390/cio/chsc_sch.c               |    2
-rw-r--r--  drivers/s390/cio/cio.c                    |    2
-rw-r--r--  drivers/s390/cio/cio.h                    |    7
-rw-r--r--  drivers/s390/cio/css.c                    |  191
-rw-r--r--  drivers/s390/cio/device.c                 |   78
-rw-r--r--  drivers/s390/cio/device_fsm.c             |   49
-rw-r--r--  drivers/s390/cio/device_id.c              |   20
-rw-r--r--  drivers/s390/cio/device_ops.c             |   21
-rw-r--r--  drivers/s390/cio/device_pgid.c            |   22
-rw-r--r--  drivers/s390/cio/device_status.c          |   24
-rw-r--r--  drivers/s390/cio/io_sch.h                 |   20
-rw-r--r--  drivers/s390/cio/ioasm.c                  |    1
-rw-r--r--  drivers/s390/cio/qdio.h                   |    6
-rw-r--r--  drivers/s390/cio/qdio_debug.c             |    9
-rw-r--r--  drivers/s390/cio/qdio_main.c              |  231
-rw-r--r--  drivers/s390/cio/qdio_setup.c             |    4
-rw-r--r--  drivers/s390/cio/qdio_thinint.c           |   10
-rw-r--r--  drivers/s390/cio/scm.c                    |    4
-rw-r--r--  drivers/s390/cio/trace.c                  |    1
-rw-r--r--  drivers/s390/cio/trace.h                  |   23
-rw-r--r--  drivers/s390/cio/vfio_ccw_async.c         |   88
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c            |  539
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.h            |    9
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c           |   95
-rw-r--r--  drivers/s390/cio/vfio_ccw_fsm.c           |  143
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c           |  227
-rw-r--r--  drivers/s390/cio/vfio_ccw_private.h       |   48
-rw-r--r--  drivers/s390/crypto/ap_bus.c              |   39
-rw-r--r--  drivers/s390/crypto/ap_bus.h              |    3
-rw-r--r--  drivers/s390/crypto/pkey_api.c            |    8
-rw-r--r--  drivers/s390/crypto/vfio_ap_drv.c         |   34
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c         |  379
-rw-r--r--  drivers/s390/crypto/vfio_ap_private.h     |   15
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c          |   23
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c     |    4
-rw-r--r--  drivers/s390/net/Kconfig                  |    8
-rw-r--r--  drivers/s390/net/ism.h                    |   29
-rw-r--r--  drivers/s390/net/ism_drv.c                |   20
-rw-r--r--  drivers/s390/net/qeth_core.h              |  236
-rw-r--r--  drivers/s390/net/qeth_core_main.c         | 1768
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h          |   53
-rw-r--r--  drivers/s390/net/qeth_core_sys.c          |   10
-rw-r--r--  drivers/s390/net/qeth_ethtool.c           |   17
-rw-r--r--  drivers/s390/net/qeth_l2_main.c           |  367
-rw-r--r--  drivers/s390/net/qeth_l3_main.c           |  472
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c            |   26
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h              |    1
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c               |    4
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c             |    9
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c            |   55
-rw-r--r--  drivers/s390/scsi/zfcp_unit.c             |    8
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c          |  280
80 files changed, 4992 insertions(+), 2827 deletions(-)
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 9ac7574e3cfb..a8682f69effc 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -38,7 +38,7 @@ config DASD_PROFILE
depends on DASD
help
Enable this option if you want to see profiling information
- in /proc/dasd/statistics.
+ in /proc/dasd/statistics.
config DASD_ECKD
def_tristate y
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e03304fe25bb..6cca72782af6 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -70,7 +70,6 @@ MODULE_LICENSE("GPL");
* SECTION: prototypes for static functions of dasd.c
*/
static int dasd_alloc_queue(struct dasd_block *);
-static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
@@ -120,9 +119,18 @@ struct dasd_device *dasd_alloc_device(void)
kfree(device);
return ERR_PTR(-ENOMEM);
}
+	/* Get two pages for ESE format. */
+ device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
+ if (!device->ese_mem) {
+ free_page((unsigned long) device->erp_mem);
+ free_pages((unsigned long) device->ccw_mem, 1);
+ kfree(device);
+ return ERR_PTR(-ENOMEM);
+ }
dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
+ dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
spin_lock_init(&device->mem_lock);
atomic_set(&device->tasklet_scheduled, 0);
tasklet_init(&device->tasklet, dasd_device_tasklet,
@@ -146,6 +154,7 @@ struct dasd_device *dasd_alloc_device(void)
void dasd_free_device(struct dasd_device *device)
{
kfree(device->private);
+ free_pages((unsigned long) device->ese_mem, 1);
free_page((unsigned long) device->erp_mem);
free_pages((unsigned long) device->ccw_mem, 1);
kfree(device);
@@ -348,7 +357,8 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
}
return rc;
}
- dasd_setup_queue(block);
+ if (device->discipline->setup_blk_queue)
+ device->discipline->setup_blk_queue(block);
set_capacity(block->gdp,
block->blocks << block->s2b_shift);
device->state = DASD_STATE_READY;
@@ -1258,6 +1268,49 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
}
EXPORT_SYMBOL(dasd_smalloc_request);
+struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
+ int datasize,
+ struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ unsigned long flags;
+ int size, cqr_size;
+ char *data;
+
+ cqr_size = (sizeof(*cqr) + 7L) & -8L;
+ size = cqr_size;
+ if (cplength > 0)
+ size += cplength * sizeof(struct ccw1);
+ if (datasize > 0)
+ size += datasize;
+
+ spin_lock_irqsave(&device->mem_lock, flags);
+ cqr = dasd_alloc_chunk(&device->ese_chunks, size);
+ spin_unlock_irqrestore(&device->mem_lock, flags);
+ if (!cqr)
+ return ERR_PTR(-ENOMEM);
+ memset(cqr, 0, sizeof(*cqr));
+ data = (char *)cqr + cqr_size;
+ cqr->cpaddr = NULL;
+ if (cplength > 0) {
+ cqr->cpaddr = data;
+ data += cplength * sizeof(struct ccw1);
+ memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
+ }
+ cqr->data = NULL;
+ if (datasize > 0) {
+ cqr->data = data;
+ memset(cqr->data, 0, datasize);
+ }
+
+ cqr->magic = magic;
+ set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ dasd_get_device(device);
+
+ return cqr;
+}
+EXPORT_SYMBOL(dasd_fmalloc_request);
+
void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
unsigned long flags;
@@ -1269,6 +1322,17 @@ void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
}
EXPORT_SYMBOL(dasd_sfree_request);
+void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&device->mem_lock, flags);
+ dasd_free_chunk(&device->ese_chunks, cqr);
+ spin_unlock_irqrestore(&device->mem_lock, flags);
+ dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_ffree_request);
+
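
The two helpers above carve format requests out of the dedicated ese_chunks pool (the two GFP_DMA pages reserved in dasd_alloc_device()) rather than the regular ccw pool, so a format request can still be built while normal I/O occupies the other pools. A minimal sketch of the intended pairing, using only names introduced by this patch:

    cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
    if (IS_ERR(cqr))
            return cqr;
    /* ... build and start the format channel program ... */
    dasd_ffree_request(cqr, startdev);      /* chunk returns to ese_chunks */

The round-up (sizeof(*cqr) + 7L) & -8L in dasd_fmalloc_request() pads the request header to a multiple of 8 bytes, so the channel program placed directly behind it stays doubleword-aligned.
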
/*
* Check discipline magic in cqr.
*/
@@ -1573,13 +1637,43 @@ static int dasd_check_hpf_error(struct irb *irb)
irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}
+static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
+{
+ struct dasd_device *device = NULL;
+ u8 *sense = NULL;
+
+ if (!block)
+ return 0;
+ device = block->base;
+ if (!device || !device->discipline->is_ese)
+ return 0;
+ if (!device->discipline->is_ese(device))
+ return 0;
+
+ sense = dasd_get_sense(irb);
+ if (!sense)
+ return 0;
+
+ return !!(sense[1] & SNS1_NO_REC_FOUND) ||
+ !!(sense[1] & SNS1_FILE_PROTECTED) ||
+ scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
+}
+
+static int dasd_ese_oos_cond(u8 *sense)
+{
+ return sense[0] & SNS0_EQUIPMENT_CHECK &&
+ sense[1] & SNS1_PERM_ERR &&
+ sense[1] & SNS1_WRITE_INHIBITED &&
+ sense[25] == 0x01;
+}
+
/*
* Interrupt handler for "normal" ssch-io based dasd devices.
*/
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb)
{
- struct dasd_ccw_req *cqr, *next;
+ struct dasd_ccw_req *cqr, *next, *fcqr;
struct dasd_device *device;
unsigned long now;
int nrf_suppressed = 0;
@@ -1641,6 +1735,17 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+
+ /*
+ * Extent pool probably out-of-space.
+ * Stop device and check exhaust level.
+ */
+ if (dasd_ese_oos_cond(sense)) {
+ dasd_generic_space_exhaust(device, cqr);
+ device->discipline->ext_pool_exhaust(device, cqr);
+ dasd_put_device(device);
+ return;
+ }
}
if (!(fp_suppressed || nrf_suppressed))
device->discipline->dump_sense_dbf(device, irb, "int");
@@ -1672,6 +1777,31 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
+ if (dasd_ese_needs_format(cqr->block, irb)) {
+ if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
+ device->discipline->ese_read(cqr);
+ cqr->status = DASD_CQR_SUCCESS;
+ cqr->stopclk = now;
+ dasd_device_clear_timer(device);
+ dasd_schedule_device_bh(device);
+ return;
+ }
+ fcqr = device->discipline->ese_format(device, cqr);
+ if (IS_ERR(fcqr)) {
+ /*
+ * If we can't format now, let the request go
+ * one extra round. Maybe we can format later.
+ */
+ cqr->status = DASD_CQR_QUEUED;
+ } else {
+ fcqr->status = DASD_CQR_QUEUED;
+ cqr->status = DASD_CQR_QUEUED;
+ list_add(&fcqr->devlist, &device->ccw_queue);
+ dasd_schedule_device_bh(device);
+ return;
+ }
+ }
+
/* Check for clear pending */
if (cqr->status == DASD_CQR_CLEAR_PENDING &&
scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
@@ -1910,7 +2040,7 @@ static void __dasd_device_check_expire(struct dasd_device *device)
static int __dasd_device_is_unusable(struct dasd_device *device,
struct dasd_ccw_req *cqr)
{
- int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
+ int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM | DASD_STOPPED_NOSPC);
if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
@@ -2412,6 +2542,15 @@ int dasd_sleep_on_queue(struct list_head *ccw_queue)
EXPORT_SYMBOL(dasd_sleep_on_queue);
/*
+ * Start requests from a ccw_queue and wait interruptible for their completion.
+ */
+int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
+{
+ return _dasd_sleep_on_queue(ccw_queue, 1);
+}
+EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);
+
+/*
* Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
*/
@@ -3130,55 +3269,6 @@ static int dasd_alloc_queue(struct dasd_block *block)
}
/*
- * Allocate and initialize request queue.
- */
-static void dasd_setup_queue(struct dasd_block *block)
-{
- unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->request_queue;
- unsigned int max_bytes, max_discard_sectors;
- int max;
-
- if (block->base->features & DASD_FEATURE_USERAW) {
- /*
- * the max_blocks value for raw_track access is 256
- * it is higher than the native ECKD value because we
- * only need one ccw per track
- * so the max_hw_sectors are
- * 2048 x 512B = 1024kB = 16 tracks
- */
- max = 2048;
- } else {
- max = block->base->discipline->max_blocks << block->s2b_shift;
- }
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- q->limits.max_dev_sectors = max;
- blk_queue_logical_block_size(q, logical_block_size);
- blk_queue_max_hw_sectors(q, max);
- blk_queue_max_segments(q, USHRT_MAX);
- /* with page sized segments we can translate each segement into
- * one idaw/tidaw
- */
- blk_queue_max_segment_size(q, PAGE_SIZE);
- blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-
- /* Only activate blocklayer discard support for devices that support it */
- if (block->base->features & DASD_FEATURE_DISCARD) {
- q->limits.discard_granularity = logical_block_size;
- q->limits.discard_alignment = PAGE_SIZE;
-
- /* Calculate max_discard_sectors and make it PAGE aligned */
- max_bytes = USHRT_MAX * logical_block_size;
- max_bytes = ALIGN(max_bytes, PAGE_SIZE) - PAGE_SIZE;
- max_discard_sectors = max_bytes / logical_block_size;
-
- blk_queue_max_discard_sectors(q, max_discard_sectors);
- blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
- }
-}
-
-/*
* Deactivate and free request queue.
*/
static void dasd_free_queue(struct dasd_block *block)
@@ -3806,6 +3896,43 @@ int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
+void dasd_generic_space_exhaust(struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
+{
+ dasd_eer_write(device, NULL, DASD_EER_NOSPC);
+
+ if (device->state < DASD_STATE_BASIC)
+ return;
+
+ if (cqr->status == DASD_CQR_IN_IO ||
+ cqr->status == DASD_CQR_CLEAR_PENDING) {
+ cqr->status = DASD_CQR_QUEUED;
+ cqr->retries++;
+ }
+ dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
+ dasd_device_clear_timer(device);
+ dasd_schedule_device_bh(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);
+
+void dasd_generic_space_avail(struct dasd_device *device)
+{
+ dev_info(&device->cdev->dev, "Extent pool space is available\n");
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");
+
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
+ dasd_schedule_device_bh(device);
+
+ if (device->block) {
+ dasd_schedule_block_bh(device->block);
+ if (device->block->request_queue)
+ blk_mq_run_hw_queues(device->block->request_queue, true);
+ }
+ if (!device->stopped)
+ wake_up(&generic_waitq);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
+
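
Together with the out-of-space check in dasd_int_handler() above, the new stop bit follows a simple cycle; a condensed sketch (annotation, not patch code):

    /*
     * I/O fails with the out-of-space sense pattern
     *   -> dasd_generic_space_exhaust(): requeue the cqr and set
     *      DASD_STOPPED_NOSPC
     * the storage server later signals relief
     *   -> dasd_generic_space_avail(): clear DASD_STOPPED_NOSPC and
     *      kick the device and block layer queues
     */
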
/*
* clear active requests and requeue them to block layer if possible
*/
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index fab35c6170cc..32fc51341d99 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -203,7 +203,7 @@ static int __init dasd_feature_list(char *str)
else if (len == 8 && !strncmp(str, "failfast", 8))
features |= DASD_FEATURE_FAILFAST;
else {
- pr_warn("%*s is not a supported device option\n",
+ pr_warn("%.*s is not a supported device option\n",
len, str);
rc = -EINVAL;
}
@@ -1642,6 +1642,35 @@ static DEVICE_ATTR(path_interval, 0644, dasd_path_interval_show,
dasd_path_interval_store);
+#define DASD_DEFINE_ATTR(_name, _func) \
+static ssize_t dasd_##_name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct ccw_device *cdev = to_ccwdev(dev); \
+ struct dasd_device *device = dasd_device_from_cdev(cdev); \
+ int val = 0; \
+ \
+ if (IS_ERR(device)) \
+ return -ENODEV; \
+ if (device->discipline && _func) \
+ val = _func(device); \
+ dasd_put_device(device); \
+ \
+ return snprintf(buf, PAGE_SIZE, "%d\n", val); \
+} \
+static DEVICE_ATTR(_name, 0444, dasd_##_name##_show, NULL); \
+
+DASD_DEFINE_ATTR(ese, device->discipline->is_ese);
+DASD_DEFINE_ATTR(extent_size, device->discipline->ext_size);
+DASD_DEFINE_ATTR(pool_id, device->discipline->ext_pool_id);
+DASD_DEFINE_ATTR(space_configured, device->discipline->space_configured);
+DASD_DEFINE_ATTR(space_allocated, device->discipline->space_allocated);
+DASD_DEFINE_ATTR(logical_capacity, device->discipline->logical_capacity);
+DASD_DEFINE_ATTR(warn_threshold, device->discipline->ext_pool_warn_thrshld);
+DASD_DEFINE_ATTR(cap_at_warnlevel, device->discipline->ext_pool_cap_at_warnlevel);
+DASD_DEFINE_ATTR(pool_oos, device->discipline->ext_pool_oos);
+
static struct attribute * dasd_attrs[] = {
&dev_attr_readonly.attr,
&dev_attr_discipline.attr,
@@ -1667,6 +1696,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_path_interval.attr,
&dev_attr_path_reset.attr,
&dev_attr_hpf.attr,
+ &dev_attr_ese.attr,
NULL,
};
@@ -1674,6 +1704,39 @@ static const struct attribute_group dasd_attr_group = {
.attrs = dasd_attrs,
};
+static struct attribute *capacity_attrs[] = {
+ &dev_attr_space_configured.attr,
+ &dev_attr_space_allocated.attr,
+ &dev_attr_logical_capacity.attr,
+ NULL,
+};
+
+static const struct attribute_group capacity_attr_group = {
+ .name = "capacity",
+ .attrs = capacity_attrs,
+};
+
+static struct attribute *ext_pool_attrs[] = {
+ &dev_attr_pool_id.attr,
+ &dev_attr_extent_size.attr,
+ &dev_attr_warn_threshold.attr,
+ &dev_attr_cap_at_warnlevel.attr,
+ &dev_attr_pool_oos.attr,
+ NULL,
+};
+
+static const struct attribute_group ext_pool_attr_group = {
+ .name = "extent_pool",
+ .attrs = ext_pool_attrs,
+};
+
+static const struct attribute_group *dasd_attr_groups[] = {
+ &dasd_attr_group,
+ &capacity_attr_group,
+ &ext_pool_attr_group,
+ NULL,
+};
+
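
Once registered through sysfs_create_groups() below, the new attributes appear under the ccw device; a sketch of the resulting layout, where <busid> stands for the device bus ID:

    /sys/bus/ccw/devices/<busid>/ese
    /sys/bus/ccw/devices/<busid>/capacity/space_configured
    /sys/bus/ccw/devices/<busid>/capacity/space_allocated
    /sys/bus/ccw/devices/<busid>/capacity/logical_capacity
    /sys/bus/ccw/devices/<busid>/extent_pool/pool_id
    /sys/bus/ccw/devices/<busid>/extent_pool/extent_size
    /sys/bus/ccw/devices/<busid>/extent_pool/warn_threshold
    /sys/bus/ccw/devices/<busid>/extent_pool/cap_at_warnlevel
    /sys/bus/ccw/devices/<busid>/extent_pool/pool_oos
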
/*
* Return value of the specified feature.
*/
@@ -1715,16 +1778,15 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
EXPORT_SYMBOL(dasd_set_feature);
-int
-dasd_add_sysfs_files(struct ccw_device *cdev)
+int dasd_add_sysfs_files(struct ccw_device *cdev)
{
- return sysfs_create_group(&cdev->dev.kobj, &dasd_attr_group);
+ return sysfs_create_groups(&cdev->dev.kobj, dasd_attr_groups);
}
void
dasd_remove_sysfs_files(struct ccw_device *cdev)
{
- sysfs_remove_group(&cdev->dev.kobj, &dasd_attr_group);
+ sysfs_remove_groups(&cdev->dev.kobj, dasd_attr_groups);
}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index e1fe02477ea8..8d4971645cf1 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -615,14 +615,34 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
"dump sense not available for DIAG data");
}
+/*
+ * Initialize block layer request queue.
+ */
+static void dasd_diag_setup_blk_queue(struct dasd_block *block)
+{
+ unsigned int logical_block_size = block->bp_block;
+ struct request_queue *q = block->request_queue;
+ int max;
+
+ max = DIAG_MAX_BLOCKS << block->s2b_shift;
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ q->limits.max_dev_sectors = max;
+ blk_queue_logical_block_size(q, logical_block_size);
+ blk_queue_max_hw_sectors(q, max);
+ blk_queue_max_segments(q, USHRT_MAX);
+ /* With page sized segments each segment can be translated into one idaw/tidaw */
+ blk_queue_max_segment_size(q, PAGE_SIZE);
+ blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+}
+
static struct dasd_discipline dasd_diag_discipline = {
.owner = THIS_MODULE,
.name = "DIAG",
.ebcname = "DIAG",
- .max_blocks = DIAG_MAX_BLOCKS,
.check_device = dasd_diag_check_device,
.verify_path = dasd_generic_verify_path,
.fill_geometry = dasd_diag_fill_geometry,
+ .setup_blk_queue = dasd_diag_setup_blk_queue,
.start_IO = dasd_start_diag,
.term_IO = dasd_diag_term_IO,
.handle_terminated_request = dasd_diag_handle_terminated_request,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index f89f9d02e788..fc53e1e221f0 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -42,20 +42,6 @@
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"
-#define ECKD_C0(i) (i->home_bytes)
-#define ECKD_F(i) (i->formula)
-#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
- (i->factors.f_0x02.f1))
-#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
- (i->factors.f_0x02.f2))
-#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
- (i->factors.f_0x02.f3))
-#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
-#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
-#define ECKD_F6(i) (i->factor6)
-#define ECKD_F7(i) (i->factor7)
-#define ECKD_F8(i) (i->factor8)
-
/*
 * raw track access always maps to 64k in memory
* so it maps to 16 blocks of 4k per track
@@ -103,6 +89,19 @@ static struct {
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);
+static struct {
+ struct dasd_ccw_req cqr;
+ struct ccw1 ccw[2];
+ char data[40];
+} *dasd_vol_info_req;
+static DEFINE_MUTEX(dasd_vol_info_mutex);
+
+struct ext_pool_exhaust_work_data {
+ struct work_struct worker;
+ struct dasd_device *device;
+ struct dasd_device *base;
+};
+
/* definitions for the path verification worker */
struct path_verification_work_data {
struct work_struct worker;
@@ -122,6 +121,7 @@ struct check_attention_work_data {
__u8 lpum;
};
+static int dasd_eckd_ext_pool_id(struct dasd_device *);
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
struct dasd_device *, struct dasd_device *,
unsigned int, int, unsigned int, unsigned int,
@@ -157,17 +157,10 @@ static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140
/* head and record addresses of count_area read in analysis ccw */
-static const int count_area_head[] = { 0, 0, 0, 0, 2 };
+static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
static inline unsigned int
-round_up_multiple(unsigned int no, unsigned int mult)
-{
- int rem = no % mult;
- return (rem ? no - rem + mult : no);
-}
-
-static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
return (d1 + (d2 - 1)) / d2;
@@ -1491,6 +1484,311 @@ static int dasd_eckd_read_features(struct dasd_device *device)
return rc;
}
+/* Read Volume Information - Volume Storage Query */
+static int dasd_eckd_read_vol_info(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_rssd_vsq *vsq;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int useglobal;
+ int rc;
+
+ /* This command cannot be executed on an alias device */
+ if (private->uid.type == UA_BASE_PAV_ALIAS ||
+ private->uid.type == UA_HYPER_PAV_ALIAS)
+ return 0;
+
+ useglobal = 0;
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
+ sizeof(*prssdp) + sizeof(*vsq), device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate initialization request");
+ mutex_lock(&dasd_vol_info_mutex);
+ useglobal = 1;
+ cqr = &dasd_vol_info_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
+ cqr->cpaddr = &dasd_vol_info_req->ccw;
+ cqr->data = &dasd_vol_info_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
+ }
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = cqr->data;
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = PSF_SUBORDER_VSQ; /* Volume Storage Query */
+ prssdp->lss = private->ned->ID;
+ prssdp->volume = private->ned->unit_addr;
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(*prssdp);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->cda = (__u32)(addr_t)prssdp;
+
+ /* Read Subsystem Data - Volume Storage Query */
+ vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
+ memset(vsq, 0, sizeof(*vsq));
+
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(*vsq);
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)(addr_t)vsq;
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = device->default_expires * HZ;
+ /* The command might not be supported. Suppress the error output */
+ __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
+
+ rc = dasd_sleep_on_interruptible(cqr);
+ if (rc == 0) {
+ memcpy(&private->vsq, vsq, sizeof(*vsq));
+ } else {
+ dev_warn(&device->cdev->dev,
+ "Reading the volume storage information failed with rc=%d\n", rc);
+ }
+
+ if (useglobal)
+ mutex_unlock(&dasd_vol_info_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
+
+ return rc;
+}
+
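
Both subsystem queries added by this patch (VSQ here, LCQ further down) use the same two-CCW channel program; schematically:

    /*
     * CCW 1: DASD_ECKD_CCW_PSF,  command chained (CC)
     *        -> struct dasd_psf_prssd_data (order PSF_ORDER_PRSSD,
     *           suborder selects VSQ or LCQ)
     * CCW 2: DASD_ECKD_CCW_RSSD, suppress incorrect length (SLI)
     *        -> response buffer (struct dasd_rssd_vsq / dasd_rssd_lcq)
     */
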
+static int dasd_eckd_is_ese(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->vsq.vol_info.ese;
+}
+
+static int dasd_eckd_ext_pool_id(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->vsq.extent_pool_id;
+}
+
+/*
+ * This value represents the total amount of available space. As more space is
+ * allocated by ESE volumes, this value will decrease.
+ * The data for this value is therefore updated on any call.
+ */
+static int dasd_eckd_space_configured(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ int rc;
+
+ rc = dasd_eckd_read_vol_info(device);
+
+ return rc ? : private->vsq.space_configured;
+}
+
+/*
+ * The value of space allocated by an ESE volume may have changed and is
+ * therefore updated on any call.
+ */
+static int dasd_eckd_space_allocated(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ int rc;
+
+ rc = dasd_eckd_read_vol_info(device);
+
+ return rc ? : private->vsq.space_allocated;
+}
+
+static int dasd_eckd_logical_capacity(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->vsq.logical_capacity;
+}
+
+static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
+{
+ struct ext_pool_exhaust_work_data *data;
+ struct dasd_device *device;
+ struct dasd_device *base;
+
+ data = container_of(work, struct ext_pool_exhaust_work_data, worker);
+ device = data->device;
+ base = data->base;
+
+ if (!base)
+ base = device;
+ if (dasd_eckd_space_configured(base) != 0) {
+ dasd_generic_space_avail(device);
+ } else {
+ dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
+ }
+
+ dasd_put_device(device);
+ kfree(data);
+}
+
+static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
+{
+ struct ext_pool_exhaust_work_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_ATOMIC);
+ if (!data)
+ return -ENOMEM;
+ INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
+ dasd_get_device(device);
+ data->device = device;
+
+ if (cqr->block)
+ data->base = cqr->block->base;
+ else if (cqr->basedev)
+ data->base = cqr->basedev;
+ else
+ data->base = NULL;
+
+ schedule_work(&data->worker);
+
+ return 0;
+}
+
+static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
+ struct dasd_rssd_lcq *lcq)
+{
+ struct dasd_eckd_private *private = device->private;
+ int pool_id = dasd_eckd_ext_pool_id(device);
+ struct dasd_ext_pool_sum eps;
+ int i;
+
+ for (i = 0; i < lcq->pool_count; i++) {
+ eps = lcq->ext_pool_sum[i];
+ if (eps.pool_id == pool_id) {
+ memcpy(&private->eps, &eps,
+ sizeof(struct dasd_ext_pool_sum));
+ }
+ }
+}
+
+/* Read Extent Pool Information - Logical Configuration Query */
+static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_rssd_lcq *lcq;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ /* This command cannot be executed on an alias device */
+ if (private->uid.type == UA_BASE_PAV_ALIAS ||
+ private->uid.type == UA_HYPER_PAV_ALIAS)
+ return 0;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
+ sizeof(*prssdp) + sizeof(*lcq), device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate initialization request");
+ return PTR_ERR(cqr);
+ }
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = cqr->data;
+ memset(prssdp, 0, sizeof(*prssdp));
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = PSF_SUBORDER_LCQ; /* Logical Configuration Query */
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(*prssdp);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->cda = (__u32)(addr_t)prssdp;
+
+ lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
+ memset(lcq, 0, sizeof(*lcq));
+
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(*lcq);
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)(addr_t)lcq;
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = device->default_expires * HZ;
+ /* The command might not be supported. Suppress the error output */
+ __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
+
+ rc = dasd_sleep_on_interruptible(cqr);
+ if (rc == 0) {
+ dasd_eckd_cpy_ext_pool_data(device, lcq);
+ } else {
+ dev_warn(&device->cdev->dev,
+ "Reading the logical configuration failed with rc=%d\n", rc);
+ }
+
+ dasd_sfree_request(cqr, cqr->memdev);
+
+ return rc;
+}
+
+/*
+ * Depending on the device type, the extent size is specified either as
+ * cylinders per extent (CKD) or as size per extent (FBA).
+ * A 1 GB extent corresponds to 1113 cylinders, a 16 MB extent to 21 cylinders.
+ */
+static int dasd_eckd_ext_size(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_ext_pool_sum eps = private->eps;
+
+ if (!eps.flags.extent_size_valid)
+ return 0;
+ if (eps.extent_size.size_1G)
+ return 1113;
+ if (eps.extent_size.size_16M)
+ return 21;
+
+ return 0;
+}
+
+static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->eps.warn_thrshld;
+}
+
+static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->eps.flags.capacity_at_warnlevel;
+}
+
+/*
+ * Extent Pool out of space
+ */
+static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->eps.flags.pool_oos;
+}
/*
* Build CP for Perform Subsystem Function - SSC.
@@ -1721,6 +2019,16 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
/* Read Feature Codes */
dasd_eckd_read_features(device);
+ /* Read Volume Information */
+ rc = dasd_eckd_read_vol_info(device);
+ if (rc)
+ goto out_err3;
+
+ /* Read Extent Pool Information */
+ rc = dasd_eckd_read_ext_pool_info(device);
+ if (rc)
+ goto out_err3;
+
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&private->rdc_data, 64);
@@ -1751,6 +2059,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (readonly)
set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+ if (dasd_eckd_is_ese(device))
+ dasd_set_feature(device->cdev, DASD_FEATURE_DISCARD, 1);
+
dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
"with %d cylinders, %d heads, %d sectors%s\n",
private->rdc_data.dev_type,
@@ -1823,8 +2134,8 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
- /* Define extent for the first 3 tracks. */
- define_extent(ccw++, cqr->data, 0, 2,
+ /* Define extent for the first 2 tracks. */
+ define_extent(ccw++, cqr->data, 0, 1,
DASD_ECKD_CCW_READ_COUNT, device, 0);
LO_data = cqr->data + sizeof(struct DE_eckd_data);
/* Locate record for the first 4 records on track 0. */
@@ -1843,9 +2154,9 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
count_data++;
}
- /* Locate record for the first record on track 2. */
+ /* Locate record for the first record on track 1. */
ccw[-1].flags |= CCW_FLAG_CC;
- locate_record(ccw++, LO_data++, 2, 0, 1,
+ locate_record(ccw++, LO_data++, 1, 0, 1,
DASD_ECKD_CCW_READ_COUNT, device, 0);
/* Read count ccw. */
ccw[-1].flags |= CCW_FLAG_CC;
@@ -1860,6 +2171,9 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
cqr->retries = 255;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
+ /* Set flags to suppress output for expected errors */
+ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+
return cqr;
}
@@ -1967,7 +2281,7 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
}
}
if (i == 3)
- count_area = &private->count_area[4];
+ count_area = &private->count_area[3];
if (private->uses_cdl == 0) {
for (i = 0; i < 5; i++) {
@@ -2099,8 +2413,7 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
*/
itcw_size = itcw_calc_size(0, count, 0);
- cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
- NULL);
+ cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
if (IS_ERR(cqr))
return cqr;
@@ -2193,8 +2506,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
}
cplength += count;
- cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
- startdev, NULL);
+ cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
if (IS_ERR(cqr))
return cqr;
@@ -2241,13 +2553,11 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
}
static struct dasd_ccw_req *
-dasd_eckd_build_format(struct dasd_device *base,
- struct format_data_t *fdata,
- int enable_pav)
+dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
+ struct format_data_t *fdata, int enable_pav)
{
struct dasd_eckd_private *base_priv;
struct dasd_eckd_private *start_priv;
- struct dasd_device *startdev = NULL;
struct dasd_ccw_req *fcp;
struct eckd_count *ect;
struct ch_t address;
@@ -2338,9 +2648,8 @@ dasd_eckd_build_format(struct dasd_device *base,
fdata->intensity);
return ERR_PTR(-EINVAL);
}
- /* Allocate the format ccw request. */
- fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
- datasize, startdev, NULL);
+
+ fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
if (IS_ERR(fcp))
return fcp;
@@ -2513,7 +2822,7 @@ dasd_eckd_format_build_ccw_req(struct dasd_device *base,
struct dasd_ccw_req *ccw_req;
if (!fmt_buffer) {
- ccw_req = dasd_eckd_build_format(base, fdata, enable_pav);
+ ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
} else {
if (tpm)
ccw_req = dasd_eckd_build_check_tcw(base, fdata,
@@ -2659,7 +2968,7 @@ out_err:
rc = -EIO;
}
list_del_init(&cqr->blocklist);
- dasd_sfree_request(cqr, device);
+ dasd_ffree_request(cqr, device);
private->count--;
}
@@ -2699,6 +3008,96 @@ static int dasd_eckd_format_device(struct dasd_device *base,
}
/*
+ * Callback function to free ESE format requests.
+ */
+static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
+{
+ struct dasd_device *device = cqr->startdev;
+ struct dasd_eckd_private *private = device->private;
+
+ private->count--;
+ dasd_ffree_request(cqr, device);
+}
+
+static struct dasd_ccw_req *
+dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
+{
+ struct dasd_eckd_private *private;
+ struct format_data_t fdata;
+ unsigned int recs_per_trk;
+ struct dasd_ccw_req *fcqr;
+ struct dasd_device *base;
+ struct dasd_block *block;
+ unsigned int blksize;
+ struct request *req;
+ sector_t first_trk;
+ sector_t last_trk;
+ int rc;
+
+ req = cqr->callback_data;
+ base = cqr->block->base;
+ private = base->private;
+ block = base->block;
+ blksize = block->bp_block;
+ recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+
+ first_trk = blk_rq_pos(req) >> block->s2b_shift;
+ sector_div(first_trk, recs_per_trk);
+ last_trk =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+ sector_div(last_trk, recs_per_trk);
+
+ fdata.start_unit = first_trk;
+ fdata.stop_unit = last_trk;
+ fdata.blksize = blksize;
+ fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
+
+ rc = dasd_eckd_format_sanity_checks(base, &fdata);
+ if (rc)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * We're building the request with PAV disabled as we're reusing
+ * the former startdev.
+ */
+ fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
+ if (IS_ERR(fcqr))
+ return fcqr;
+
+ fcqr->callback = dasd_eckd_ese_format_cb;
+
+ return fcqr;
+}
+
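
The first_trk/last_trk computation above maps the 512-byte sector range of the blocked request onto tracks. A worked example with illustrative 3390-style values (4 KiB blocks, hence s2b_shift = 3 and recs_per_trk = 12):

    /* request starting at 512-byte sector 9600, 96 sectors long */
    first_trk = 9600 >> 3;              /* = block 1200 */
    sector_div(first_trk, 12);          /* = track 100 */
    last_trk = (9600 + 96 - 1) >> 3;    /* = block 1211 */
    sector_div(last_trk, 12);           /* = track 100: format one track */
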
+/*
+ * When data is read from an unformatted area of an ESE volume, this function
+ * returns zeroed data and thereby mimics a read of zero data.
+ */
+static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr)
+{
+ unsigned int blksize, off;
+ struct dasd_device *base;
+ struct req_iterator iter;
+ struct request *req;
+ struct bio_vec bv;
+ char *dst;
+
+ req = (struct request *) cqr->callback_data;
+ base = cqr->block->base;
+ blksize = base->block->bp_block;
+
+ rq_for_each_segment(bv, req, iter) {
+ dst = page_address(bv.bv_page) + bv.bv_offset;
+ for (off = 0; off < bv.bv_len; off += blksize) {
+ if (dst && rq_data_dir(req) == READ) {
+				memset(dst + off, 0, blksize);
+ }
+ }
+ }
+}
+
+/*
* Helper function to count consecutive records of a single track.
*/
static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
@@ -3033,6 +3432,277 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
}
}
+static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
+ unsigned int first_trk,
+ unsigned int last_trk)
+{
+ struct dasd_eckd_private *private = device->private;
+ unsigned int trks_per_vol;
+ int rc = 0;
+
+ trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
+
+ if (first_trk >= trks_per_vol) {
+ dev_warn(&device->cdev->dev,
+ "Start track number %u used in the space release command is too big\n",
+ first_trk);
+ rc = -EINVAL;
+ } else if (last_trk >= trks_per_vol) {
+ dev_warn(&device->cdev->dev,
+ "Stop track number %u used in the space release command is too big\n",
+ last_trk);
+ rc = -EINVAL;
+ } else if (first_trk > last_trk) {
+ dev_warn(&device->cdev->dev,
+ "Start track %u used in the space release command exceeds the end track\n",
+ first_trk);
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+/*
+ * Helper function to count the number of extents involved within a given
+ * range, taking extent alignment into account.
+ */
+static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
+{
+ int cur_pos = 0;
+ int count = 0;
+ int tmp;
+
+ if (from == to)
+ return 1;
+
+ /* Count first partial extent */
+ if (from % trks_per_ext != 0) {
+ tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
+ if (tmp > to)
+ tmp = to;
+ cur_pos = tmp - from + 1;
+ count++;
+ }
+ /* Count full extents */
+ if (to - (from + cur_pos) + 1 >= trks_per_ext) {
+ tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
+ count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
+ cur_pos = tmp;
+ }
+ /* Count last partial extent */
+ if (cur_pos < to)
+ count++;
+
+ return count;
+}
+
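
A standalone exercise of count_exts() (illustrative only: trks_per_ext = 16 is an arbitrary value, and the asserts assume the helper is compiled into the same test unit):

    #include <assert.h>

    int main(void)
    {
            /* tracks 10..50 touch extents 0..15, 16..31, 32..47, 48..63 */
            assert(count_exts(10, 50, 16) == 4); /* partial + 2 full + partial */
            assert(count_exts(0, 31, 16) == 2);  /* exactly two full extents */
            assert(count_exts(5, 5, 16) == 1);   /* single track */
            return 0;
    }
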
+/*
+ * Release allocated space for a given range or an entire volume.
+ */
+static struct dasd_ccw_req *
+dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
+ struct request *req, unsigned int first_trk,
+ unsigned int last_trk, int by_extent)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_dso_ras_ext_range *ras_range;
+ struct dasd_rssd_features *features;
+ struct dasd_dso_ras_data *ras_data;
+ u16 heads, beg_head, end_head;
+ int cur_to_trk, cur_from_trk;
+ struct dasd_ccw_req *cqr;
+ u32 beg_cyl, end_cyl;
+ struct ccw1 *ccw;
+ int trks_per_ext;
+ size_t ras_size;
+ size_t size;
+ int nr_exts;
+ void *rq;
+ int i;
+
+ if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
+ return ERR_PTR(-EINVAL);
+
+ rq = req ? blk_mq_rq_to_pdu(req) : NULL;
+
+ features = &private->features;
+
+ trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
+ nr_exts = 0;
+ if (by_extent)
+ nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
+ ras_size = sizeof(*ras_data);
+ size = ras_size + (nr_exts * sizeof(*ras_range));
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate RAS request");
+ return cqr;
+ }
+
+ ras_data = cqr->data;
+ memset(ras_data, 0, size);
+
+ ras_data->order = DSO_ORDER_RAS;
+ ras_data->flags.vol_type = 0; /* CKD volume */
+ /* Release specified extents or entire volume */
+ ras_data->op_flags.by_extent = by_extent;
+ /*
+ * This bit guarantees initialisation of tracks within an extent that is
+ * not fully specified, but is only supported with a certain feature
+ * subset.
+ */
+ ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
+ ras_data->lss = private->ned->ID;
+ ras_data->dev_addr = private->ned->unit_addr;
+ ras_data->nr_exts = nr_exts;
+
+ if (by_extent) {
+ heads = private->rdc_data.trk_per_cyl;
+ cur_from_trk = first_trk;
+ cur_to_trk = first_trk + trks_per_ext -
+ (first_trk % trks_per_ext) - 1;
+ if (cur_to_trk > last_trk)
+ cur_to_trk = last_trk;
+ ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
+
+ for (i = 0; i < nr_exts; i++) {
+ beg_cyl = cur_from_trk / heads;
+ beg_head = cur_from_trk % heads;
+ end_cyl = cur_to_trk / heads;
+ end_head = cur_to_trk % heads;
+
+ set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
+ set_ch_t(&ras_range->end_ext, end_cyl, end_head);
+
+ cur_from_trk = cur_to_trk + 1;
+ cur_to_trk = cur_from_trk + trks_per_ext - 1;
+ if (cur_to_trk > last_trk)
+ cur_to_trk = last_trk;
+ ras_range++;
+ }
+ }
+
+ ccw = cqr->cpaddr;
+ ccw->cda = (__u32)(addr_t)cqr->data;
+ ccw->cmd_code = DASD_ECKD_CCW_DSO;
+ ccw->count = size;
+
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = block;
+ cqr->retries = 256;
+ cqr->expires = device->default_expires * HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ return cqr;
+}
+
+static int dasd_eckd_release_space_full(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ int rc;
+
+ cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
+ if (IS_ERR(cqr))
+ return PTR_ERR(cqr);
+
+ rc = dasd_sleep_on_interruptible(cqr);
+
+ dasd_sfree_request(cqr, cqr->memdev);
+
+ return rc;
+}
+
+static int dasd_eckd_release_space_trks(struct dasd_device *device,
+ unsigned int from, unsigned int to)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_block *block = device->block;
+ struct dasd_ccw_req *cqr, *n;
+ struct list_head ras_queue;
+ unsigned int device_exts;
+ int trks_per_ext;
+ int stop, step;
+ int cur_pos;
+ int rc = 0;
+ int retry;
+
+ INIT_LIST_HEAD(&ras_queue);
+
+ device_exts = private->real_cyl / dasd_eckd_ext_size(device);
+ trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
+
+ /* Make sure device limits are not exceeded */
+ step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
+ cur_pos = from;
+
+ do {
+ retry = 0;
+ while (cur_pos < to) {
+ stop = cur_pos + step -
+ ((cur_pos + step) % trks_per_ext) - 1;
+ if (stop > to)
+ stop = to;
+
+ cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
+ if (IS_ERR(cqr)) {
+ rc = PTR_ERR(cqr);
+ if (rc == -ENOMEM) {
+ if (list_empty(&ras_queue))
+ goto out;
+ retry = 1;
+ break;
+ }
+ goto err_out;
+ }
+
+ spin_lock_irq(&block->queue_lock);
+ list_add_tail(&cqr->blocklist, &ras_queue);
+ spin_unlock_irq(&block->queue_lock);
+ cur_pos = stop + 1;
+ }
+
+ rc = dasd_sleep_on_queue_interruptible(&ras_queue);
+
+err_out:
+ list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
+ device = cqr->startdev;
+ private = device->private;
+
+ spin_lock_irq(&block->queue_lock);
+ list_del_init(&cqr->blocklist);
+ spin_unlock_irq(&block->queue_lock);
+ dasd_sfree_request(cqr, device);
+ private->count--;
+ }
+ } while (retry);
+
+out:
+ return rc;
+}
+
+static int dasd_eckd_release_space(struct dasd_device *device,
+ struct format_data_t *rdata)
+{
+ if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
+ return dasd_eckd_release_space_full(device);
+ else if (rdata->intensity == 0)
+ return dasd_eckd_release_space_trks(device, rdata->start_unit,
+ rdata->stop_unit);
+ else
+ return -EINVAL;
+}
+
+static struct dasd_ccw_req *
+dasd_eckd_build_cp_discard(struct dasd_device *device, struct dasd_block *block,
+ struct request *req, sector_t first_trk,
+ sector_t last_trk)
+{
+ return dasd_eckd_dso_ras(device, block, req, first_trk, last_trk, 1);
+}
+
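
For orientation, how the two release paths above are selected through dasd_eckd_release_space() (hypothetical values; the format_data_t fields are the same ones filled in by dasd_eckd_ese_format()):

    struct format_data_t rdata = {
            .start_unit = 0,        /* first track to release */
            .stop_unit  = 16694,    /* last track to release */
            .intensity  = 0,        /* 0 -> release by track range */
    };

    rc = dasd_eckd_release_space(device, &rdata);   /* -> ..._space_trks() */

    rdata.intensity = DASD_FMT_INT_ESE_FULL;        /* entire volume */
    rc = dasd_eckd_release_space(device, &rdata);   /* -> ..._space_full() */
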
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
struct dasd_device *startdev,
struct dasd_block *block,
@@ -3214,6 +3884,14 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
+
+ /* Set flags to suppress output for expected errors */
+ if (dasd_eckd_is_ese(basedev)) {
+ set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+ }
+
return cqr;
}
@@ -3385,6 +4063,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
+
+ /* Set flags to suppress output for expected errors */
+ if (dasd_eckd_is_ese(basedev))
+ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+
return cqr;
}
@@ -3704,6 +4387,14 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
+
+ /* Set flags to suppress output for expected errors */
+ if (dasd_eckd_is_ese(basedev)) {
+ set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+ }
+
return cqr;
out_error:
dasd_sfree_request(cqr, startdev);
@@ -3756,6 +4447,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
cmdwtd = private->features.feature[12] & 0x40;
use_prefix = private->features.feature[8] & 0x01;
+ if (req_op(req) == REQ_OP_DISCARD)
+ return dasd_eckd_build_cp_discard(startdev, block, req,
+ first_trk, last_trk);
+
cqr = NULL;
if (cdlspecial || dasd_page_cache) {
/* do nothing, just fall through to the cmd mode single case */
@@ -3827,7 +4522,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
if ((start_padding_sectors || end_padding_sectors) &&
(rq_data_dir(req) == WRITE)) {
DBF_DEV_EVENT(DBF_ERR, basedev,
- "raw write not track aligned (%lu,%lu) req %p",
+ "raw write not track aligned (%llu,%llu) req %p",
start_padding_sectors, end_padding_sectors, req);
return ERR_PTR(-EINVAL);
}
@@ -4034,12 +4729,14 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
struct dasd_block *block,
struct request *req)
{
+ struct dasd_device *startdev = NULL;
struct dasd_eckd_private *private;
- struct dasd_device *startdev;
- unsigned long flags;
struct dasd_ccw_req *cqr;
+ unsigned long flags;
- startdev = dasd_alias_get_start_dev(base);
+ /* Discard requests can only be processed on base devices */
+ if (req_op(req) != REQ_OP_DISCARD)
+ startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
private = startdev->private;
@@ -4965,6 +5662,16 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
/* Read Feature Codes */
dasd_eckd_read_features(device);
+ /* Read Volume Information */
+ rc = dasd_eckd_read_vol_info(device);
+ if (rc)
+ goto out_err2;
+
+ /* Read Extent Pool Information */
+ rc = dasd_eckd_read_ext_pool_info(device);
+ if (rc)
+ goto out_err2;
+
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&temp_rdc_data, 64);
@@ -5635,6 +6342,73 @@ static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
device->discipline->check_attention(device, lpum);
}
+static void dasd_eckd_oos_resume(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct alias_pav_group *pavgroup, *tempgroup;
+ struct dasd_device *dev, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&private->lcu->lock, flags);
+ list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
+ alias_list) {
+ if (dev->stopped & DASD_STOPPED_NOSPC)
+ dasd_generic_space_avail(dev);
+ }
+ list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
+ alias_list) {
+ if (dev->stopped & DASD_STOPPED_NOSPC)
+ dasd_generic_space_avail(dev);
+ }
+ /* devices in PAV groups */
+ list_for_each_entry_safe(pavgroup, tempgroup,
+ &private->lcu->grouplist,
+ group) {
+ list_for_each_entry_safe(dev, n, &pavgroup->baselist,
+ alias_list) {
+ if (dev->stopped & DASD_STOPPED_NOSPC)
+ dasd_generic_space_avail(dev);
+ }
+ list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
+ alias_list) {
+ if (dev->stopped & DASD_STOPPED_NOSPC)
+ dasd_generic_space_avail(dev);
+ }
+ }
+ spin_unlock_irqrestore(&private->lcu->lock, flags);
+}
+
+static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
+ __u8 lpum)
+{
+ struct dasd_oos_message *oos = messages;
+
+ switch (oos->code) {
+ case REPO_WARN:
+ case POOL_WARN:
+ dev_warn(&device->cdev->dev,
+ "Extent pool usage has reached a critical value\n");
+ dasd_eckd_oos_resume(device);
+ break;
+ case REPO_EXHAUST:
+ case POOL_EXHAUST:
+ dev_warn(&device->cdev->dev,
+ "Extent pool is exhausted\n");
+ break;
+ case REPO_RELIEVE:
+ case POOL_RELIEVE:
+ dev_info(&device->cdev->dev,
+ "Extent pool physical space constraint has been relieved\n");
+ break;
+ }
+
+ /* In any case, update related data */
+ dasd_eckd_read_ext_pool_info(device);
+
+	/* To make sure there is no attention left, schedule work again */
+ device->discipline->check_attention(device, lpum);
+}
+
static void dasd_eckd_check_attention_work(struct work_struct *work)
{
struct check_attention_work_data *data;
@@ -5653,9 +6427,14 @@ static void dasd_eckd_check_attention_work(struct work_struct *work)
rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
if (rc)
goto out;
+
if (messages->length == ATTENTION_LENGTH_CUIR &&
messages->format == ATTENTION_FORMAT_CUIR)
dasd_eckd_handle_cuir(device, messages, data->lpum);
+ if (messages->length == ATTENTION_LENGTH_OOS &&
+ messages->format == ATTENTION_FORMAT_OOS)
+ dasd_eckd_handle_oos(device, messages, data->lpum);
+
out:
dasd_put_device(device);
kfree(messages);
@@ -5734,6 +6513,72 @@ static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
dasd_schedule_requeue(device);
}
+/*
+ * Initialize block layer request queue.
+ */
+static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
+{
+ unsigned int logical_block_size = block->bp_block;
+ struct request_queue *q = block->request_queue;
+ struct dasd_device *device = block->base;
+ struct dasd_eckd_private *private;
+ unsigned int max_discard_sectors;
+ unsigned int max_bytes;
+ unsigned int ext_bytes; /* Extent Size in Bytes */
+ int recs_per_trk;
+ int trks_per_cyl;
+ int ext_limit;
+ int ext_size; /* Extent Size in Cylinders */
+ int max;
+
+ private = device->private;
+ trks_per_cyl = private->rdc_data.trk_per_cyl;
+ recs_per_trk = recs_per_track(&private->rdc_data, 0, logical_block_size);
+
+ if (device->features & DASD_FEATURE_USERAW) {
+ /*
+		 * The max_blocks value for raw_track access is 256;
+		 * it is higher than the native ECKD value because we
+		 * only need one CCW per track, so max_hw_sectors is
+		 * 2048 x 512B = 1024kB = 16 tracks
+ */
+ max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
+ } else {
+ max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
+ }
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ q->limits.max_dev_sectors = max;
+ blk_queue_logical_block_size(q, logical_block_size);
+ blk_queue_max_hw_sectors(q, max);
+ blk_queue_max_segments(q, USHRT_MAX);
+ /* With page sized segments each segment can be translated into one idaw/tidaw */
+ blk_queue_max_segment_size(q, PAGE_SIZE);
+ blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+
+ if (dasd_eckd_is_ese(device)) {
+ /*
+ * Depending on the extent size, up to UINT_MAX bytes can be
+ * accepted. However, neither DASD_ECKD_RAS_EXTS_MAX nor the
+ * device limits should be exceeded.
+ */
+ ext_size = dasd_eckd_ext_size(device);
+ ext_limit = min(private->real_cyl / ext_size, DASD_ECKD_RAS_EXTS_MAX);
+ ext_bytes = ext_size * trks_per_cyl * recs_per_trk *
+ logical_block_size;
+ max_bytes = UINT_MAX - (UINT_MAX % ext_bytes);
+ if (max_bytes / ext_bytes > ext_limit)
+ max_bytes = ext_bytes * ext_limit;
+
+ max_discard_sectors = max_bytes / 512;
+
+ blk_queue_max_discard_sectors(q, max_discard_sectors);
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ q->limits.discard_granularity = ext_bytes;
+ q->limits.discard_alignment = ext_bytes;
+ }
+}
+
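
The discard limits above derive from the extent geometry; a worked example with illustrative 3390 values (16 MB extents: ext_size = 21 cylinders, trks_per_cyl = 15, recs_per_trk = 12, logical_block_size = 4096):

    ext_bytes = 21 * 15 * 12 * 4096;    /* 15482880 bytes per extent */
    max_bytes = UINT_MAX - (UINT_MAX % ext_bytes);
                                        /* largest extent multiple in a u32, */
                                        /* then capped to ext_limit extents  */
    max_discard_sectors = max_bytes / 512;

With discard_granularity and discard_alignment both set to ext_bytes, the block layer is told to align discards to whole extents.
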
static struct ccw_driver dasd_eckd_driver = {
.driver = {
.name = "dasd-eckd",
@@ -5754,24 +6599,10 @@ static struct ccw_driver dasd_eckd_driver = {
.int_class = IRQIO_DAS,
};
-/*
- * max_blocks is dependent on the amount of storage that is available
- * in the static io buffer for each device. Currently each device has
- * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
- * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
- * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
- * addition we have one define extent ccw + 16 bytes of data and one
- * locate record ccw + 16 bytes of data. That makes:
- * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
- * We want to fit two into the available memory so that we can immediately
- * start the next request if one finishes off. That makes 249.5 blocks
- * for one request. Give a little safety and the result is 240.
- */
static struct dasd_discipline dasd_eckd_discipline = {
.owner = THIS_MODULE,
.name = "ECKD",
.ebcname = "ECKD",
- .max_blocks = 190,
.check_device = dasd_eckd_check_characteristics,
.uncheck_device = dasd_eckd_uncheck_device,
.do_analysis = dasd_eckd_do_analysis,
@@ -5779,6 +6610,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
.basic_to_ready = dasd_eckd_basic_to_ready,
.online_to_ready = dasd_eckd_online_to_ready,
.basic_to_known = dasd_eckd_basic_to_known,
+ .setup_blk_queue = dasd_eckd_setup_blk_queue,
.fill_geometry = dasd_eckd_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
@@ -5806,6 +6638,19 @@ static struct dasd_discipline dasd_eckd_discipline = {
.disable_hpf = dasd_eckd_disable_hpf_device,
.hpf_enabled = dasd_eckd_hpf_enabled,
.reset_path = dasd_eckd_reset_path,
+ .is_ese = dasd_eckd_is_ese,
+ .space_allocated = dasd_eckd_space_allocated,
+ .space_configured = dasd_eckd_space_configured,
+ .logical_capacity = dasd_eckd_logical_capacity,
+ .release_space = dasd_eckd_release_space,
+ .ext_pool_id = dasd_eckd_ext_pool_id,
+ .ext_size = dasd_eckd_ext_size,
+ .ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
+ .ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
+ .ext_pool_oos = dasd_eckd_ext_pool_oos,
+ .ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
+ .ese_format = dasd_eckd_ese_format,
+ .ese_read = dasd_eckd_ese_read,
};
static int __init
@@ -5818,16 +6663,22 @@ dasd_eckd_init(void)
GFP_KERNEL | GFP_DMA);
if (!dasd_reserve_req)
return -ENOMEM;
+	dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
+				    GFP_KERNEL | GFP_DMA);
+	if (!dasd_vol_info_req) {
+		kfree(dasd_reserve_req);
+		return -ENOMEM;
+	}
path_verification_worker = kmalloc(sizeof(*path_verification_worker),
GFP_KERNEL | GFP_DMA);
if (!path_verification_worker) {
kfree(dasd_reserve_req);
+ kfree(dasd_vol_info_req);
return -ENOMEM;
}
rawpadpage = (void *)__get_free_page(GFP_KERNEL);
if (!rawpadpage) {
kfree(path_verification_worker);
kfree(dasd_reserve_req);
+ kfree(dasd_vol_info_req);
return -ENOMEM;
}
ret = ccw_driver_register(&dasd_eckd_driver);
@@ -5836,6 +6687,7 @@ dasd_eckd_init(void)
else {
kfree(path_verification_worker);
kfree(dasd_reserve_req);
+ kfree(dasd_vol_info_req);
free_page((unsigned long)rawpadpage);
}
return ret;
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 5869d2fede35..6943508d0f1d 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -50,16 +50,26 @@
#define DASD_ECKD_CCW_PFX_READ 0xEA
#define DASD_ECKD_CCW_RSCK 0xF9
#define DASD_ECKD_CCW_RCD 0xFA
+#define DASD_ECKD_CCW_DSO 0xF7
+
+/* Define Subsystem Operation / Orders */
+#define DSO_ORDER_RAS 0x81
/*
- * Perform Subsystem Function / Sub-Orders
+ * Perform Subsystem Function / Orders
*/
#define PSF_ORDER_PRSSD 0x18
#define PSF_ORDER_CUIR_RESPONSE 0x1A
-#define PSF_SUBORDER_QHA 0x1C
#define PSF_ORDER_SSC 0x1D
/*
+ * Perform Subsystem Function / Sub-Orders
+ */
+#define PSF_SUBORDER_QHA 0x1C /* Query Host Access */
+#define PSF_SUBORDER_VSQ 0x52 /* Volume Storage Query */
+#define PSF_SUBORDER_LCQ 0x53 /* Logical Configuration Query */
+
+/*
* CUIR response condition codes
*/
#define PSF_CUIR_INVALID 0x00
@@ -80,10 +90,22 @@
#define CUIR_RESUME 0x02
/*
+ * Out-of-space (OOS) Codes
+ */
+#define REPO_WARN 0x01
+#define REPO_EXHAUST 0x02
+#define POOL_WARN 0x03
+#define POOL_EXHAUST 0x04
+#define REPO_RELIEVE 0x05
+#define POOL_RELIEVE 0x06
+
+/*
* attention message definitions
*/
#define ATTENTION_LENGTH_CUIR 0x0e
#define ATTENTION_FORMAT_CUIR 0x01
+#define ATTENTION_LENGTH_OOS 0x10
+#define ATTENTION_FORMAT_OOS 0x06
#define DASD_ECKD_PG_GROUPED 0x10
@@ -99,6 +121,12 @@
#define DASD_ECKD_PATH_THRHLD 256
#define DASD_ECKD_PATH_INTERVAL 300
+/*
+ * Maximum number of blocks to be chained
+ */
+#define DASD_ECKD_MAX_BLOCKS 190
+#define DASD_ECKD_MAX_BLOCKS_RAW 256
+
/*****************************************************************************
* SECTION: Type Definitions
****************************************************************************/
@@ -116,35 +144,12 @@ struct ch_t {
__u16 head;
} __attribute__ ((packed));
-struct chs_t {
- __u16 cyl;
- __u16 head;
- __u32 sector;
-} __attribute__ ((packed));
-
struct chr_t {
__u16 cyl;
__u16 head;
__u8 record;
} __attribute__ ((packed));
-struct geom_t {
- __u16 cyl;
- __u16 head;
- __u32 sector;
-} __attribute__ ((packed));
-
-struct eckd_home {
- __u8 skip_control[14];
- __u16 cell_number;
- __u8 physical_addr[3];
- __u8 flag;
- struct ch_t track_addr;
- __u8 reserved;
- __u8 key_length;
- __u8 reserved2[2];
-} __attribute__ ((packed));
-
struct DE_eckd_data {
struct {
unsigned char perm:2; /* Permissions on this extent */
@@ -387,6 +392,86 @@ struct dasd_rssd_messages {
char messages[4087];
} __packed;
+/*
+ * Read Subsystem Data - Volume Storage Query
+ */
+struct dasd_rssd_vsq {
+ struct {
+ __u8 tse:1;
+ __u8 space_not_available:1;
+ __u8 ese:1;
+ __u8 unused:5;
+ } __packed vol_info;
+ __u8 unused1;
+ __u16 extent_pool_id;
+ __u8 warn_cap_limit;
+ __u8 warn_cap_guaranteed;
+ __u16 unused2;
+ __u32 limit_capacity;
+ __u32 guaranteed_capacity;
+ __u32 space_allocated;
+ __u32 space_configured;
+ __u32 logical_capacity;
+} __packed;
+
+/*
+ * Extent Pool Summary
+ */
+struct dasd_ext_pool_sum {
+ __u16 pool_id;
+ __u8 repo_warn_thrshld;
+ __u8 warn_thrshld;
+ struct {
+ __u8 type:1; /* 0 - CKD / 1 - FB */
+ __u8 track_space_efficient:1;
+ __u8 extent_space_efficient:1;
+ __u8 standard_volume:1;
+ __u8 extent_size_valid:1;
+ __u8 capacity_at_warnlevel:1;
+ __u8 pool_oos:1;
+ __u8 unused0:1;
+ __u8 unused1;
+ } __packed flags;
+ struct {
+ __u8 reserved0:1;
+ __u8 size_1G:1;
+ __u8 reserved1:5;
+ __u8 size_16M:1;
+ } __packed extent_size;
+ __u8 unused;
+} __packed;
+
+/*
+ * Read Subsystem Data-Response - Logical Configuration Query - Header
+ */
+struct dasd_rssd_lcq {
+ __u16 data_length; /* Length of data returned */
+ __u16 pool_count; /* Count of extent pools returned - Max: 448 */
+ struct {
+ __u8 pool_info_valid:1; /* Detailed Information valid */
+ __u8 pool_id_volume:1;
+ __u8 pool_id_cec:1;
+ __u8 unused0:5;
+ __u8 unused1;
+ } __packed header_flags;
+ char sfi_type[6]; /* Storage Facility Image Type (EBCDIC) */
+ char sfi_model[3]; /* Storage Facility Image Model (EBCDIC) */
+ __u8 sfi_seq_num[10]; /* Storage Facility Image Sequence Number */
+ __u8 reserved[7];
+ struct dasd_ext_pool_sum ext_pool_sum[448];
+} __packed;
+
+struct dasd_oos_message {
+ __u16 length;
+ __u8 format;
+ __u8 code;
+ __u8 percentage_empty;
+ __u8 reserved;
+ __u16 ext_pool_id;
+ __u16 token;
+ __u8 unused[6];
+} __packed;
+
struct dasd_cuir_message {
__u16 length;
__u8 format;
@@ -461,6 +546,42 @@ struct dasd_psf_ssc_data {
unsigned char reserved[59];
} __attribute__((packed));
+/* Maximum number of extents for a single Release Allocated Space command */
+#define DASD_ECKD_RAS_EXTS_MAX 110U
+
+struct dasd_dso_ras_ext_range {
+ struct ch_t beg_ext;
+ struct ch_t end_ext;
+} __packed;
+
+/*
+ * Define Subsystem Operation - Release Allocated Space
+ */
+struct dasd_dso_ras_data {
+ __u8 order;
+ struct {
+ __u8 message:1; /* Must be zero */
+ __u8 reserved1:2;
+ __u8 vol_type:1; /* 0 - CKD/FBA, 1 - FB */
+ __u8 reserved2:4;
+ } __packed flags;
+ /* Operation Flags to specify scope */
+ struct {
+ __u8 reserved1:2;
+ /* Release Space by Extent */
+ __u8 by_extent:1; /* 0 - entire volume, 1 - specified extents */
+ __u8 guarantee_init:1;
+ __u8 force_release:1; /* Internal - will be ignored */
+ __u16 reserved2:11;
+ } __packed op_flags;
+ __u8 lss;
+ __u8 dev_addr;
+ __u32 reserved1;
+ __u8 reserved2[10];
+ __u16 nr_exts; /* Defines number of ext_scope - max 110 */
+ __u16 reserved3;
+} __packed;
+
/*
* some structures and definitions for alias handling
@@ -551,6 +672,8 @@ struct dasd_eckd_private {
int uses_cdl;
struct attrib_data_t attrib; /* e.g. cache operations */
struct dasd_rssd_features features;
+ struct dasd_rssd_vsq vsq;
+ struct dasd_ext_pool_sum eps;
u32 real_cyl;
/* alias management */
@@ -572,7 +695,5 @@ int dasd_alias_remove_device(struct dasd_device *);
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
void dasd_alias_handle_summary_unit_check(struct work_struct *);
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
-void dasd_alias_lcu_setup_complete(struct dasd_device *);
-void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
int dasd_alias_update_add_device(struct dasd_device *);
#endif /* DASD_ECKD_H */
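
Taken together, ATTENTION_FORMAT_OOS/ATTENTION_LENGTH_OOS, the out-of-space codes and struct dasd_oos_message describe the attention messages a thin-provisioned (ESE) volume raises as its extent pool fills up and drains again. A minimal decoder over these definitions could look like the following sketch (function name and reactions are illustrative, not from the patch):

	/* Sketch: classify an out-of-space attention message. */
	static void dasd_oos_decode_sketch(struct dasd_oos_message *oos)
	{
		if (oos->format != ATTENTION_FORMAT_OOS ||
		    oos->length != ATTENTION_LENGTH_OOS)
			return;		/* not an OOS attention message */

		switch (oos->code) {
		case REPO_WARN:
		case POOL_WARN:
			/* warn level reached in pool oos->ext_pool_id,
			 * oos->percentage_empty percent still empty */
			break;
		case REPO_EXHAUST:
		case POOL_EXHAUST:
			/* no space left, writes to ESE volumes will fail */
			break;
		case REPO_RELIEVE:
		case POOL_RELIEVE:
			/* space is available again, I/O can resume */
			break;
		}
	}
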
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 93bb09da7fdc..5ae64af9ccea 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -386,6 +386,7 @@ void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
dasd_eer_write_standard_trigger(device, cqr, id);
break;
case DASD_EER_NOPATH:
+ case DASD_EER_NOSPC:
dasd_eer_write_standard_trigger(device, NULL, id);
break;
case DASD_EER_STATECHANGE:
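
The new trigger is raised like the existing ones; since no failed request is tied to running out of space, the caller passes a NULL cqr, which this hunk routes to the standard-trigger path. A sketch of the call site (where exactly the trigger is raised lies outside this hunk):

	/* Sketch: raise the no-space EER trigger without a request. */
	static void example_report_nospc(struct dasd_device *device)
	{
		dasd_eer_write(device, NULL, DASD_EER_NOSPC);
	}
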
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 56007a3e7f11..cbb770824226 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -770,27 +770,46 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
}
/*
- * max_blocks is dependent on the amount of storage that is available
- * in the static io buffer for each device. Currently each device has
- * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
- * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
- * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
- * addition we have one define extent ccw + 16 bytes of data and a
- * locate record ccw for each block (stupid devices!) + 16 bytes of data.
- * That makes:
- * (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum.
- * We want to fit two into the available memory so that we can immediately
- * start the next request if one finishes off. That makes 100.1 blocks
- * for one request. Give a little safety and the result is 96.
+ * Initialize block layer request queue.
*/
+static void dasd_fba_setup_blk_queue(struct dasd_block *block)
+{
+ unsigned int logical_block_size = block->bp_block;
+ struct request_queue *q = block->request_queue;
+ unsigned int max_bytes, max_discard_sectors;
+ int max;
+
+ max = DASD_FBA_MAX_BLOCKS << block->s2b_shift;
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ q->limits.max_dev_sectors = max;
+ blk_queue_logical_block_size(q, logical_block_size);
+ blk_queue_max_hw_sectors(q, max);
+ blk_queue_max_segments(q, USHRT_MAX);
+ /* With page sized segments each segment can be translated into one idaw/tidaw */
+ blk_queue_max_segment_size(q, PAGE_SIZE);
+ blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+
+ q->limits.discard_granularity = logical_block_size;
+ q->limits.discard_alignment = PAGE_SIZE;
+
+ /* Calculate max_discard_sectors and make it PAGE aligned */
+ max_bytes = USHRT_MAX * logical_block_size;
+ max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
+ max_discard_sectors = max_bytes / logical_block_size;
+
+ blk_queue_max_discard_sectors(q, max_discard_sectors);
+ blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+}
+
static struct dasd_discipline dasd_fba_discipline = {
.owner = THIS_MODULE,
.name = "FBA ",
.ebcname = "FBA ",
- .max_blocks = 96,
.check_device = dasd_fba_check_characteristics,
.do_analysis = dasd_fba_do_analysis,
.verify_path = dasd_generic_verify_path,
+ .setup_blk_queue = dasd_fba_setup_blk_queue,
.fill_geometry = dasd_fba_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
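
The discard limits in dasd_fba_setup_blk_queue() are easiest to verify with concrete numbers. Below is a standalone sketch of the same arithmetic for 512-byte blocks (userspace; the macros merely mirror the kernel ones for illustration):

	#include <stdio.h>

	#define USHRT_MAX		65535U
	#define PAGE_SIZE		4096U
	#define ALIGN_DOWN(x, a)	((x) / (a) * (a))

	int main(void)
	{
		unsigned int logical_block_size = 512;
		unsigned int max_bytes, max_discard_sectors;

		/* Same steps as dasd_fba_setup_blk_queue() */
		max_bytes = USHRT_MAX * logical_block_size;	      /* 33553920 */
		max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);	      /* 33550336 */
		max_discard_sectors = max_bytes / logical_block_size; /* 65528 */

		printf("max discard: %u blocks (%u bytes, page aligned)\n",
		       max_discard_sectors, max_bytes);
		return 0;
	}
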
diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h
index b14bf1b2c691..8f75df06e893 100644
--- a/drivers/s390/block/dasd_fba.h
+++ b/drivers/s390/block/dasd_fba.h
@@ -9,6 +9,11 @@
#ifndef DASD_FBA_H
#define DASD_FBA_H
+/*
+ * Maximum number of blocks to be chained
+ */
+#define DASD_FBA_MAX_BLOCKS 96
+
struct DE_fba_data {
struct {
unsigned char perm:2; /* Permissions on this extent */
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index de6b96036aa4..91c9f9586e0f 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -268,7 +268,6 @@ struct dasd_discipline {
struct module *owner;
char ebcname[8]; /* a name used for tagging and printks */
char name[8]; /* a name used for tagging and printks */
- int max_blocks; /* maximum number of blocks to be chained */
struct list_head list; /* used for list of disciplines */
@@ -307,6 +306,10 @@ struct dasd_discipline {
int (*online_to_ready) (struct dasd_device *);
int (*basic_to_known)(struct dasd_device *);
+ /*
+ * Initialize block layer request queue.
+ */
+ void (*setup_blk_queue)(struct dasd_block *);
/*
* Device operation functions. build_cp creates a ccw chain for
* a block device request, start_io starts the request and
@@ -367,6 +370,25 @@ struct dasd_discipline {
void (*disable_hpf)(struct dasd_device *);
int (*hpf_enabled)(struct dasd_device *);
void (*reset_path)(struct dasd_device *, __u8);
+
+ /*
+ * Extent Space Efficient (ESE) relevant functions
+ */
+ int (*is_ese)(struct dasd_device *);
+ /* Capacity */
+ int (*space_allocated)(struct dasd_device *);
+ int (*space_configured)(struct dasd_device *);
+ int (*logical_capacity)(struct dasd_device *);
+ int (*release_space)(struct dasd_device *, struct format_data_t *);
+ /* Extent Pool */
+ int (*ext_pool_id)(struct dasd_device *);
+ int (*ext_size)(struct dasd_device *);
+ int (*ext_pool_cap_at_warnlevel)(struct dasd_device *);
+ int (*ext_pool_warn_thrshld)(struct dasd_device *);
+ int (*ext_pool_oos)(struct dasd_device *);
+ int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *);
+ struct dasd_ccw_req *(*ese_format)(struct dasd_device *, struct dasd_ccw_req *);
+ void (*ese_read)(struct dasd_ccw_req *);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -386,6 +408,7 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
#define DASD_EER_NOPATH 2
#define DASD_EER_STATECHANGE 3
#define DASD_EER_PPRCSUSPEND 4
+#define DASD_EER_NOSPC 5
/* DASD path handling */
@@ -482,8 +505,10 @@ struct dasd_device {
spinlock_t mem_lock;
void *ccw_mem;
void *erp_mem;
+ void *ese_mem;
struct list_head ccw_chunks;
struct list_head erp_chunks;
+ struct list_head ese_chunks;
atomic_t tasklet_scheduled;
struct tasklet_struct tasklet;
@@ -558,6 +583,7 @@ struct dasd_queue {
#define DASD_STOPPED_SU 16 /* summary unit check handling */
#define DASD_STOPPED_PM 32 /* pm state transition */
#define DASD_UNRESUMED_PM 64 /* pm resume failed state */
+#define DASD_STOPPED_NOSPC 128 /* no space left */
/* per device flags */
#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */
@@ -700,7 +726,9 @@ extern struct kmem_cache *dasd_page_cache;
struct dasd_ccw_req *
dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *);
+struct dasd_ccw_req *dasd_fmalloc_request(int, int, int, struct dasd_device *);
void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
+void dasd_ffree_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
struct dasd_device *dasd_alloc_device(void);
@@ -727,6 +755,7 @@ void dasd_schedule_block_bh(struct dasd_block *);
int dasd_sleep_on(struct dasd_ccw_req *);
int dasd_sleep_on_queue(struct list_head *);
int dasd_sleep_on_immediatly(struct dasd_ccw_req *);
+int dasd_sleep_on_queue_interruptible(struct list_head *);
int dasd_sleep_on_interruptible(struct dasd_ccw_req *);
void dasd_device_set_timer(struct dasd_device *, int);
void dasd_device_clear_timer(struct dasd_device *);
@@ -750,6 +779,8 @@ int dasd_generic_restore_device(struct ccw_device *);
enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
void dasd_generic_path_event(struct ccw_device *, int *);
int dasd_generic_verify_path(struct dasd_device *, __u8);
+void dasd_generic_space_exhaust(struct dasd_device *, struct dasd_ccw_req *);
+void dasd_generic_space_avail(struct dasd_device *);
int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
char *dasd_get_sense(struct irb *);
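
All of the new ESE hooks are optional per discipline, so a caller has to check both that a hook exists and, where it matters, that the device is ESE at all before dereferencing; dasd_release_space() in dasd_ioctl.c below follows exactly this shape. A hypothetical wrapper for one of the capacity hooks, as a sketch:

	/* Sketch: guarded call through an optional discipline hook
	 * (wrapper name is illustrative). */
	static int dasd_query_space_configured(struct dasd_device *device)
	{
		if (!device->discipline->is_ese ||
		    !device->discipline->is_ese(device))
			return -ENOTSUPP;
		if (!device->discipline->space_configured)
			return -ENOTSUPP;

		return device->discipline->space_configured(device);
	}
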
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 8e26001dc11c..9a5f3add325f 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -333,6 +333,59 @@ out_err:
return rc;
}
+static int dasd_release_space(struct dasd_device *device,
+ struct format_data_t *rdata)
+{
+ if (!device->discipline->is_ese || !device->discipline->is_ese(device))
+ return -ENOTSUPP;
+ if (!device->discipline->release_space)
+ return -ENOTSUPP;
+
+ return device->discipline->release_space(device, rdata);
+}
+
+/*
+ * Release allocated space
+ */
+static int dasd_ioctl_release_space(struct block_device *bdev, void __user *argp)
+{
+ struct format_data_t rdata;
+ struct dasd_device *base;
+ int rc = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!argp)
+ return -EINVAL;
+
+ base = dasd_device_from_gendisk(bdev->bd_disk);
+ if (!base)
+ return -ENODEV;
+ if (base->features & DASD_FEATURE_READONLY ||
+ test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
+ rc = -EROFS;
+ goto out_err;
+ }
+ if (bdev != bdev->bd_contains) {
+ pr_warn("%s: The specified DASD is a partition and tracks cannot be released\n",
+ dev_name(&base->cdev->dev));
+ rc = -EINVAL;
+ goto out_err;
+ }
+
+ if (copy_from_user(&rdata, argp, sizeof(rdata))) {
+ rc = -EFAULT;
+ goto out_err;
+ }
+
+ rc = dasd_release_space(base, &rdata);
+
+out_err:
+ dasd_put_device(base);
+
+ return rc;
+}
+
#ifdef CONFIG_DASD_PROFILE
/*
* Reset device profile information
@@ -595,6 +648,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
case BIODASDREADALLCMB:
rc = dasd_ioctl_readall_cmb(block, cmd, argp);
break;
+ case BIODASDRAS:
+ rc = dasd_ioctl_release_space(bdev, argp);
+ break;
default:
/* if the discipline has an ioctl method try it. */
rc = -ENOTTY;
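
From user space, BIODASDRAS takes the same struct format_data_t as the format ioctls. A hedged sketch of releasing space on a whole volume follows; it assumes the uapi definitions in <asm/dasd.h>, and the DASD_FMT_INT_ESE_FULL flag for full-volume release comes from the same patch series and should be treated as an assumption:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <asm/dasd.h>		/* format_data_t, BIODASDRAS */

	int main(void)
	{
		struct format_data_t rdata = {
			.intensity = DASD_FMT_INT_ESE_FULL, /* assumption: whole volume */
		};
		int fd = open("/dev/dasda", O_RDWR);

		if (fd < 0 || ioctl(fd, BIODASDRAS, &rdata) < 0)
			perror("BIODASDRAS");
		if (fd >= 0)
			close(fd);
		return 0;
	}
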
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4e8aedd50cb0..d04d4378ca50 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -59,6 +59,7 @@ static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
static const struct dax_operations dcssblk_dax_ops = {
.direct_access = dcssblk_dax_direct_access,
+ .dax_supported = generic_fsdax_supported,
.copy_from_iter = dcssblk_dax_copy_from_iter,
.copy_to_iter = dcssblk_dax_copy_to_iter,
};
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index ab0b243a947d..6cc4b19acf85 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -79,27 +79,6 @@ config SCLP_VT220_CONSOLE
Include support for using an IBM SCLP VT220-compatible terminal as a
Linux system console.
-config SCLP_ASYNC
- def_tristate m
- prompt "Support for Call Home via Asynchronous SCLP Records"
- depends on S390
- help
- This option enables the call home function, which is able to inform
- the service element and connected organisations about a kernel panic.
- You should only select this option if you know what you are doing,
- want for inform other people about your kernel panics,
- need this feature and intend to run your kernel in LPAR.
-
-config SCLP_ASYNC_ID
- string "Component ID for Call Home"
- depends on SCLP_ASYNC
- default "000000000"
- help
- The Component ID for Call Home is used to identify the correct
- problem reporting queue the call home records should be sent to.
-
- If your are unsure, please use the default value "000000000".
-
config HMC_DRV
def_tristate m
prompt "Support for file transfers from HMC drive CD/DVD-ROM"
@@ -205,4 +184,3 @@ config S390_VMUR
depends on S390
help
Character device driver for z/VM reader, puncher and printer.
-
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 3072b89785dd..b8a8816d94e7 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_TN3215) += con3215.o
obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
-obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
obj-$(CONFIG_PCI) += sclp_pci.o
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 8b48ba9c598e..4c4683d8784a 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -486,7 +486,7 @@ fs3270_open(struct inode *inode, struct file *filp)
raw3270_del_view(&fp->view);
goto out;
}
- nonseekable_open(inode, filp);
+ stream_open(inode, filp);
filp->private_data = fp;
out:
mutex_unlock(&fs3270_mutex);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index e9aa71cdfc44..d2ab3f07c008 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -45,8 +45,8 @@ static struct list_head sclp_req_queue;
/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
-static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
-static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+static void *sclp_read_sccb;
+static struct init_sccb *sclp_init_sccb;
/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);
@@ -753,9 +753,8 @@ EXPORT_SYMBOL(sclp_remove_processed);
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
- struct init_sccb *sccb;
+ struct init_sccb *sccb = sclp_init_sccb;
- sccb = (struct init_sccb *) sclp_init_sccb;
clear_page(sccb);
memset(&sclp_init_req, 0, sizeof(struct sclp_req));
sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
@@ -782,7 +781,7 @@ static int
sclp_init_mask(int calculate)
{
unsigned long flags;
- struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
+ struct init_sccb *sccb = sclp_init_sccb;
sccb_mask_t receive_mask;
sccb_mask_t send_mask;
int retry;
@@ -1175,6 +1174,9 @@ sclp_init(void)
if (sclp_init_state != sclp_init_state_uninitialized)
goto fail_unlock;
sclp_init_state = sclp_init_state_initializing;
+ sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
+ sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
+ BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
/* Set up variables */
INIT_LIST_HEAD(&sclp_req_queue);
INIT_LIST_HEAD(&sclp_reg_list);
@@ -1207,6 +1209,8 @@ fail_unregister_reboot_notifier:
unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
sclp_init_state = sclp_init_state_uninitialized;
+ free_page((unsigned long) sclp_read_sccb);
+ free_page((unsigned long) sclp_init_sccb);
fail_unlock:
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
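
SCCBs must be addressable with 31 bits, which is what the GFP_DMA allocations above guarantee (ZONE_DMA ends at 2 GB on s390); __get_free_page() additionally provides the page alignment the old static arrays declared explicitly. The pattern in isolation, as a sketch (GFP_KERNEL here; sclp_init() uses GFP_ATOMIC because it runs under the sclp lock):

	/* Sketch: runtime replacement for a static page-aligned SCCB.
	 * GFP_DMA keeps the page 31-bit addressable on s390 and
	 * __get_free_page() returns page-aligned memory. */
	static void *sccb;

	static int example_alloc_sccb(void)
	{
		sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
		return sccb ? 0 : -ENOMEM;
	}

	static void example_free_sccb(void)
	{
		free_page((unsigned long) sccb);
	}
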
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 367e9d384d85..196333013e54 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -197,7 +197,9 @@ struct read_info_sccb {
u32 hmfai; /* 124-127 */
u8 _pad_128[134 - 128]; /* 128-133 */
u8 byte_134; /* 134 */
- u8 _pad_135[4096 - 135]; /* 135-4095 */
+ u8 cpudirq; /* 135 */
+ u16 cbl; /* 136-137 */
+ u8 _pad_138[4096 - 138]; /* 138-4095 */
} __packed __aligned(PAGE_SIZE);
struct read_storage_sccb {
@@ -319,7 +321,7 @@ extern int sclp_console_drop;
extern unsigned long sclp_console_full;
extern bool sclp_mask_compat_mode;
-extern char sclp_early_sccb[PAGE_SIZE];
+extern char *sclp_early_sccb;
void sclp_early_wait_irq(void);
int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb);
@@ -365,14 +367,14 @@ sclp_ascebc(unsigned char ch)
/* translate string from EBCDIC to ASCII */
static inline void
-sclp_ebcasc_str(unsigned char *str, int nr)
+sclp_ebcasc_str(char *str, int nr)
{
(MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr);
}
/* translate string from ASCII to EBCDIC */
static inline void
-sclp_ascebc_str(unsigned char *str, int nr)
+sclp_ascebc_str(char *str, int nr)
{
(MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
}
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
deleted file mode 100644
index e69b12a40636..000000000000
--- a/drivers/s390/char/sclp_async.c
+++ /dev/null
@@ -1,189 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Enable Asynchronous Notification via SCLP.
- *
- * Copyright IBM Corp. 2009
- * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/kmod.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/proc_fs.h>
-#include <linux/sysctl.h>
-#include <linux/utsname.h>
-#include "sclp.h"
-
-static int callhome_enabled;
-static struct sclp_req *request;
-static struct sclp_async_sccb *sccb;
-static int sclp_async_send_wait(char *message);
-static struct ctl_table_header *callhome_sysctl_header;
-static DEFINE_SPINLOCK(sclp_async_lock);
-#define SCLP_NORMAL_WRITE 0x00
-
-struct async_evbuf {
- struct evbuf_header header;
- u64 reserved;
- u8 rflags;
- u8 empty;
- u8 rtype;
- u8 otype;
- char comp_id[12];
- char data[3000]; /* there is still some space left */
-} __attribute__((packed));
-
-struct sclp_async_sccb {
- struct sccb_header header;
- struct async_evbuf evbuf;
-} __attribute__((packed));
-
-static struct sclp_register sclp_async_register = {
- .send_mask = EVTYP_ASYNC_MASK,
-};
-
-static int call_home_on_panic(struct notifier_block *self,
- unsigned long event, void *data)
-{
- strncat(data, init_utsname()->nodename,
- sizeof(init_utsname()->nodename));
- sclp_async_send_wait(data);
- return NOTIFY_DONE;
-}
-
-static struct notifier_block call_home_panic_nb = {
- .notifier_call = call_home_on_panic,
- .priority = INT_MAX,
-};
-
-static int zero;
-static int one = 1;
-
-static struct ctl_table callhome_table[] = {
- {
- .procname = "callhome",
- .data = &callhome_enabled,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
- },
- {}
-};
-
-static struct ctl_table kern_dir_table[] = {
- {
- .procname = "kernel",
- .maxlen = 0,
- .mode = 0555,
- .child = callhome_table,
- },
- {}
-};
-
-/*
- * Function used to transfer asynchronous notification
- * records which waits for send completion
- */
-static int sclp_async_send_wait(char *message)
-{
- struct async_evbuf *evb;
- int rc;
- unsigned long flags;
-
- if (!callhome_enabled)
- return 0;
- sccb->evbuf.header.type = EVTYP_ASYNC;
- sccb->evbuf.rtype = 0xA5;
- sccb->evbuf.otype = 0x00;
- evb = &sccb->evbuf;
- request->command = SCLP_CMDW_WRITE_EVENT_DATA;
- request->sccb = sccb;
- request->status = SCLP_REQ_FILLED;
- strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
- /*
- * Retain Queue
- * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
- */
- strncpy(sccb->evbuf.comp_id, CONFIG_SCLP_ASYNC_ID,
- sizeof(sccb->evbuf.comp_id));
- sccb->evbuf.header.length = sizeof(sccb->evbuf);
- sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
- sccb->header.function_code = SCLP_NORMAL_WRITE;
- rc = sclp_add_request(request);
- if (rc)
- return rc;
- spin_lock_irqsave(&sclp_async_lock, flags);
- while (request->status != SCLP_REQ_DONE &&
- request->status != SCLP_REQ_FAILED) {
- sclp_sync_wait();
- }
- spin_unlock_irqrestore(&sclp_async_lock, flags);
- if (request->status != SCLP_REQ_DONE)
- return -EIO;
- rc = ((struct sclp_async_sccb *)
- request->sccb)->header.response_code;
- if (rc != 0x0020)
- return -EIO;
- if (evb->header.flags != 0x80)
- return -EIO;
- return rc;
-}
-
-static int __init sclp_async_init(void)
-{
- int rc;
-
- rc = sclp_register(&sclp_async_register);
- if (rc)
- return rc;
- rc = -EOPNOTSUPP;
- if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
- goto out_sclp;
- rc = -ENOMEM;
- callhome_sysctl_header = register_sysctl_table(kern_dir_table);
- if (!callhome_sysctl_header)
- goto out_sclp;
- request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
- sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!request || !sccb)
- goto out_mem;
- rc = atomic_notifier_chain_register(&panic_notifier_list,
- &call_home_panic_nb);
- if (!rc)
- goto out;
-out_mem:
- kfree(request);
- free_page((unsigned long) sccb);
- unregister_sysctl_table(callhome_sysctl_header);
-out_sclp:
- sclp_unregister(&sclp_async_register);
-out:
- return rc;
-}
-module_init(sclp_async_init);
-
-static void __exit sclp_async_exit(void)
-{
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &call_home_panic_nb);
- unregister_sysctl_table(callhome_sysctl_header);
- sclp_unregister(&sclp_async_register);
- free_page((unsigned long) sccb);
- kfree(request);
-}
-module_exit(sclp_async_exit);
-
-MODULE_AUTHOR("Copyright IBM Corp. 2009");
-MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 8332788681c4..e71992a3c55f 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -40,6 +40,7 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
sclp.has_kss = !!(sccb->fac98 & 0x01);
+ sclp.has_sipl = !!(sccb->cbl & 0x02);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
if (sccb->fac91 & 0x40)
@@ -93,6 +94,7 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
sclp.hmfai = sccb->hmfai;
+ sclp.has_dirq = !!(sccb->cpudirq & 0x80);
}
/*
@@ -144,7 +146,7 @@ static void __init sclp_early_console_detect(struct init_sccb *sccb)
void __init sclp_early_detect(void)
{
- void *sccb = &sclp_early_sccb;
+ void *sccb = sclp_early_sccb;
sclp_early_facilities_detect(sccb);
sclp_early_init_core_info(sccb);
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 387c114ded3f..7737470f8498 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -16,7 +16,7 @@
static struct read_info_sccb __bootdata(sclp_info_sccb);
static int __bootdata(sclp_info_sccb_valid);
-char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data);
+char *sclp_early_sccb = (char *) EARLY_SCCB_OFFSET;
int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
/*
* Used to keep track of the size of the event masks. Qemu until version 2.11
@@ -91,8 +91,8 @@ static void sclp_early_print_lm(const char *str, unsigned int len)
struct mto *mto;
struct go *go;
- sccb = (struct write_sccb *) &sclp_early_sccb;
- end = (unsigned char *) sccb + sizeof(sclp_early_sccb) - 1;
+ sccb = (struct write_sccb *) sclp_early_sccb;
+ end = (unsigned char *) sccb + EARLY_SCCB_SIZE - 1;
memset(sccb, 0, sizeof(*sccb));
ptr = (unsigned char *) &sccb->msg.mdb.mto;
offset = 0;
@@ -139,9 +139,9 @@ static void sclp_early_print_vt220(const char *str, unsigned int len)
{
struct vt220_sccb *sccb;
- sccb = (struct vt220_sccb *) &sclp_early_sccb;
- if (sizeof(*sccb) + len >= sizeof(sclp_early_sccb))
- len = sizeof(sclp_early_sccb) - sizeof(*sccb);
+ sccb = (struct vt220_sccb *) sclp_early_sccb;
+ if (sizeof(*sccb) + len >= EARLY_SCCB_SIZE)
+ len = EARLY_SCCB_SIZE - sizeof(*sccb);
memset(sccb, 0, sizeof(*sccb));
memcpy(&sccb->msg.data, str, len);
sccb->header.length = sizeof(*sccb) + len;
@@ -199,7 +199,7 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
BUILD_BUG_ON(sizeof(struct init_sccb) > PAGE_SIZE);
*have_linemode = *have_vt220 = 0;
- sccb = (struct init_sccb *) &sclp_early_sccb;
+ sccb = (struct init_sccb *) sclp_early_sccb;
receive_mask = disable ? 0 : EVTYP_OPCMD_MASK;
send_mask = disable ? 0 : EVTYP_VT220MSG_MASK | EVTYP_MSG_MASK;
rc = sclp_early_set_event_mask(sccb, receive_mask, send_mask);
@@ -304,7 +304,7 @@ int __init sclp_early_get_hsa_size(unsigned long *hsa_size)
void __weak __init add_mem_detect_block(u64 start, u64 end) {}
int __init sclp_early_read_storage_info(void)
{
- struct read_storage_sccb *sccb = (struct read_storage_sccb *)&sclp_early_sccb;
+ struct read_storage_sccb *sccb = (struct read_storage_sccb *)sclp_early_sccb;
int rc, id, max_id = 0;
unsigned long rn, rzm;
sclp_cmdw_t command;
@@ -320,8 +320,8 @@ int __init sclp_early_read_storage_info(void)
rzm <<= 20;
for (id = 0; id <= max_id; id++) {
- memset(sclp_early_sccb, 0, sizeof(sclp_early_sccb));
- sccb->header.length = sizeof(sclp_early_sccb);
+ memset(sclp_early_sccb, 0, EARLY_SCCB_SIZE);
+ sccb->header.length = EARLY_SCCB_SIZE;
command = SCLP_CMDW_READ_STORAGE_INFO | (id << 8);
rc = sclp_early_cmd(command, sccb);
if (rc)
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 8e0b69a2f11a..13f97fd73aca 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -29,7 +29,7 @@ static struct sclp_register sclp_sdias_register = {
.send_mask = EVTYP_SDIAS_MASK,
};
-static struct sdias_sccb sccb __attribute__((aligned(4096)));
+static struct sdias_sccb *sclp_sdias_sccb;
static struct sdias_evbuf sdias_evbuf;
static DECLARE_COMPLETION(evbuf_accepted);
@@ -58,6 +58,7 @@ static void sdias_callback(struct sclp_req *request, void *data)
static int sdias_sclp_send(struct sclp_req *req)
{
+ struct sdias_sccb *sccb = sclp_sdias_sccb;
int retries;
int rc;
@@ -78,16 +79,16 @@ static int sdias_sclp_send(struct sclp_req *req)
continue;
}
/* if not accepted, retry */
- if (!(sccb.evbuf.hdr.flags & 0x80)) {
+ if (!(sccb->evbuf.hdr.flags & 0x80)) {
TRACE("sclp request failed: flags=%x\n",
- sccb.evbuf.hdr.flags);
+ sccb->evbuf.hdr.flags);
continue;
}
/*
* for the sync interface the response is in the initial sccb
*/
if (!sclp_sdias_register.receiver_fn) {
- memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf));
+ memcpy(&sdias_evbuf, &sccb->evbuf, sizeof(sdias_evbuf));
TRACE("sync request done\n");
return 0;
}
@@ -104,23 +105,24 @@ static int sdias_sclp_send(struct sclp_req *req)
*/
int sclp_sdias_blk_count(void)
{
+ struct sdias_sccb *sccb = sclp_sdias_sccb;
struct sclp_req request;
int rc;
mutex_lock(&sdias_mutex);
- memset(&sccb, 0, sizeof(sccb));
+ memset(sccb, 0, sizeof(*sccb));
memset(&request, 0, sizeof(request));
- sccb.hdr.length = sizeof(sccb);
- sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
- sccb.evbuf.hdr.type = EVTYP_SDIAS;
- sccb.evbuf.event_qual = SDIAS_EQ_SIZE;
- sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
- sccb.evbuf.event_id = 4712;
- sccb.evbuf.dbs = 1;
+ sccb->hdr.length = sizeof(*sccb);
+ sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
+ sccb->evbuf.hdr.type = EVTYP_SDIAS;
+ sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
+ sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
+ sccb->evbuf.event_id = 4712;
+ sccb->evbuf.dbs = 1;
- request.sccb = &sccb;
+ request.sccb = sccb;
request.command = SCLP_CMDW_WRITE_EVENT_DATA;
request.status = SCLP_REQ_FILLED;
request.callback = sdias_callback;
@@ -130,8 +132,8 @@ int sclp_sdias_blk_count(void)
pr_err("sclp_send failed for get_nr_blocks\n");
goto out;
}
- if (sccb.hdr.response_code != 0x0020) {
- TRACE("send failed: %x\n", sccb.hdr.response_code);
+ if (sccb->hdr.response_code != 0x0020) {
+ TRACE("send failed: %x\n", sccb->hdr.response_code);
rc = -EIO;
goto out;
}
@@ -163,30 +165,31 @@ out:
*/
int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
{
+ struct sdias_sccb *sccb = sclp_sdias_sccb;
struct sclp_req request;
int rc;
mutex_lock(&sdias_mutex);
- memset(&sccb, 0, sizeof(sccb));
+ memset(sccb, 0, sizeof(*sccb));
memset(&request, 0, sizeof(request));
- sccb.hdr.length = sizeof(sccb);
- sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
- sccb.evbuf.hdr.type = EVTYP_SDIAS;
- sccb.evbuf.hdr.flags = 0;
- sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA;
- sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
- sccb.evbuf.event_id = 4712;
- sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64;
- sccb.evbuf.event_status = 0;
- sccb.evbuf.blk_cnt = nr_blks;
- sccb.evbuf.asa = (unsigned long)dest;
- sccb.evbuf.fbn = start_blk;
- sccb.evbuf.lbn = 0;
- sccb.evbuf.dbs = 1;
-
- request.sccb = &sccb;
+ sccb->hdr.length = sizeof(*sccb);
+ sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
+ sccb->evbuf.hdr.type = EVTYP_SDIAS;
+ sccb->evbuf.hdr.flags = 0;
+ sccb->evbuf.event_qual = SDIAS_EQ_STORE_DATA;
+ sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
+ sccb->evbuf.event_id = 4712;
+ sccb->evbuf.asa_size = SDIAS_ASA_SIZE_64;
+ sccb->evbuf.event_status = 0;
+ sccb->evbuf.blk_cnt = nr_blks;
+ sccb->evbuf.asa = (unsigned long)dest;
+ sccb->evbuf.fbn = start_blk;
+ sccb->evbuf.lbn = 0;
+ sccb->evbuf.dbs = 1;
+
+ request.sccb = sccb;
request.command = SCLP_CMDW_WRITE_EVENT_DATA;
request.status = SCLP_REQ_FILLED;
request.callback = sdias_callback;
@@ -196,8 +199,8 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
pr_err("sclp_send failed: %x\n", rc);
goto out;
}
- if (sccb.hdr.response_code != 0x0020) {
- TRACE("copy failed: %x\n", sccb.hdr.response_code);
+ if (sccb->hdr.response_code != 0x0020) {
+ TRACE("copy failed: %x\n", sccb->hdr.response_code);
rc = -EIO;
goto out;
}
@@ -256,6 +259,8 @@ int __init sclp_sdias_init(void)
{
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return 0;
+ sclp_sdias_sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
+ BUG_ON(!sclp_sdias_sccb);
sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
debug_register_view(sdias_dbf, &debug_sprintf_view);
debug_set_level(sdias_dbf, 6);
@@ -264,6 +269,7 @@ int __init sclp_sdias_init(void)
if (sclp_sdias_init_async() == 0)
goto out;
TRACE("init failed\n");
+ free_page((unsigned long) sclp_sdias_sccb);
return -ENODEV;
out:
TRACE("init done\n");
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index fc206c9d1c56..ea4253939555 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -290,7 +290,7 @@ tapechar_open (struct inode *inode, struct file *filp)
rc = tape_open(device);
if (rc == 0) {
filp->private_data = device;
- nonseekable_open(inode, filp);
+ stream_open(inode, filp);
} else
tape_put_device(device);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 76d3c50bf078..08f812475f5e 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -4,7 +4,7 @@
* dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
* dump format as s390 standalone dumps.
*
- * For more information please refer to Documentation/s390/zfcpdump.txt
+ * For more information please refer to Documentation/s390/zfcpdump.rst
*
* Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
@@ -51,7 +51,7 @@ static struct dentry *zcore_dir;
static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
-static struct ipl_parameter_block *ipl_block;
+static struct ipl_parameter_block *zcore_ipl_block;
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
@@ -182,8 +182,8 @@ static const struct file_operations zcore_memmap_fops = {
static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
- if (ipl_block) {
- diag308(DIAG308_SET, ipl_block);
+ if (zcore_ipl_block) {
+ diag308(DIAG308_SET, zcore_ipl_block);
diag308(DIAG308_LOAD_CLEAR, NULL);
}
return count;
@@ -191,7 +191,7 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
static int zcore_reipl_release(struct inode *inode, struct file *filp)
@@ -265,18 +265,20 @@ static int __init zcore_reipl_init(void)
return rc;
if (ipib_info.ipib == 0)
return 0;
- ipl_block = (void *) __get_free_page(GFP_KERNEL);
- if (!ipl_block)
+ zcore_ipl_block = (void *) __get_free_page(GFP_KERNEL);
+ if (!zcore_ipl_block)
return -ENOMEM;
if (ipib_info.ipib < sclp.hsa_size)
- rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
+ rc = memcpy_hsa_kernel(zcore_ipl_block, ipib_info.ipib,
+ PAGE_SIZE);
else
- rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
- if (rc || (__force u32)csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
+ rc = memcpy_real(zcore_ipl_block, (void *) ipib_info.ipib,
+ PAGE_SIZE);
+ if (rc || (__force u32)csum_partial(zcore_ipl_block, zcore_ipl_block->hdr.len, 0) !=
ipib_info.checksum) {
TRACE("Checksum does not match\n");
- free_page((unsigned long) ipl_block);
- ipl_block = NULL;
+ free_page((unsigned long) zcore_ipl_block);
+ zcore_ipl_block = NULL;
}
return 0;
}
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index f230516abb96..f6a8db04177c 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -20,5 +20,6 @@ obj-$(CONFIG_CCWGROUP) += ccwgroup.o
qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
-vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o
+vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \
+ vfio_ccw_async.o
obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index a45011e4529e..427b2e24a8ce 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -16,9 +16,11 @@
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>
+#include <linux/dmapool.h>
#include <asm/airq.h>
#include <asm/isc.h>
+#include <asm/cio.h>
#include "cio.h"
#include "cio_debug.h"
@@ -27,6 +29,8 @@
static DEFINE_SPINLOCK(airq_lists_lock);
static struct hlist_head airq_lists[MAX_ISC+1];
+static struct dma_pool *airq_iv_cache;
+
/**
* register_adapter_interrupt() - register adapter interrupt handler
* @airq: pointer to adapter interrupt descriptor
@@ -95,7 +99,7 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy)
rcu_read_lock();
hlist_for_each_entry_rcu(airq, head, list)
if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
- airq->handler(airq);
+ airq->handler(airq, !tpi_info->directed_irq);
rcu_read_unlock();
return IRQ_HANDLED;
@@ -113,6 +117,11 @@ void __init init_airq_interrupts(void)
setup_irq(THIN_INTERRUPT, &airq_interrupt);
}
+static inline unsigned long iv_size(unsigned long bits)
+{
+ return BITS_TO_LONGS(bits) * sizeof(unsigned long);
+}
+
/**
* airq_iv_create - create an interrupt vector
* @bits: number of bits in the interrupt vector
@@ -129,10 +138,23 @@ struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
if (!iv)
goto out;
iv->bits = bits;
- size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
- iv->vector = kzalloc(size, GFP_KERNEL);
- if (!iv->vector)
- goto out_free;
+ iv->flags = flags;
+ size = iv_size(bits);
+
+ if (flags & AIRQ_IV_CACHELINE) {
+ if ((cache_line_size() * BITS_PER_BYTE) < bits
+ || !airq_iv_cache)
+ goto out_free;
+
+ iv->vector = dma_pool_zalloc(airq_iv_cache, GFP_KERNEL,
+ &iv->vector_dma);
+ if (!iv->vector)
+ goto out_free;
+ } else {
+ iv->vector = cio_dma_zalloc(size);
+ if (!iv->vector)
+ goto out_free;
+ }
if (flags & AIRQ_IV_ALLOC) {
iv->avail = kmalloc(size, GFP_KERNEL);
if (!iv->avail)
@@ -165,7 +187,10 @@ out_free:
kfree(iv->ptr);
kfree(iv->bitlock);
kfree(iv->avail);
- kfree(iv->vector);
+ if (iv->flags & AIRQ_IV_CACHELINE && iv->vector)
+ dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
+ else
+ cio_dma_free(iv->vector, size);
kfree(iv);
out:
return NULL;
@@ -181,7 +206,10 @@ void airq_iv_release(struct airq_iv *iv)
kfree(iv->data);
kfree(iv->ptr);
kfree(iv->bitlock);
- kfree(iv->vector);
+ if (iv->flags & AIRQ_IV_CACHELINE)
+ dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
+ else
+ cio_dma_free(iv->vector, iv_size(iv->bits));
kfree(iv->avail);
kfree(iv);
}
@@ -275,3 +303,13 @@ unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
return bit;
}
EXPORT_SYMBOL(airq_iv_scan);
+
+int __init airq_init(void)
+{
+ airq_iv_cache = dma_pool_create("airq_iv_cache", cio_get_dma_css_dev(),
+ cache_line_size(),
+ cache_line_size(), PAGE_SIZE);
+ if (!airq_iv_cache)
+ return -ENOMEM;
+ return 0;
+}
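
With AIRQ_IV_CACHELINE, airq_iv_create() now carves the vector itself out of the cache-line-sized DMA pool set up in airq_init(); larger vectors still come from cio_dma_zalloc(). A usage sketch (vector size and flag combination are illustrative):

	/* Sketch: small interrupt vector from the cache-line DMA pool,
	 * with bit allocation tracking via AIRQ_IV_ALLOC. */
	static int example_create_iv(void)
	{
		struct airq_iv *iv;
		unsigned long bit;

		iv = airq_iv_create(64, AIRQ_IV_CACHELINE | AIRQ_IV_ALLOC);
		if (!iv)
			return -ENOMEM;
		bit = airq_iv_alloc_bit(iv);	/* claim one indicator bit */
		if (bit == -1UL) {
			airq_iv_release(iv);
			return -ENOSPC;
		}
		/* ... use the bit, then ... */
		airq_iv_free_bit(iv, bit);
		airq_iv_release(iv);
		return 0;
	}
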
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 4ebf6d4fc66c..c522e9313c50 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -581,7 +581,7 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
}
EXPORT_SYMBOL(ccwgroup_driver_register);
-static int __ccwgroup_match_all(struct device *dev, void *data)
+static int __ccwgroup_match_all(struct device *dev, const void *data)
{
return 1;
}
@@ -608,9 +608,9 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
}
EXPORT_SYMBOL(ccwgroup_driver_unregister);
-static int __ccwgroupdev_check_busid(struct device *dev, void *id)
+static int __ccwgroupdev_check_busid(struct device *dev, const void *id)
{
- char *bus_id = id;
+ const char *bus_id = id;
return (strcmp(bus_id, dev_name(dev)) == 0);
}
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 603268a33ea1..73582a0a2622 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -63,7 +63,7 @@ static void ccwreq_stop(struct ccw_device *cdev, int rc)
return;
req->done = 1;
ccw_device_set_timeout(cdev, 0);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
if (rc && rc != -ENODEV && req->drc)
rc = req->drc;
req->callback(cdev, req->data, rc);
@@ -86,7 +86,7 @@ static void ccwreq_do(struct ccw_device *cdev)
continue;
}
/* Perform start function. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
rc = cio_start(sch, cp, (u8) req->mask);
if (rc == 0) {
/* I/O started successfully. */
@@ -169,7 +169,7 @@ int ccw_request_cancel(struct ccw_device *cdev)
*/
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
- struct irb *irb = &cdev->private->irb;
+ struct irb *irb = &cdev->private->dma_area->irb;
struct cmd_scsw *scsw = &irb->scsw.cmd;
enum uc_todo todo;
@@ -187,7 +187,8 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
CIO_TRACE_EVENT(2, "sensedata");
CIO_HEX_EVENT(2, &cdev->private->dev_id,
sizeof(struct ccw_dev_id));
- CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
+ CIO_HEX_EVENT(2, &cdev->private->dma_area->irb.ecw,
+ SENSE_MAX_COUNT);
/* Check for command reject. */
if (irb->ecw[0] & SNS0_CMD_REJECT)
return IO_REJECTED;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a835b31aad99..6392a1b95b02 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -323,36 +323,6 @@ struct chsc_sei {
} __packed __aligned(PAGE_SIZE);
/*
- * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
- */
-
-#define ND_VALIDITY_VALID 0
-#define ND_VALIDITY_OUTDATED 1
-#define ND_VALIDITY_INVALID 2
-
-struct node_descriptor {
- /* Flags. */
- union {
- struct {
- u32 validity:3;
- u32 reserved:5;
- } __packed;
- u8 byte0;
- } __packed;
-
- /* Node parameters. */
- u32 params:24;
-
- /* Node ID. */
- char type[6];
- char model[3];
- char manufacturer[3];
- char plant[2];
- char seq[12];
- u16 tag;
-} __packed;
-
-/*
* Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
*/
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 8d9f36625ba5..8f080d3fd380 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -203,7 +203,7 @@ static void chsc_cleanup_sch_driver(void)
static DEFINE_SPINLOCK(chsc_lock);
-static int chsc_subchannel_match_next_free(struct device *dev, void *data)
+static int chsc_subchannel_match_next_free(struct device *dev, const void *data)
{
struct subchannel *sch = to_subchannel(dev);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index de744ca158fd..18f5458f90e8 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -564,7 +564,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
}
static struct irqaction io_interrupt = {
- .name = "IO",
+ .name = "I/O",
.handler = do_cio_interrupt,
};
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 9811fd8a0c73..ba7d2480613b 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -51,7 +51,7 @@ struct tpi_info {
struct subchannel_id schid;
u32 intparm;
u32 adapter_IO:1;
- u32 :1;
+ u32 directed_irq:1;
u32 isc:3;
u32 :27;
u32 type:3;
@@ -113,9 +113,10 @@ struct subchannel {
enum sch_todo todo;
struct work_struct todo_work;
struct schib_config config;
+ char *driver_override; /* Driver name to force a match */
} __attribute__ ((aligned(8)));
-DECLARE_PER_CPU(struct irb, cio_irb);
+DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
#define to_subchannel(n) container_of(n, struct subchannel, dev)
@@ -135,6 +136,8 @@ extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);
+extern int __init airq_init(void);
+
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
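
The new directed_irq bit in struct tpi_info is what the airq.c hunk above feeds into the extended handler callback (handler(airq, !tpi_info->directed_irq)), so handlers can distinguish floating from directed adapter interrupts. A conforming handler, sketched under the assumption that the callback's second parameter is a bool flagging floating delivery:

	/* Sketch: adapter interrupt handler matching the extended
	 * two-argument callback signature. */
	static void example_airq_handler(struct airq_struct *airq, bool floating)
	{
		if (floating) {
			/* delivered as a floating interrupt: may arrive on
			 * any CPU, so all indicator bits need scanning */
		}
		/* process indicator bits behind airq->lsi_ptr ... */
	}
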
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index aea502922646..22c55816100b 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -20,6 +20,8 @@
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
+#include <linux/genalloc.h>
+#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>
@@ -165,6 +167,7 @@ static void css_subchannel_release(struct device *dev)
sch->config.intparm = 0;
cio_commit_config(sch);
+ kfree(sch->driver_override);
kfree(sch->lock);
kfree(sch);
}
@@ -224,6 +227,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
INIT_WORK(&sch->todo_work, css_sch_todo);
sch->dev.release = &css_subchannel_release;
device_initialize(&sch->dev);
+ /*
+ * The physical addresses of some of the dma structures that can
+ * belong to a subchannel need to fit 31 bit width (e.g. ccw).
+ */
+ sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
+ sch->dev.dma_mask = &sch->dev.coherent_dma_mask;
return sch;
err:
@@ -315,9 +324,57 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(modalias);
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ char *driver_override, *old, *cp;
+
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ device_lock(dev);
+ old = sch->driver_override;
+ if (strlen(driver_override)) {
+ sch->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ sch->driver_override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
+ device_unlock(dev);
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
static struct attribute *subch_attrs[] = {
&dev_attr_type.attr,
&dev_attr_modalias.attr,
+ &dev_attr_driver_override.attr,
NULL,
};
@@ -434,10 +491,10 @@ static int css_probe_device(struct subchannel_id schid, struct schib *schib)
}
static int
-check_subchannel(struct device * dev, void * data)
+check_subchannel(struct device *dev, const void *data)
{
struct subchannel *sch;
- struct subchannel_id *schid = data;
+ struct subchannel_id *schid = (void *)data;
sch = to_subchannel(dev);
return schid_equal(&sch->schid, schid);
@@ -899,6 +956,13 @@ static int __init setup_css(int nr)
dev_set_name(&css->device, "css%x", nr);
css->device.groups = cssdev_attr_groups;
css->device.release = channel_subsystem_release;
+ /*
+ * We currently allocate notifier bits with this (using
+ * css->device as the device argument with the DMA API)
+ * and are fine with 64 bit addresses.
+ */
+ css->device.coherent_dma_mask = DMA_BIT_MASK(64);
+ css->device.dma_mask = &css->device.coherent_dma_mask;
mutex_init(&css->mutex);
css->cssid = chsc_get_cssid(nr);
@@ -1018,6 +1082,111 @@ static struct notifier_block css_power_notifier = {
.notifier_call = css_power_event,
};
+#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
+static struct gen_pool *cio_dma_pool;
+
+/* Currently cio supports only a single css */
+struct device *cio_get_dma_css_dev(void)
+{
+ return &channel_subsystems[0]->device;
+}
+
+struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
+{
+ struct gen_pool *gp_dma;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ int i;
+
+ gp_dma = gen_pool_create(3, -1);
+ if (!gp_dma)
+ return NULL;
+ for (i = 0; i < nr_pages; ++i) {
+ cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
+ CIO_DMA_GFP);
+ if (!cpu_addr)
+ return gp_dma;
+ gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
+ dma_addr, PAGE_SIZE, -1);
+ }
+ return gp_dma;
+}
+
+static void __gp_dma_free_dma(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk, void *data)
+{
+ size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
+
+ dma_free_coherent((struct device *) data, chunk_size,
+ (void *) chunk->start_addr,
+ (dma_addr_t) chunk->phys_addr);
+}
+
+void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
+{
+ if (!gp_dma)
+ return;
+ /* this is quite ugly but no better idea */
+ gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
+ gen_pool_destroy(gp_dma);
+}
+
+static int cio_dma_pool_init(void)
+{
+ /* No need to free up the resources: compiled in */
+ cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
+ if (!cio_dma_pool)
+ return -ENOMEM;
+ return 0;
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size)
+{
+ dma_addr_t dma_addr;
+ unsigned long addr;
+ size_t chunk_size;
+
+ if (!gp_dma)
+ return NULL;
+ addr = gen_pool_alloc(gp_dma, size);
+ while (!addr) {
+ chunk_size = round_up(size, PAGE_SIZE);
+ addr = (unsigned long) dma_alloc_coherent(dma_dev,
+ chunk_size, &dma_addr, CIO_DMA_GFP);
+ if (!addr)
+ return NULL;
+ gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
+ addr = gen_pool_alloc(gp_dma, size);
+ }
+ return (void *) addr;
+}
+
+void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
+{
+ if (!cpu_addr)
+ return;
+ memset(cpu_addr, 0, size);
+ gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
+}
+
+/*
+ * Allocate dma memory from the css global pool. Intended for memory not
+ * specific to any single device within the css. The allocated memory
+ * is not guaranteed to be 31-bit addressable.
+ *
+ * Caution: Not suitable for early stuff like console.
+ */
+void *cio_dma_zalloc(size_t size)
+{
+ return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
+}
+
+void cio_dma_free(void *cpu_addr, size_t size)
+{
+ cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
+}
+
/*
* Now that the driver core is running, we can setup our channel subsystem.
* The struct subchannel's are created during probing.
@@ -1059,16 +1228,22 @@ static int __init css_bus_init(void)
if (ret)
goto out_unregister;
ret = register_pm_notifier(&css_power_notifier);
- if (ret) {
- unregister_reboot_notifier(&css_reboot_notifier);
- goto out_unregister;
- }
+ if (ret)
+ goto out_unregister_rn;
+ ret = cio_dma_pool_init();
+ if (ret)
+ goto out_unregister_pmn;
+ airq_init();
css_init_done = 1;
/* Enable default isc for I/O subchannels. */
isc_register(IO_SCH_ISC);
return 0;
+out_unregister_pmn:
+ unregister_pm_notifier(&css_power_notifier);
+out_unregister_rn:
+ unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
while (i-- > 0) {
struct channel_subsystem *css = channel_subsystems[i];
@@ -1222,6 +1397,10 @@ static int css_bus_match(struct device *dev, struct device_driver *drv)
struct css_driver *driver = to_cssdriver(drv);
struct css_device_id *id;
+ /* When driver_override is set, only bind to the matching driver */
+ if (sch->driver_override && strcmp(sch->driver_override, drv->name))
+ return 0;
+
for (id = driver->subchannel_type; id->match_flags; id++) {
if (sch->st == id->type)
return 1;
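
The new css helpers make DMA memory management symmetric: create a gen_pool seeded with coherent pages, carve zeroed chunks from it (growing the pool on demand), hand chunks back with cio_gp_dma_free(), and destroy the pool only once every chunk has been returned. The intended call sequence, as a sketch (device pointer and sizes are illustrative):

	/* Sketch: lifecycle of a per-device DMA pool using the new helpers. */
	static int example_dma_setup(struct device *dev,
				     struct gen_pool **pool, void **buf)
	{
		*pool = cio_gp_dma_create(dev, 1);	/* seed with one page */
		if (!*pool)
			return -ENOMEM;
		*buf = cio_gp_dma_zalloc(*pool, dev, 256);
		if (!*buf) {
			cio_gp_dma_destroy(*pool, dev);
			return -ENOMEM;
		}
		return 0;
	}

	static void example_dma_teardown(struct device *dev,
					 struct gen_pool *pool, void *buf)
	{
		cio_gp_dma_free(pool, buf, 256);  /* zeroes, then frees */
		cio_gp_dma_destroy(pool, dev);	  /* after all chunks are back */
	}
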
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 1540229a37bb..c421899be20f 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -24,6 +24,7 @@
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
+#include <linux/dma-mapping.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -642,10 +643,10 @@ static int ccw_device_add(struct ccw_device *cdev)
return device_add(dev);
}
-static int match_dev_id(struct device *dev, void *data)
+static int match_dev_id(struct device *dev, const void *data)
{
struct ccw_device *cdev = to_ccwdev(dev);
- struct ccw_dev_id *dev_id = data;
+ struct ccw_dev_id *dev_id = (void *)data;
return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}
@@ -687,6 +688,9 @@ ccw_device_release(struct device *dev)
struct ccw_device *cdev;
cdev = to_ccwdev(dev);
+ cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
+ sizeof(*cdev->private->dma_area));
+ cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
/* Release reference of parent subchannel. */
put_device(cdev->dev.parent);
kfree(cdev->private);
@@ -696,15 +700,33 @@ ccw_device_release(struct device *dev)
static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
struct ccw_device *cdev;
+ struct gen_pool *dma_pool;
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
- if (cdev) {
- cdev->private = kzalloc(sizeof(struct ccw_device_private),
- GFP_KERNEL | GFP_DMA);
- if (cdev->private)
- return cdev;
- }
+ if (!cdev)
+ goto err_cdev;
+ cdev->private = kzalloc(sizeof(struct ccw_device_private),
+ GFP_KERNEL | GFP_DMA);
+ if (!cdev->private)
+ goto err_priv;
+ cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
+ cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask;
+ dma_pool = cio_gp_dma_create(&cdev->dev, 1);
+ if (!dma_pool)
+ goto err_dma_pool;
+ cdev->private->dma_pool = dma_pool;
+ cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
+ sizeof(*cdev->private->dma_area));
+ if (!cdev->private->dma_area)
+ goto err_dma_area;
+ return cdev;
+err_dma_area:
+ cio_gp_dma_destroy(dma_pool, &cdev->dev);
+err_dma_pool:
+ kfree(cdev->private);
+err_priv:
kfree(cdev);
+err_cdev:
return ERR_PTR(-ENOMEM);
}
@@ -884,7 +906,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
wake_up(&ccw_device_init_wq);
break;
case DEV_STATE_OFFLINE:
- /*
+ /*
+ /*
* We can't register the device in interrupt context so
* we schedule a work item.
*/
@@ -1062,6 +1084,14 @@ static int io_subchannel_probe(struct subchannel *sch)
if (!io_priv)
goto out_schedule;
+ io_priv->dma_area = dma_alloc_coherent(&sch->dev,
+ sizeof(*io_priv->dma_area),
+ &io_priv->dma_area_dma, GFP_KERNEL);
+ if (!io_priv->dma_area) {
+ kfree(io_priv);
+ goto out_schedule;
+ }
+
set_io_private(sch, io_priv);
css_schedule_eval(sch->schid);
return 0;
@@ -1088,6 +1118,8 @@ static int io_subchannel_remove(struct subchannel *sch)
set_io_private(sch, NULL);
spin_unlock_irq(sch->lock);
out_free:
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
kfree(io_priv);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
@@ -1593,13 +1625,19 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
return ERR_CAST(sch);
io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
- if (!io_priv) {
- put_device(&sch->dev);
- return ERR_PTR(-ENOMEM);
- }
+ if (!io_priv)
+ goto err_priv;
+ io_priv->dma_area = dma_alloc_coherent(&sch->dev,
+ sizeof(*io_priv->dma_area),
+ &io_priv->dma_area_dma, GFP_KERNEL);
+ if (!io_priv->dma_area)
+ goto err_dma_area;
set_io_private(sch, io_priv);
cdev = io_subchannel_create_ccwdev(sch);
if (IS_ERR(cdev)) {
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
+ set_io_private(sch, NULL);
put_device(&sch->dev);
kfree(io_priv);
return cdev;
@@ -1607,6 +1645,12 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
cdev->drv = drv;
ccw_device_set_int_class(cdev);
return cdev;
+
+err_dma_area:
+ kfree(io_priv);
+err_priv:
+ put_device(&sch->dev);
+ return ERR_PTR(-ENOMEM);
}
void __init ccw_device_destroy_console(struct ccw_device *cdev)
@@ -1617,6 +1661,8 @@ void __init ccw_device_destroy_console(struct ccw_device *cdev)
set_io_private(sch, NULL);
put_device(&sch->dev);
put_device(&cdev->dev);
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
kfree(io_priv);
}
@@ -1653,11 +1699,9 @@ EXPORT_SYMBOL_GPL(ccw_device_force_console);
* get ccw_device matching the busid, but only if owned by cdrv
*/
static int
-__ccwdev_check_busid(struct device *dev, void *id)
+__ccwdev_check_busid(struct device *dev, const void *id)
{
- char *bus_id;
-
- bus_id = id;
+ const char *bus_id = id;
return (strcmp(bus_id, dev_name(dev)) == 0);
}
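This hunk tracks the driver-core series that constified the match-callback data for bus_find_device(). A sketch of a lookup under the new signature (the in-tree caller is the surrounding get_ccwdev_by_busid(), not visible in this hunk; the bus id is illustrative):

	struct device *dev;

	/* The match callback now takes 'const void *', so const data
	 * such as a string literal passes without a cast. */
	dev = bus_find_device(&ccw_bus_type, NULL, "0.0.1234",
			      __ccwdev_check_busid);
	if (dev)
		put_device(dev);	/* bus_find_device() grabs a reference */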
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 9169af7dbb43..8fc267324ebb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -67,8 +67,10 @@ static void ccw_timeout_log(struct ccw_device *cdev)
sizeof(struct tcw), 0);
} else {
printk(KERN_WARNING "cio: orb indicates command mode\n");
- if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
- (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
+ if ((void *)(addr_t)orb->cmd.cpa ==
+ &private->dma_area->sense_ccw ||
+ (void *)(addr_t)orb->cmd.cpa ==
+ cdev->private->dma_area->iccws)
printk(KERN_WARNING "cio: last channel program "
"(intern):\n");
else
@@ -143,18 +145,22 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
void ccw_device_update_sense_data(struct ccw_device *cdev)
{
memset(&cdev->id, 0, sizeof(cdev->id));
- cdev->id.cu_type = cdev->private->senseid.cu_type;
- cdev->id.cu_model = cdev->private->senseid.cu_model;
- cdev->id.dev_type = cdev->private->senseid.dev_type;
- cdev->id.dev_model = cdev->private->senseid.dev_model;
+ cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
+ cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
+ cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
+ cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
}
int ccw_device_test_sense_data(struct ccw_device *cdev)
{
- return cdev->id.cu_type == cdev->private->senseid.cu_type &&
- cdev->id.cu_model == cdev->private->senseid.cu_model &&
- cdev->id.dev_type == cdev->private->senseid.dev_type &&
- cdev->id.dev_model == cdev->private->senseid.dev_model;
+ return cdev->id.cu_type ==
+ cdev->private->dma_area->senseid.cu_type &&
+ cdev->id.cu_model ==
+ cdev->private->dma_area->senseid.cu_model &&
+ cdev->id.dev_type ==
+ cdev->private->dma_area->senseid.dev_type &&
+ cdev->id.dev_model ==
+ cdev->private->dma_area->senseid.dev_model;
}
/*
@@ -342,7 +348,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
cio_disable_subchannel(sch);
/* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
cdev->private->state = state;
@@ -509,13 +515,14 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
- create_fake_irb(&cdev->private->irb,
+ create_fake_irb(&cdev->private->dma_area->irb,
cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
- &cdev->private->irb);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ &cdev->private->dma_area->irb);
+ memset(&cdev->private->dma_area->irb, 0,
+ sizeof(struct irb));
}
ccw_device_report_path_events(cdev);
ccw_device_handle_broken_paths(cdev);
@@ -672,7 +679,8 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
if (scsw_actl(&sch->schib.scsw) != 0 ||
(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
- (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
+ (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
+ SCSW_STCTL_STATUS_PEND)) {
/*
* No final status yet or final status not yet delivered
* to the device driver. Can't do path verification now,
@@ -719,7 +727,7 @@ static int ccw_device_call_handler(struct ccw_device *cdev)
* - fast notification was requested (primary status)
* - unsolicited interrupts
*/
- stctl = scsw_stctl(&cdev->private->irb.scsw);
+ stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
(stctl == SCSW_STCTL_STATUS_PEND);
@@ -735,9 +743,9 @@ static int ccw_device_call_handler(struct ccw_device *cdev)
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
- &cdev->private->irb);
+ &cdev->private->dma_area->irb);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
return 1;
}
@@ -759,7 +767,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
/* Unit check but no sense data. Need basic sense. */
if (ccw_device_do_sense(cdev, irb) != 0)
goto call_handler_unsol;
- memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+ memcpy(&cdev->private->dma_area->irb, irb,
+ sizeof(struct irb));
cdev->private->state = DEV_STATE_W4SENSE;
cdev->private->intparm = 0;
return;
@@ -842,7 +851,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
if (scsw_fctl(&irb->scsw) &
(SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
cdev->private->flags.dosense = 0;
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
ccw_device_accumulate_irb(cdev, irb);
goto call_handler;
}
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index f6df83a9dfbb..740996d0dc8c 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -99,7 +99,7 @@ static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
static int diag210_get_dev_info(struct ccw_device *cdev)
{
struct ccw_dev_id *dev_id = &cdev->private->dev_id;
- struct senseid *senseid = &cdev->private->senseid;
+ struct senseid *senseid = &cdev->private->dma_area->senseid;
struct diag210 diag_data;
int rc;
@@ -134,8 +134,10 @@ err_failed:
static void snsid_init(struct ccw_device *cdev)
{
cdev->private->flags.esid = 0;
- memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
- cdev->private->senseid.cu_type = 0xffff;
+
+ memset(&cdev->private->dma_area->senseid, 0,
+ sizeof(cdev->private->dma_area->senseid));
+ cdev->private->dma_area->senseid.cu_type = 0xffff;
}
/*
@@ -143,16 +145,16 @@ static void snsid_init(struct ccw_device *cdev)
*/
static int snsid_check(struct ccw_device *cdev, void *data)
{
- struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
+ struct cmd_scsw *scsw = &cdev->private->dma_area->irb.scsw.cmd;
int len = sizeof(struct senseid) - scsw->count;
/* Check for incomplete SENSE ID data. */
if (len < SENSE_ID_MIN_LEN)
goto out_restart;
- if (cdev->private->senseid.cu_type == 0xffff)
+ if (cdev->private->dma_area->senseid.cu_type == 0xffff)
goto out_restart;
/* Check for incompatible SENSE ID data. */
- if (cdev->private->senseid.reserved != 0xff)
+ if (cdev->private->dma_area->senseid.reserved != 0xff)
return -EOPNOTSUPP;
/* Check for extended-identification information. */
if (len > SENSE_ID_BASIC_LEN)
@@ -170,7 +172,7 @@ out_restart:
static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
{
struct ccw_dev_id *id = &cdev->private->dev_id;
- struct senseid *senseid = &cdev->private->senseid;
+ struct senseid *senseid = &cdev->private->dma_area->senseid;
int vm = 0;
if (rc && MACHINE_IS_VM) {
@@ -200,7 +202,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
CIO_TRACE_EVENT(4, "snsid");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
@@ -208,7 +210,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
snsid_init(cdev);
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_ID;
- cp->cda = (u32) (addr_t) &cdev->private->senseid;
+ cp->cda = (u32) (addr_t) &cdev->private->dma_area->senseid;
cp->count = sizeof(struct senseid);
cp->flags = CCW_FLAG_SLI;
/* Request setup. */
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 4435ae0b3027..d722458c5928 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -429,8 +429,8 @@ struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
if (cdev->private->flags.esid == 0)
return NULL;
for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
- if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
- return cdev->private->senseid.ciw + ciw_cnt;
+ if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
+ return cdev->private->dma_area->senseid.ciw + ciw_cnt;
return NULL;
}
@@ -699,6 +699,23 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);
+/*
+ * Allocate zeroed DMA-coherent, 31-bit addressable memory from
+ * the subchannel's DMA pool. The maximal supported allocation
+ * size is PAGE_SIZE.
+ */
+void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
+{
+ return cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
+}
+EXPORT_SYMBOL(ccw_device_dma_zalloc);
+
+void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size)
+{
+ cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size);
+}
+EXPORT_SYMBOL(ccw_device_dma_free);
+
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
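The two exports above form the new per-device DMA API. A minimal usage sketch (the function and the NOP program are illustrative, not part of this patch):

	/* Allocate a zeroed, 31-bit addressable channel program from the
	 * device's DMA pool, use it, and hand it back. */
	static int example_issue_nop(struct ccw_device *cdev)
	{
		struct ccw1 *ccw;

		ccw = ccw_device_dma_zalloc(cdev, sizeof(*ccw));	/* <= PAGE_SIZE */
		if (!ccw)
			return -ENOMEM;

		ccw->cmd_code = CCW_CMD_NOOP;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 0;
		/* ... start I/O with ccw_device_start() under the ccwlock,
		 * wait for the interrupt ... */

		ccw_device_dma_free(cdev, ccw, sizeof(*ccw));
		return 0;
	}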
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index d30a3babf176..767a85635a0f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -57,7 +57,7 @@ out:
static void nop_build_cp(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
cp->cmd_code = CCW_CMD_NOOP;
cp->cda = 0;
@@ -134,9 +134,9 @@ err:
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
int i = pathmask_to_pos(req->lpm);
- struct pgid *pgid = &cdev->private->pgid[i];
+ struct pgid *pgid = &cdev->private->dma_area->pgid[i];
pgid->inf.fc = fn;
cp->cmd_code = CCW_CMD_SET_PGID;
@@ -300,7 +300,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
int *mismatch, u8 *reserved, u8 *reset)
{
- struct pgid *pgid = &cdev->private->pgid[0];
+ struct pgid *pgid = &cdev->private->dma_area->pgid[0];
struct pgid *first = NULL;
int lpm;
int i;
@@ -342,7 +342,7 @@ static u8 pgid_to_donepm(struct ccw_device *cdev)
lpm = 0x80 >> i;
if ((cdev->private->pgid_valid_mask & lpm) == 0)
continue;
- pgid = &cdev->private->pgid[i];
+ pgid = &cdev->private->dma_area->pgid[i];
if (sch->opm & lpm) {
if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
continue;
@@ -368,7 +368,8 @@ static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
int i;
for (i = 0; i < 8; i++)
- memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
+ memcpy(&cdev->private->dma_area->pgid[i], pgid,
+ sizeof(struct pgid));
}
/*
@@ -435,12 +436,12 @@ out:
static void snid_build_cp(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
int i = pathmask_to_pos(req->lpm);
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_PGID;
- cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
+ cp->cda = (u32) (addr_t) &cdev->private->dma_area->pgid[i];
cp->count = sizeof(struct pgid);
cp->flags = CCW_FLAG_SLI;
req->cp = cp;
@@ -516,7 +517,8 @@ static void verify_start(struct ccw_device *cdev)
sch->lpm = sch->schib.pmcw.pam;
/* Initialize PGID data. */
- memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+ memset(cdev->private->dma_area->pgid, 0,
+ sizeof(cdev->private->dma_area->pgid));
cdev->private->pgid_valid_mask = 0;
cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
cdev->private->path_notoper_mask = 0;
@@ -626,7 +628,7 @@ struct stlck_data {
static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
cp[0].cmd_code = CCW_CMD_STLCK;
cp[0].cda = (u32) (addr_t) buf1;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 7d5c7892b2c4..0bd8f2642732 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -79,15 +79,15 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
* are conditions that have to be met for the extended control
* bit to have meaning. Sick.
*/
- cdev->private->irb.scsw.cmd.ectl = 0;
+ cdev->private->dma_area->irb.scsw.cmd.ectl = 0;
if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
!(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
- cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
+ cdev->private->dma_area->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
/* Check if extended control word is valid. */
- if (!cdev->private->irb.scsw.cmd.ectl)
+ if (!cdev->private->dma_area->irb.scsw.cmd.ectl)
return;
/* Copy concurrent sense / model dependent information. */
- memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
+ memcpy(&cdev->private->dma_area->irb.ecw, irb->ecw, sizeof(irb->ecw));
}
/*
@@ -118,7 +118,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
if (!ccw_device_accumulate_esw_valid(irb))
return;
- cdev_irb = &cdev->private->irb;
+ cdev_irb = &cdev->private->dma_area->irb;
/* Copy last path used mask. */
cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
@@ -210,7 +210,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
ccw_device_path_notoper(cdev);
/* No irb accumulation for transport mode irbs. */
if (scsw_is_tm(&irb->scsw)) {
- memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+ memcpy(&cdev->private->dma_area->irb, irb, sizeof(struct irb));
return;
}
/*
@@ -219,7 +219,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
if (!scsw_is_solicited(&irb->scsw))
return;
- cdev_irb = &cdev->private->irb;
+ cdev_irb = &cdev->private->dma_area->irb;
/*
* If the clear function had been performed, all formerly pending
@@ -227,7 +227,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
* intermediate accumulated status to the device driver.
*/
if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
/* Copy bits which are valid only for the start function. */
if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
@@ -329,9 +329,9 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
/*
* We have ending status but no sense information. Do a basic sense.
*/
- sense_ccw = &to_io_private(sch)->sense_ccw;
+ sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
- sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
+ sense_ccw->cda = (__u32) __pa(cdev->private->dma_area->irb.ecw);
sense_ccw->count = SENSE_MAX_COUNT;
sense_ccw->flags = CCW_FLAG_SLI;
@@ -364,7 +364,7 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
- cdev->private->irb.esw.esw0.erw.cons = 1;
+ cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
}
/* Check if path verification is required. */
@@ -386,7 +386,7 @@ ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
/* Check for basic sense. */
if (cdev->private->flags.dosense &&
!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
- cdev->private->irb.esw.esw0.erw.cons = 1;
+ cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
return 0;
}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 90e4e3a7841b..c03b4a19974e 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -9,15 +9,20 @@
#include "css.h"
#include "orb.h"
+struct io_subchannel_dma_area {
+ struct ccw1 sense_ccw; /* static ccw for sense command */
+};
+
struct io_subchannel_private {
union orb orb; /* operation request block */
- struct ccw1 sense_ccw; /* static ccw for sense command */
struct ccw_device *cdev;/* pointer to the child ccw device */
struct {
unsigned int suspend:1; /* allow suspend */
unsigned int prefetch:1;/* deny prefetch */
unsigned int inter:1; /* suppress intermediate interrupts */
} __packed options;
+ struct io_subchannel_dma_area *dma_area;
+ dma_addr_t dma_area_dma;
} __aligned(8);
#define to_io_private(n) ((struct io_subchannel_private *) \
@@ -115,6 +120,13 @@ enum cdev_todo {
#define FAKE_CMD_IRB 1
#define FAKE_TM_IRB 2
+struct ccw_device_dma_area {
+ struct senseid senseid; /* SenseID info */
+ struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
+ struct irb irb; /* device status */
+ struct pgid pgid[8]; /* path group IDs per chpid */
+};
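All of these members now live in 31-bit DMA-capable storage carved from the pool created in io_subchannel_allocate_dev(). Since ccw_device_dma_zalloc() supports at most PAGE_SIZE per allocation, a compile-time check one could add (a sketch, not in the patch):

	static_assert(sizeof(struct ccw_device_dma_area) <= PAGE_SIZE,
		      "ccw_device_dma_area must fit one pool chunk");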
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
@@ -156,11 +168,7 @@ struct ccw_device_private {
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data;
- struct irb irb; /* device status */
int async_kill_io_rc;
- struct senseid senseid; /* SenseID info */
- struct pgid pgid[8]; /* path group IDs per chpid*/
- struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
struct work_struct todo_work;
enum cdev_todo todo;
wait_queue_head_t wait_q;
@@ -169,6 +177,8 @@ struct ccw_device_private {
struct list_head cmb_list; /* list of measured devices */
u64 cmb_start_time; /* clock value of cmb reset */
void *cmb_wait; /* deferred cmb enable/disable */
+ struct gen_pool *dma_pool;
+ struct ccw_device_dma_area *dma_area;
enum interruption_class int_class;
};
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 14d328338ce2..08eb10283b18 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -233,6 +233,7 @@ int hsch(struct subchannel_id schid)
return ccode;
}
+EXPORT_SYMBOL(hsch);
static inline int __xsch(struct subchannel_id schid)
{
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index a6f7c2986b94..a06944399865 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -228,9 +228,6 @@ struct qdio_q {
*/
int first_to_check;
- /* first_to_check of the last time */
- int last_move;
-
/* beginning position for calling the program */
int first_to_kick;
@@ -341,8 +338,7 @@ static inline int multicast_outbound(struct qdio_q *q)
(q->nr == q->irq_ptr->nr_output_qs - 1);
}
-#define pci_out_supported(q) \
- (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
+#define pci_out_supported(irq) ((irq)->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index d2f98e5829d4..35410e6eda2e 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -121,15 +121,14 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n",
q->timestamp, last_ai_time);
- seq_printf(m, "nr_used: %d ftc: %d last_move: %d\n",
- atomic_read(&q->nr_buf_used),
- q->first_to_check, q->last_move);
+ seq_printf(m, "nr_used: %d ftc: %d\n",
+ atomic_read(&q->nr_buf_used), q->first_to_check);
if (q->is_input_q) {
seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
q->u.in.polling, q->u.in.ack_start,
q->u.in.ack_count);
- seq_printf(m, "DSCI: %d IRQs disabled: %u\n",
- *(u32 *)q->irq_ptr->dsci,
+ seq_printf(m, "DSCI: %x IRQs disabled: %u\n",
+ *(u8 *)q->irq_ptr->dsci,
test_bit(QDIO_QUEUE_IRQS_DISABLED,
&q->u.in.queue_irq_state));
}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9537e656e927..730c4e68094b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -205,17 +205,22 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
int auto_ack, int merge_pending)
{
unsigned char __state = 0;
- int i;
+ int i = 1;
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
/* get initial state: */
__state = q->slsb.val[bufnr];
+
+ /* Bail out early if there is no work on the queue: */
+ if (__state & SLSB_OWNER_CU)
+ goto out;
+
if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
__state = SLSB_P_OUTPUT_EMPTY;
- for (i = 1; i < count; i++) {
+ for (; i < count; i++) {
bufnr = next_buf(bufnr);
/* merge PENDING into EMPTY: */
@@ -228,6 +233,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
if (q->slsb.val[bufnr] != __state)
break;
}
+
+out:
*state = __state;
return i;
}
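A simplified model of the merged scan loop (illustrative only; the PENDING-into-EMPTY merge is omitted, and the constants are the ones used above):

	/* Count consecutive buffers sharing the first buffer's SLSB state,
	 * bailing out after a single entry when the control unit owns it. */
	static int scan_slsb(const unsigned char *slsb, int bufnr, int count,
			     unsigned char *state)
	{
		unsigned char first = slsb[bufnr];
		int i = 1;

		if (first & SLSB_OWNER_CU)	/* no work for the driver yet */
			goto out;
		for (; i < count; i++)
			if (slsb[(bufnr + i) & QDIO_MAX_BUFFERS_MASK] != first)
				break;
	out:
		*state = first;
		return i;
	}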
@@ -371,7 +378,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
static inline void qdio_sync_queues(struct qdio_q *q)
{
/* PCI capable outbound queues will also be scanned so sync them too */
- if (pci_out_supported(q))
+ if (pci_out_supported(q->irq_ptr))
qdio_siga_sync_all(q);
else
qdio_siga_sync_q(q);
@@ -382,7 +389,7 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
{
if (need_siga_sync(q))
qdio_siga_sync_q(q);
- return get_buf_states(q, bufnr, state, 1, 0, 0);
+ return get_buf_state(q, bufnr, state, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
@@ -415,7 +422,8 @@ static inline void account_sbals(struct qdio_q *q, unsigned int count)
q->q_stats.nr_sbals[pos]++;
}
-static void process_buffer_error(struct qdio_q *q, int count)
+static void process_buffer_error(struct qdio_q *q, unsigned int start,
+ int count)
{
unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
SLSB_P_OUTPUT_NOT_INIT;
@@ -424,29 +432,29 @@ static void process_buffer_error(struct qdio_q *q, int count)
/* special handling for no target buffer empty */
if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
- q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
+ q->sbal[start]->element[15].sflags == 0x10) {
qperf_inc(q, target_full);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
- q->first_to_check);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
goto set;
}
DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
- DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
+ DBF_ERROR("FTC:%3d C:%3d", start, count);
DBF_ERROR("F14:%2x F15:%2x",
- q->sbal[q->first_to_check]->element[14].sflags,
- q->sbal[q->first_to_check]->element[15].sflags);
+ q->sbal[start]->element[14].sflags,
+ q->sbal[start]->element[15].sflags);
set:
/*
* Interrupts may be avoided as long as the error is present
* so change the buffer state immediately to avoid starvation.
*/
- set_buf_states(q, q->first_to_check, state, count);
+ set_buf_states(q, start, state, count);
}
-static inline void inbound_primed(struct qdio_q *q, int count)
+static inline void inbound_primed(struct qdio_q *q, unsigned int start,
+ int count)
{
int new;
@@ -457,7 +465,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
if (!q->u.in.polling) {
q->u.in.polling = 1;
q->u.in.ack_count = count;
- q->u.in.ack_start = q->first_to_check;
+ q->u.in.ack_start = start;
return;
}
@@ -465,7 +473,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
q->u.in.ack_count);
q->u.in.ack_count = count;
- q->u.in.ack_start = q->first_to_check;
+ q->u.in.ack_start = start;
return;
}
@@ -473,7 +481,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
* ACK the newest buffer. The ACK will be removed in qdio_stop_polling
* or by the next inbound run.
*/
- new = add_buf(q->first_to_check, count - 1);
+ new = add_buf(start, count - 1);
if (q->u.in.polling) {
/* reset the previous ACK but first set the new one */
set_buf_state(q, new, SLSB_P_INPUT_ACK);
@@ -488,10 +496,10 @@ static inline void inbound_primed(struct qdio_q *q, int count)
if (!count)
return;
/* need to change ALL buffers to get more interrupts */
- set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
+ set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
}
-static int get_inbound_buffer_frontier(struct qdio_q *q)
+static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
int count;
@@ -504,64 +512,58 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
if (!count)
- goto out;
+ return 0;
/*
* No siga sync here, as a PCI interrupt or a thin interrupt has
* already synced the queues.
*/
- count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
+ count = get_buf_states(q, start, &state, count, 1, 0);
if (!count)
- goto out;
+ return 0;
switch (state) {
case SLSB_P_INPUT_PRIMED:
- inbound_primed(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ inbound_primed(q, start, count);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
- break;
+ return count;
case SLSB_P_INPUT_ERROR:
- process_buffer_error(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ process_buffer_error(q, start, count);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
- break;
+ return count;
case SLSB_CU_INPUT_EMPTY:
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_INPUT_ACK:
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
- q->nr, q->first_to_check);
- break;
+ q->nr, start);
+ return 0;
default:
WARN_ON_ONCE(1);
+ return 0;
}
-out:
- return q->first_to_check;
}
-static int qdio_inbound_q_moved(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
{
- int bufnr;
+ int count;
- bufnr = get_inbound_buffer_frontier(q);
+ count = get_inbound_buffer_frontier(q, start);
- if (bufnr != q->last_move) {
- q->last_move = bufnr;
- if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
- q->u.in.timestamp = get_tod_clock();
- return 1;
- } else
- return 0;
+ if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
+ q->u.in.timestamp = get_tod_clock();
+
+ return count;
}
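The *_q_moved() helpers now return a count of ready buffers instead of the new frontier, and callers advance first_to_check themselves with qdio's existing ring helper, which is roughly:

	/* modulo-128 increment; QDIO_MAX_BUFFERS_MASK is QDIO_MAX_BUFFERS_PER_Q - 1 */
	static inline int add_buf(int bufnr, int inc)
	{
		return (bufnr + inc) & QDIO_MAX_BUFFERS_MASK;
	}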
-static inline int qdio_inbound_q_done(struct qdio_q *q)
+static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
@@ -570,7 +572,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
if (need_siga_sync(q))
qdio_siga_sync_q(q);
- get_buf_state(q, q->first_to_check, &state, 0);
+ get_buf_state(q, start, &state, 0);
if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
/* more work coming */
@@ -588,8 +590,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
* has (probably) not moved (see qdio_inbound_processing).
*/
if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
- q->first_to_check);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start);
return 1;
} else
return 0;
@@ -637,17 +638,13 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
return phys_aob;
}
-static void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
{
int start = q->first_to_kick;
- int end = q->first_to_check;
- int count;
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
- count = sub_buf(end, start);
-
if (q->is_input_q) {
qperf_inc(q, inbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
@@ -663,7 +660,7 @@ static void qdio_kick_handler(struct qdio_q *q)
q->irq_ptr->int_parm);
/* for the next time */
- q->first_to_kick = end;
+ q->first_to_kick = add_buf(start, count);
q->qdio_error = 0;
}
@@ -678,14 +675,20 @@ static inline int qdio_tasklet_schedule(struct qdio_q *q)
static void __qdio_inbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_inbound);
- if (!qdio_inbound_q_moved(q))
+ count = qdio_inbound_q_moved(q, start);
+ if (count == 0)
return;
- qdio_kick_handler(q);
+ start = add_buf(start, count);
+ q->first_to_check = start;
+ qdio_kick_handler(q, count);
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
/* means poll time is not yet over */
qperf_inc(q, tasklet_inbound_resched);
if (!qdio_tasklet_schedule(q))
@@ -697,7 +700,7 @@ static void __qdio_inbound_processing(struct qdio_q *q)
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
qperf_inc(q, tasklet_inbound_resched2);
qdio_tasklet_schedule(q);
}
@@ -709,7 +712,7 @@ void qdio_inbound_processing(unsigned long data)
__qdio_inbound_processing(q);
}
-static int get_outbound_buffer_frontier(struct qdio_q *q)
+static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
int count;
@@ -718,59 +721,50 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
- !pci_out_supported(q)) ||
+ !pci_out_supported(q->irq_ptr)) ||
(queue_type(q) == QDIO_IQDIO_QFMT &&
multicast_outbound(q)))
qdio_siga_sync_q(q);
- /*
- * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
- * would return 0.
- */
- count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+ count = atomic_read(&q->nr_buf_used);
if (!count)
- goto out;
+ return 0;
- count = get_buf_states(q, q->first_to_check, &state, count, 0,
- q->u.out.use_cq);
+ count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
if (!count)
- goto out;
+ return 0;
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
+ case SLSB_P_OUTPUT_PENDING:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
"out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used);
- q->first_to_check = add_buf(q->first_to_check, count);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
-
- break;
+ return count;
case SLSB_P_OUTPUT_ERROR:
- process_buffer_error(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ process_buffer_error(q, start, count);
atomic_sub(count, &q->nr_buf_used);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
- break;
+ return count;
case SLSB_CU_OUTPUT_PRIMED:
/* the adapter has not fetched the output yet */
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
q->nr);
- break;
+ return 0;
case SLSB_P_OUTPUT_NOT_INIT:
case SLSB_P_OUTPUT_HALTED:
- break;
+ return 0;
default:
WARN_ON_ONCE(1);
+ return 0;
}
-
-out:
- return q->first_to_check;
}
/* all buffers processed? */
@@ -779,18 +773,16 @@ static inline int qdio_outbound_q_done(struct qdio_q *q)
return atomic_read(&q->nr_buf_used) == 0;
}
-static inline int qdio_outbound_q_moved(struct qdio_q *q)
+static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
{
- int bufnr;
+ int count;
- bufnr = get_outbound_buffer_frontier(q);
+ count = get_outbound_buffer_frontier(q, start);
- if (bufnr != q->last_move) {
- q->last_move = bufnr;
+ if (count)
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
- return 1;
- } else
- return 0;
+
+ return count;
}
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
@@ -837,15 +829,21 @@ retry:
static void __qdio_outbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_outbound);
WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
- if (qdio_outbound_q_moved(q))
- qdio_kick_handler(q);
+ count = qdio_outbound_q_moved(q, start);
+ if (count) {
+ q->first_to_check = add_buf(start, count);
+ qdio_kick_handler(q, count);
+ }
- if (queue_type(q) == QDIO_ZFCP_QFMT)
- if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
- goto sched;
+ if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
+ !qdio_outbound_q_done(q))
+ goto sched;
if (q->u.out.pci_out_enabled)
return;
@@ -881,37 +879,40 @@ void qdio_outbound_timer(struct timer_list *t)
qdio_tasklet_schedule(q);
}
-static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
{
struct qdio_q *out;
int i;
- if (!pci_out_supported(q))
+ if (!pci_out_supported(irq))
return;
- for_each_output_queue(q->irq_ptr, out, i)
+ for_each_output_queue(irq, out, i)
if (!qdio_outbound_q_done(out))
qdio_tasklet_schedule(out);
}
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_inbound);
if (need_siga_sync(q) && need_siga_sync_after_ai(q))
qdio_sync_queues(q);
- /*
- * The interrupt could be caused by a PCI request. Check the
- * PCI capable outbound queues.
- */
- qdio_check_outbound_after_thinint(q);
+ /* The interrupt could be caused by a PCI request: */
+ qdio_check_outbound_pci_queues(q->irq_ptr);
- if (!qdio_inbound_q_moved(q))
+ count = qdio_inbound_q_moved(q, start);
+ if (count == 0)
return;
- qdio_kick_handler(q);
+ start = add_buf(start, count);
+ q->first_to_check = start;
+ qdio_kick_handler(q, count);
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
qperf_inc(q, tasklet_inbound_resched);
if (!qdio_tasklet_schedule(q))
return;
@@ -922,7 +923,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
qperf_inc(q, tasklet_inbound_resched2);
qdio_tasklet_schedule(q);
}
@@ -976,7 +977,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
}
}
- if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+ if (!pci_out_supported(irq_ptr))
return;
for_each_output_queue(irq_ptr, q, i) {
@@ -1642,7 +1643,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
*/
if (test_nonshared_ind(irq_ptr))
goto rescan;
- if (!qdio_inbound_q_done(q))
+ if (!qdio_inbound_q_done(q, q->first_to_check))
goto rescan;
return 0;
@@ -1672,12 +1673,14 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
int *error)
{
struct qdio_q *q;
- int start, end;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ unsigned int start;
+ int count;
if (!irq_ptr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
+ start = q->first_to_check;
/*
* Cannot rely on automatic sync after interrupt since queues may
@@ -1686,25 +1689,27 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
if (need_siga_sync(q))
qdio_sync_queues(q);
- /* check the PCI capable outbound queues. */
- qdio_check_outbound_after_thinint(q);
+ qdio_check_outbound_pci_queues(irq_ptr);
- if (!qdio_inbound_q_moved(q))
+ count = qdio_inbound_q_moved(q, start);
+ if (count == 0)
return 0;
+ start = add_buf(start, count);
+ q->first_to_check = start;
+
/* Note: upper-layer MUST stop processing immediately here ... */
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return -EIO;
- start = q->first_to_kick;
- end = q->first_to_check;
- *bufnr = start;
+ *bufnr = q->first_to_kick;
*error = q->qdio_error;
/* for the next time */
- q->first_to_kick = end;
+ q->first_to_kick = add_buf(q->first_to_kick, count);
q->qdio_error = 0;
- return sub_buf(end, start);
+
+ return count;
}
EXPORT_SYMBOL(qdio_get_next_buffers);
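The external contract of qdio_get_next_buffers() is unchanged: it still returns the number of buffers processed starting at *bufnr. A polling caller, sketched (consume_buffer() is a placeholder for the upper layer's work):

	int bufnr, error, i, n;

	n = qdio_get_next_buffers(cdev, 0, &bufnr, &error);
	if (n < 0)
		return n;	/* -ENODEV or -EIO */
	for (i = 0; i < n; i++)
		consume_buffer(queue, (bufnr + i) & QDIO_MAX_BUFFERS_MASK);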
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index a59887fad13e..d4101cecdc8d 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -150,6 +150,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
return -ENOMEM;
}
irq_ptr_qs[i] = q;
+ INIT_LIST_HEAD(&q->entry);
}
return 0;
}
@@ -178,6 +179,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->mask = 1 << (31 - i);
q->nr = i;
q->handler = handler;
+ INIT_LIST_HEAD(&q->entry);
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
@@ -523,7 +525,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
irq_ptr->schid.sch_no,
is_thinint_irq(irq_ptr),
(irq_ptr->sch_token) ? 1 : 0,
- (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
+ pci_out_supported(irq_ptr) ? 1 : 0,
css_general_characteristics.aif_tdd,
(irq_ptr->siga_flag.input) ? "R" : " ",
(irq_ptr->siga_flag.output) ? "W" : " ",
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 07dea602205b..93ee067c10ca 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -40,7 +40,7 @@ static LIST_HEAD(tiq_list);
static DEFINE_MUTEX(tiq_list_lock);
/* Adapter interrupt definitions */
-static void tiqdio_thinint_handler(struct airq_struct *airq);
+static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating);
static struct airq_struct tiqdio_airq = {
.handler = tiqdio_thinint_handler,
@@ -79,7 +79,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
mutex_lock(&tiq_list_lock);
list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
- xchg(irq_ptr->dsci, 1 << 7);
}
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
@@ -87,14 +86,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
struct qdio_q *q;
q = irq_ptr->input_qs[0];
- /* if establish triggered an error */
- if (!q || !q->entry.prev || !q->entry.next)
+ if (!q)
return;
mutex_lock(&tiq_list_lock);
list_del_rcu(&q->entry);
mutex_unlock(&tiq_list_lock);
synchronize_rcu();
+ INIT_LIST_HEAD(&q->entry);
}
static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
@@ -178,8 +177,9 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @airq: pointer to adapter interrupt descriptor
+ * @floating: flag to recognize floating vs. directed interrupts (unused)
*/
-static void tiqdio_thinint_handler(struct airq_struct *airq)
+static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
{
u32 si_used = clear_shared_ind();
struct qdio_q *q;
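Every adapter-interrupt handler gains the floating flag with this series. A minimal handler under the new signature might look like this (illustrative; registration via register_adapter_interrupt() not shown):

	static void example_airq_handler(struct airq_struct *airq, bool floating)
	{
		/* floating: the interrupt was not directed at a specific CPU */
		inc_irq_stat(IRQIO_QAI);
		/* ... scan the indicator bits, schedule queue processing ... */
	}

	static struct airq_struct example_airq = {
		.handler = example_airq_handler,
		.isc	 = QDIO_AIRQ_ISC,
	};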
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index 6bca1d5455d4..9f26d4310bb3 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -174,10 +174,10 @@ out:
kobject_uevent(&scmdev->dev.kobj, KOBJ_CHANGE);
}
-static int check_address(struct device *dev, void *data)
+static int check_address(struct device *dev, const void *data)
{
struct scm_device *scmdev = to_scm_dev(dev);
- struct sale *sale = data;
+ const struct sale *sale = data;
return scmdev->address == sale->sa;
}
diff --git a/drivers/s390/cio/trace.c b/drivers/s390/cio/trace.c
index e331cd97e83b..882ee538ca30 100644
--- a/drivers/s390/cio/trace.c
+++ b/drivers/s390/cio/trace.c
@@ -21,5 +21,4 @@ EXPORT_TRACEPOINT_SYMBOL(s390_cio_csch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_hsch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_xsch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_rsch);
-EXPORT_TRACEPOINT_SYMBOL(s390_cio_rchp);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_chsc);
diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
index 0ebb29b6fd6d..4803139bce14 100644
--- a/drivers/s390/cio/trace.h
+++ b/drivers/s390/cio/trace.h
@@ -274,29 +274,6 @@ DEFINE_EVENT(s390_class_schid, s390_cio_rsch,
TP_ARGS(schid, cc)
);
-/**
- * s390_cio_rchp - Reset Channel Path (RCHP) instruction was performed
- * @chpid: Channel-Path Identifier
- * @cc: Condition code
- */
-TRACE_EVENT(s390_cio_rchp,
- TP_PROTO(struct chp_id chpid, int cc),
- TP_ARGS(chpid, cc),
- TP_STRUCT__entry(
- __field(u8, cssid)
- __field(u8, id)
- __field(int, cc)
- ),
- TP_fast_assign(
- __entry->cssid = chpid.cssid;
- __entry->id = chpid.id;
- __entry->cc = cc;
- ),
- TP_printk("chpid=%x.%02x cc=%d", __entry->cssid, __entry->id,
- __entry->cc
- )
-);
-
#define CHSC_MAX_REQUEST_LEN 64
#define CHSC_MAX_RESPONSE_LEN 64
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
new file mode 100644
index 000000000000..8c1d2357ef5b
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Async I/O region for vfio_ccw
+ *
+ * Copyright Red Hat, Inc. 2019
+ *
+ * Author(s): Cornelia Huck <cohuck@redhat.com>
+ */
+
+#include <linux/vfio.h>
+#include <linux/mdev.h>
+
+#include "vfio_ccw_private.h"
+
+static ssize_t vfio_ccw_async_region_read(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_cmd_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ mutex_lock(&private->io_mutex);
+ region = private->region[i].data;
+ if (copy_to_user(buf, (void *)region + pos, count))
+ ret = -EFAULT;
+ else
+ ret = count;
+ mutex_unlock(&private->io_mutex);
+ return ret;
+}
+
+static ssize_t vfio_ccw_async_region_write(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_cmd_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ if (!mutex_trylock(&private->io_mutex))
+ return -EAGAIN;
+
+ region = private->region[i].data;
+ if (copy_from_user((void *)region + pos, buf, count)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_ASYNC_REQ);
+
+ ret = region->ret_code ? region->ret_code : count;
+
+out_unlock:
+ mutex_unlock(&private->io_mutex);
+ return ret;
+}
+
+static void vfio_ccw_async_region_release(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region)
+{
+
+}
+
+const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
+ .read = vfio_ccw_async_region_read,
+ .write = vfio_ccw_async_region_write,
+ .release = vfio_ccw_async_region_release,
+};
+
+int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private)
+{
+ return vfio_ccw_register_dev_region(private,
+ VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD,
+ &vfio_ccw_async_region_ops,
+ sizeof(struct ccw_cmd_region),
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE,
+ private->cmd_region);
+}
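From userspace, the region is driven with plain pread()/pwrite() at the offset reported for it by VFIO_DEVICE_GET_REGION_INFO. A hedged sketch (discovery of the region offset through the region-info capability chain is omitted):

	#include <linux/vfio_ccw.h>	/* struct ccw_cmd_region, VFIO_CCW_ASYNC_CMD_* */
	#include <unistd.h>

	static int request_halt(int device_fd, off_t region_offset)
	{
		struct ccw_cmd_region cmd = {
			.command = VFIO_CCW_ASYNC_CMD_HSCH,	/* halt subchannel */
		};

		if (pwrite(device_fd, &cmd, sizeof(cmd), region_offset) != sizeof(cmd))
			return -1;
		if (pread(device_fd, &cmd, sizeof(cmd), region_offset) != sizeof(cmd))
			return -1;
		return cmd.ret_code;
	}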
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 384b3987eeb4..1d4c893ead23 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -16,12 +16,6 @@
#include "vfio_ccw_cp.h"
-/*
- * Max length for ccw chain.
- * XXX: Limit to 256, need to check more?
- */
-#define CCWCHAIN_LEN_MAX 256
-
struct pfn_array {
/* Starting guest physical I/O address. */
unsigned long pa_iova;
@@ -33,11 +27,6 @@ struct pfn_array {
int pa_nr;
};
-struct pfn_array_table {
- struct pfn_array *pat_pa;
- int pat_nr;
-};
-
struct ccwchain {
struct list_head next;
struct ccw1 *ch_ccw;
@@ -46,35 +35,29 @@ struct ccwchain {
/* Count of the valid ccws in chain. */
int ch_len;
/* Pinned PAGEs for the original data. */
- struct pfn_array_table *ch_pat;
+ struct pfn_array *ch_pa;
};
/*
- * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
+ * pfn_array_alloc() - alloc memory for PFNs
* @pa: pfn_array on which to perform the operation
- * @mdev: the mediated device to perform pin/unpin operations
* @iova: target guest physical address
* @len: number of bytes that should be pinned from @iova
*
- * Attempt to allocate memory for PFNs, and pin user pages in memory.
+ * Attempt to allocate memory for PFNs.
*
* Usage of pfn_array:
* We expect (pa_nr == 0) and (pa_iova_pfn == NULL); any field in
* this structure will be filled in by this function.
*
* Returns:
- * Number of pages pinned on success.
- * If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
- * returns -EINVAL.
- * If no pages were pinned, returns -errno.
+ * 0 if PFNs are allocated
+ * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
+ * -ENOMEM if alloc failed
*/
-static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
- u64 iova, unsigned int len)
+static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
{
- int i, ret = 0;
-
- if (!len)
- return 0;
+ int i;
if (pa->pa_nr || pa->pa_iova_pfn)
return -EINVAL;
@@ -94,8 +77,27 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
- for (i = 1; i < pa->pa_nr; i++)
+ pa->pa_pfn[0] = -1ULL;
+ for (i = 1; i < pa->pa_nr; i++) {
pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
+ pa->pa_pfn[i] = -1ULL;
+ }
+
+ return 0;
+}
+
+/*
+ * pfn_array_pin() - Pin user pages in memory
+ * @pa: pfn_array on which to perform the operation
+ * @mdev: the mediated device to perform pin operations
+ *
+ * Returns number of pages pinned upon success.
+ * If the pin request partially succeeds, or fails completely,
+ * all pages are left unpinned and a negative error value is returned.
+ */
+static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
+{
+ int ret = 0;
ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
@@ -112,8 +114,6 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
err_out:
pa->pa_nr = 0;
- kfree(pa->pa_iova_pfn);
- pa->pa_iova_pfn = NULL;
return ret;
}
@@ -121,60 +121,30 @@ err_out:
/* Unpin the pages before releasing the memory. */
static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
{
- vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+ /* Only unpin if any pages were pinned to begin with */
+ if (pa->pa_nr)
+ vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
pa->pa_nr = 0;
kfree(pa->pa_iova_pfn);
}
-static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
-{
- pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
- if (unlikely(ZERO_OR_NULL_PTR(pat->pat_pa))) {
- pat->pat_nr = 0;
- return -ENOMEM;
- }
-
- pat->pat_nr = nr;
-
- return 0;
-}
-
-static void pfn_array_table_unpin_free(struct pfn_array_table *pat,
- struct device *mdev)
-{
- int i;
-
- for (i = 0; i < pat->pat_nr; i++)
- pfn_array_unpin_free(pat->pat_pa + i, mdev);
-
- if (pat->pat_nr) {
- kfree(pat->pat_pa);
- pat->pat_pa = NULL;
- pat->pat_nr = 0;
- }
-}
-
-static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
- unsigned long iova)
+static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
{
- struct pfn_array *pa = pat->pat_pa;
unsigned long iova_pfn = iova >> PAGE_SHIFT;
- int i, j;
+ int i;
- for (i = 0; i < pat->pat_nr; i++, pa++)
- for (j = 0; j < pa->pa_nr; j++)
- if (pa->pa_iova_pfn[j] == iova_pfn)
- return true;
+ for (i = 0; i < pa->pa_nr; i++)
+ if (pa->pa_iova_pfn[i] == iova_pfn)
+ return true;
return false;
}
-/* Create the list idal words for a pfn_array_table. */
-static inline void pfn_array_table_idal_create_words(
- struct pfn_array_table *pat,
+/* Create the list of IDAL words for a pfn_array. */
+static inline void pfn_array_idal_create_words(
+ struct pfn_array *pa,
unsigned long *idaws)
{
- struct pfn_array *pa;
- int i, j, k;
+ int i;
/*
* Idal words (except the first one) rely on the memory being 4k
@@ -183,19 +153,36 @@ static inline void pfn_array_table_idal_create_words(
* there will be no problem here in simply using the physical address
* to create an idaw.
*/
- k = 0;
- for (i = 0; i < pat->pat_nr; i++) {
- pa = pat->pat_pa + i;
- for (j = 0; j < pa->pa_nr; j++) {
- idaws[k] = pa->pa_pfn[j] << PAGE_SHIFT;
- if (k == 0)
- idaws[k] += pa->pa_iova & (PAGE_SIZE - 1);
- k++;
+
+ for (i = 0; i < pa->pa_nr; i++)
+ idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
+
+ /* Adjust the first IDAW, since it may not start on a page boundary */
+ idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
+}
+
+static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
+{
+ struct ccw0 ccw0;
+ struct ccw1 *pccw1 = source;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ ccw0 = *(struct ccw0 *)pccw1;
+ if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
+ pccw1->cmd_code = CCW_CMD_TIC;
+ pccw1->flags = 0;
+ pccw1->count = 0;
+ } else {
+ pccw1->cmd_code = ccw0.cmd_code;
+ pccw1->flags = ccw0.flags;
+ pccw1->count = ccw0.count;
}
+ pccw1->cda = ccw0.cda;
+ pccw1++;
}
}
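For reference, the two CCW layouts being converted, paraphrased from arch/s390/include/asm/cio.h:

	struct ccw0 {			/* format-0: 24-bit data address */
		__u8  cmd_code;
		__u32 cda : 24;
		__u8  flags;
		__u8  reserved;
		__u16 count;
	} __attribute__((packed, aligned(8)));

	struct ccw1 {			/* format-1: 31-bit data address */
		__u8  cmd_code;
		__u8  flags;
		__u16 count;
		__u32 cda;
	} __attribute__((packed, aligned(8)));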
-
/*
* Within the domain (@mdev), copy @n bytes from a guest physical
* address (@iova) to a host physical address (@to).
@@ -209,9 +196,15 @@ static long copy_from_iova(struct device *mdev,
int i, ret;
unsigned long l, m;
- ret = pfn_array_alloc_pin(&pa, mdev, iova, n);
- if (ret <= 0)
+ ret = pfn_array_alloc(&pa, iova, n);
+ if (ret < 0)
+ return ret;
+
+ ret = pfn_array_pin(&pa, mdev);
+ if (ret < 0) {
+ pfn_array_unpin_free(&pa, mdev);
return ret;
+ }
l = n;
for (i = 0; i < pa.pa_nr; i++) {
@@ -235,55 +228,60 @@ static long copy_from_iova(struct device *mdev,
return l;
}
-static long copy_ccw_from_iova(struct channel_program *cp,
- struct ccw1 *to, u64 iova,
- unsigned long len)
-{
- struct ccw0 ccw0;
- struct ccw1 *pccw1;
- int ret;
- int i;
-
- ret = copy_from_iova(cp->mdev, to, iova, len * sizeof(struct ccw1));
- if (ret)
- return ret;
-
- if (!cp->orb.cmd.fmt) {
- pccw1 = to;
- for (i = 0; i < len; i++) {
- ccw0 = *(struct ccw0 *)pccw1;
- if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
- pccw1->cmd_code = CCW_CMD_TIC;
- pccw1->flags = 0;
- pccw1->count = 0;
- } else {
- pccw1->cmd_code = ccw0.cmd_code;
- pccw1->flags = ccw0.flags;
- pccw1->count = ccw0.count;
- }
- pccw1->cda = ccw0.cda;
- pccw1++;
- }
- }
-
- return ret;
-}
-
/*
* Helpers to operate ccwchain.
*/
-#define ccw_is_test(_ccw) (((_ccw)->cmd_code & 0x0F) == 0)
+#define ccw_is_read(_ccw) (((_ccw)->cmd_code & 0x03) == 0x02)
+#define ccw_is_read_backward(_ccw) (((_ccw)->cmd_code & 0x0F) == 0x0C)
+#define ccw_is_sense(_ccw) (((_ccw)->cmd_code & 0x0F) == CCW_CMD_BASIC_SENSE)
#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)
#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)
#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)
-
+#define ccw_is_skip(_ccw) ((_ccw)->flags & CCW_FLAG_SKIP)
#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))
/*
+ * ccw_does_data_transfer()
+ *
+ * Determine whether a CCW will move any data, such that the guest pages
+ * would need to be pinned before performing the I/O.
+ *
+ * Returns 1 if yes, 0 if no.
+ */
+static inline int ccw_does_data_transfer(struct ccw1 *ccw)
+{
+ /* If the count field is zero, then no data will be transferred */
+ if (ccw->count == 0)
+ return 0;
+
+ /* If the command is a NOP, then no data will be transferred */
+ if (ccw_is_noop(ccw))
+ return 0;
+
+ /* If the skip flag is off, then data will be transferred */
+ if (!ccw_is_skip(ccw))
+ return 1;
+
+ /*
+ * If the skip flag is on, it is only meaningful if the command
+ * code is a read, read backward, sense, or sense ID. In those
+ * cases, no data will be transferred.
+ */
+ if (ccw_is_read(ccw) || ccw_is_read_backward(ccw))
+ return 0;
+
+ if (ccw_is_sense(ccw))
+ return 0;
+
+ /* The skip flag is on, but it is ignored for this command code. */
+ return 1;
+}
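Worked examples of the rule above (command codes and flags are illustrative, taken from the channel architecture):

	struct ccw1 wr  = { .cmd_code = 0x01, .count = 4096 };	/* WRITE: transfers data */
	struct ccw1 rd  = { .cmd_code = 0x02, .count = 4096,
			    .flags = CCW_FLAG_SKIP };		/* READ + skip: no transfer */
	struct ccw1 nop = { .cmd_code = CCW_CMD_NOOP };		/* count == 0: no transfer */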
+
+/*
* is_cpa_within_range()
*
* @cpa: channel program address being questioned
@@ -319,7 +317,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
/* Make ccw address aligned to 8. */
size = ((sizeof(*chain) + 7L) & -8L) +
sizeof(*chain->ch_ccw) * len +
- sizeof(*chain->ch_pat) * len;
+ sizeof(*chain->ch_pa) * len;
chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
if (!chain)
return NULL;
@@ -328,7 +326,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
chain->ch_ccw = (struct ccw1 *)data;
data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
- chain->ch_pat = (struct pfn_array_table *)data;
+ chain->ch_pa = (struct pfn_array *)data;
chain->ch_len = len;
@@ -348,30 +346,12 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
struct ccw1 *ccw = chain->ch_ccw + idx;
- if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw))
- return;
- if (!ccw->count)
+ if (ccw_is_tic(ccw))
return;
kfree((void *)(u64)ccw->cda);
}
-/* Unpin the pages then free the memory resources. */
-static void cp_unpin_free(struct channel_program *cp)
-{
- struct ccwchain *chain, *temp;
- int i;
-
- list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
- for (i = 0; i < chain->ch_len; i++) {
- pfn_array_table_unpin_free(chain->ch_pat + i,
- cp->mdev);
- ccwchain_cda_free(chain, i);
- }
- ccwchain_free(chain);
- }
-}
-
/**
* ccwchain_calc_length - calculate the length of the ccw chain.
* @iova: guest physical address of the target ccw chain
@@ -387,25 +367,9 @@ static void cp_unpin_free(struct channel_program *cp)
*/
static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
{
- struct ccw1 *ccw, *p;
- int cnt;
-
- /*
- * Copy current chain from guest to host kernel.
- * Currently the chain length is limited to CCWCHAIN_LEN_MAX (256).
- * So copying 2K is enough (safe).
- */
- p = ccw = kcalloc(CCWCHAIN_LEN_MAX, sizeof(*ccw), GFP_KERNEL);
- if (!ccw)
- return -ENOMEM;
-
- cnt = copy_ccw_from_iova(cp, ccw, iova, CCWCHAIN_LEN_MAX);
- if (cnt) {
- kfree(ccw);
- return cnt;
- }
+ struct ccw1 *ccw = cp->guest_cp;
+ int cnt = 0;
- cnt = 0;
do {
cnt++;
@@ -414,10 +378,8 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
* orb specified one of the unsupported formats, we defer
* checking for IDAWs in unsupported formats to here.
*/
- if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
- kfree(p);
+ if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
return -EOPNOTSUPP;
- }
/*
* We want to keep counting if the current CCW has the
@@ -436,7 +398,6 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
if (cnt == CCWCHAIN_LEN_MAX + 1)
cnt = -EINVAL;
- kfree(p);
return cnt;
}
@@ -457,17 +418,23 @@ static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
static int ccwchain_loop_tic(struct ccwchain *chain,
struct channel_program *cp);
-static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
+static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
{
struct ccwchain *chain;
- int len, ret;
+ int len;
- /* May transfer to an existing chain. */
- if (tic_target_chain_exists(tic, cp))
- return 0;
+ /* Copy 2K (the most we support today) of possible CCWs */
+ len = copy_from_iova(cp->mdev, cp->guest_cp, cda,
+ CCWCHAIN_LEN_MAX * sizeof(struct ccw1));
+ if (len)
+ return len;
+
+ /* Convert any Format-0 CCWs to Format-1 */
+ if (!cp->orb.cmd.fmt)
+ convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX);
- /* Get chain length. */
- len = ccwchain_calc_length(tic->cda, cp);
+ /* Count the CCWs in the current chain */
+ len = ccwchain_calc_length(cda, cp);
if (len < 0)
return len;
@@ -475,14 +442,10 @@ static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
chain = ccwchain_alloc(cp, len);
if (!chain)
return -ENOMEM;
- chain->ch_iova = tic->cda;
+ chain->ch_iova = cda;
- /* Copy the new chain from user. */
- ret = copy_ccw_from_iova(cp, chain->ch_ccw, tic->cda, len);
- if (ret) {
- ccwchain_free(chain);
- return ret;
- }
+ /* Copy the actual CCWs into the new chain */
+ memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
/* Loop for tics on this new chain. */
return ccwchain_loop_tic(chain, cp);
@@ -500,7 +463,12 @@ static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
if (!ccw_is_tic(tic))
continue;
- ret = ccwchain_handle_tic(tic, cp);
+ /* May transfer to an existing chain. */
+ if (tic_target_chain_exists(tic, cp))
+ continue;
+
+ /* Build a ccwchain for the next segment */
+ ret = ccwchain_handle_ccw(tic->cda, cp);
if (ret)
return ret;
}
@@ -533,115 +501,90 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
struct channel_program *cp)
{
struct ccw1 *ccw;
- struct pfn_array_table *pat;
+ struct pfn_array *pa;
+ u64 iova;
unsigned long *idaws;
int ret;
+ int bytes = 1;
+ int idaw_nr, idal_len;
+ int i;
ccw = chain->ch_ccw + idx;
- if (!ccw->count) {
- /*
- * We just want the translation result of any direct ccw
- * to be an IDA ccw, so let's add the IDA flag for it.
- * Although the flag will be ignored by firmware.
- */
- ccw->flags |= CCW_FLAG_IDA;
- return 0;
- }
+ if (ccw->count)
+ bytes = ccw->count;
- /*
- * Pin data page(s) in memory.
- * The number of pages actually is the count of the idaws which will be
- * needed when translating a direct ccw to a idal ccw.
- */
- pat = chain->ch_pat + idx;
- ret = pfn_array_table_init(pat, 1);
- if (ret)
- goto out_init;
-
- ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
- if (ret < 0)
- goto out_unpin;
-
- /* Translate this direct ccw to a idal ccw. */
- idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
- if (!idaws) {
- ret = -ENOMEM;
- goto out_unpin;
+ /* Calculate size of IDAL */
+ if (ccw_is_idal(ccw)) {
+		/* Read first IDAW to see if it's 4K-aligned or not. */
+		/* All subsequent IDAWs will be 4K-aligned. */
+ ret = copy_from_iova(cp->mdev, &iova, ccw->cda, sizeof(iova));
+ if (ret)
+ return ret;
+ } else {
+ iova = ccw->cda;
}
- ccw->cda = (__u32) virt_to_phys(idaws);
- ccw->flags |= CCW_FLAG_IDA;
-
- pfn_array_table_idal_create_words(pat, idaws);
-
- return 0;
-
-out_unpin:
- pfn_array_table_unpin_free(pat, cp->mdev);
-out_init:
- ccw->cda = 0;
- return ret;
-}
-
-static int ccwchain_fetch_idal(struct ccwchain *chain,
- int idx,
- struct channel_program *cp)
-{
- struct ccw1 *ccw;
- struct pfn_array_table *pat;
- unsigned long *idaws;
- u64 idaw_iova;
- unsigned int idaw_nr, idaw_len;
- int i, ret;
-
- ccw = chain->ch_ccw + idx;
+ idaw_nr = idal_nr_words((void *)iova, bytes);
+ idal_len = idaw_nr * sizeof(*idaws);
- if (!ccw->count)
- return 0;
-
- /* Calculate size of idaws. */
- ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
- if (ret)
- return ret;
- idaw_nr = idal_nr_words((void *)(idaw_iova), ccw->count);
- idaw_len = idaw_nr * sizeof(*idaws);
-
- /* Pin data page(s) in memory. */
- pat = chain->ch_pat + idx;
- ret = pfn_array_table_init(pat, idaw_nr);
- if (ret)
- goto out_init;
-
- /* Translate idal ccw to use new allocated idaws. */
- idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
+ /* Allocate an IDAL from host storage */
+ idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
if (!idaws) {
ret = -ENOMEM;
- goto out_unpin;
+ goto out_init;
}
- ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idaw_len);
- if (ret)
+ /*
+	 * Allocate an array of pfns for pages to pin/translate.
+	 * The number of pages is actually the count of the IDAWs
+	 * required for the data transfer, since we only support
+	 * 4K IDAWs today.
+ */
+ pa = chain->ch_pa + idx;
+ ret = pfn_array_alloc(pa, iova, bytes);
+ if (ret < 0)
goto out_free_idaws;
- ccw->cda = virt_to_phys(idaws);
+ if (ccw_is_idal(ccw)) {
+ /* Copy guest IDAL into host IDAL */
+ ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idal_len);
+ if (ret)
+ goto out_unpin;
- for (i = 0; i < idaw_nr; i++) {
- idaw_iova = *(idaws + i);
+ /*
+ * Copy guest IDAWs into pfn_array, in case the memory they
+ * occupy is not contiguous.
+ */
+ for (i = 0; i < idaw_nr; i++)
+ pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
+ } else {
+ /*
+ * No action is required here; the iova addresses in pfn_array
+ * were initialized sequentially in pfn_array_alloc() beginning
+ * with the contents of ccw->cda.
+ */
+ }
- ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
- idaw_iova, 1);
+ if (ccw_does_data_transfer(ccw)) {
+ ret = pfn_array_pin(pa, cp->mdev);
if (ret < 0)
- goto out_free_idaws;
+ goto out_unpin;
+ } else {
+ pa->pa_nr = 0;
}
- pfn_array_table_idal_create_words(pat, idaws);
+ ccw->cda = (__u32) virt_to_phys(idaws);
+ ccw->flags |= CCW_FLAG_IDA;
+
+ /* Populate the IDAL with pinned/translated addresses from pfn */
+ pfn_array_idal_create_words(pa, idaws);
return 0;
+out_unpin:
+ pfn_array_unpin_free(pa, cp->mdev);
out_free_idaws:
kfree(idaws);
-out_unpin:
- pfn_array_table_unpin_free(pat, cp->mdev);
out_init:
ccw->cda = 0;
return ret;
@@ -659,15 +602,9 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
{
struct ccw1 *ccw = chain->ch_ccw + idx;
- if (ccw_is_test(ccw) || ccw_is_noop(ccw))
- return 0;
-
if (ccw_is_tic(ccw))
return ccwchain_fetch_tic(chain, idx, cp);
- if (ccw_is_idal(ccw))
- return ccwchain_fetch_idal(chain, idx, cp);
-
return ccwchain_fetch_direct(chain, idx, cp);
}
@@ -690,9 +627,7 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
*/
int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
{
- u64 iova = orb->cmd.cpa;
- struct ccwchain *chain;
- int len, ret;
+ int ret;
/*
* XXX:
@@ -705,33 +640,19 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
memcpy(&cp->orb, orb, sizeof(*orb));
cp->mdev = mdev;
- /* Get chain length. */
- len = ccwchain_calc_length(iova, cp);
- if (len < 0)
- return len;
-
- /* Alloc mem for the head chain. */
- chain = ccwchain_alloc(cp, len);
- if (!chain)
- return -ENOMEM;
- chain->ch_iova = iova;
-
- /* Copy the head chain from guest. */
- ret = copy_ccw_from_iova(cp, chain->ch_ccw, iova, len);
- if (ret) {
- ccwchain_free(chain);
- return ret;
- }
-
- /* Now loop for its TICs. */
- ret = ccwchain_loop_tic(chain, cp);
+ /* Build a ccwchain for the first CCW segment */
+ ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
if (ret)
- cp_unpin_free(cp);
+ cp_free(cp);
+
	/* It is safe to force: if it is not set but IDALs are used,
	 * ccwchain_calc_length() returns an error.
*/
cp->orb.cmd.c64 = 1;
+ if (!ret)
+ cp->initialized = true;
+
return ret;
}
@@ -746,7 +667,20 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
*/
void cp_free(struct channel_program *cp)
{
- cp_unpin_free(cp);
+ struct ccwchain *chain, *temp;
+ int i;
+
+ if (!cp->initialized)
+ return;
+
+ cp->initialized = false;
+ list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
+ for (i = 0; i < chain->ch_len; i++) {
+ pfn_array_unpin_free(chain->ch_pa + i, cp->mdev);
+ ccwchain_cda_free(chain, i);
+ }
+ ccwchain_free(chain);
+ }
}
/**
@@ -791,6 +725,10 @@ int cp_prefetch(struct channel_program *cp)
struct ccwchain *chain;
int len, idx, ret;
+ /* this is an error in the caller */
+ if (!cp->initialized)
+ return -EINVAL;
+
list_for_each_entry(chain, &cp->ccwchain_list, next) {
len = chain->ch_len;
for (idx = 0; idx < len; idx++) {
@@ -826,6 +764,10 @@ union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
struct ccwchain *chain;
struct ccw1 *cpa;
+ /* this is an error in the caller */
+ if (!cp->initialized)
+ return NULL;
+
orb = &cp->orb;
orb->cmd.intparm = intparm;
@@ -862,6 +804,9 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
u32 cpa = scsw->cmd.cpa;
u32 ccw_head;
+ if (!cp->initialized)
+ return;
+
/*
* LATER:
* For now, only update the cmd.cpa part. We may need to deal with
@@ -870,7 +815,11 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
*/
list_for_each_entry(chain, &cp->ccwchain_list, next) {
ccw_head = (u32)(u64)chain->ch_ccw;
- if (is_cpa_within_range(cpa, ccw_head, chain->ch_len)) {
+ /*
+ * On successful execution, cpa points just beyond the end
+ * of the chain.
+ */
+ if (is_cpa_within_range(cpa, ccw_head, chain->ch_len + 1)) {
/*
* (cpa - ccw_head) is the offset value of the host
* physical ccw to its chain head.
@@ -898,10 +847,12 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
struct ccwchain *chain;
int i;
+ if (!cp->initialized)
+ return false;
+
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
- if (pfn_array_table_iova_pinned(chain->ch_pat + i,
- iova))
+ if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
return true;
}
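
The IDAW count computed in ccwchain_fetch_direct() above comes from idal_nr_words(), which rounds the transfer up to whole 4K blocks while accounting for the offset of the first byte within its block. A minimal, standalone sketch of that arithmetic (assuming 4K IDAWs, the only format the code above supports):

    #include <stdio.h>

    #define IDA_BLOCK_SIZE 4096UL

    /* Number of 4K IDAWs needed to describe [addr, addr + len) */
    static unsigned long idaw_count(unsigned long addr, unsigned long len)
    {
        return ((addr & (IDA_BLOCK_SIZE - 1)) + len + IDA_BLOCK_SIZE - 1)
            / IDA_BLOCK_SIZE;
    }

    int main(void)
    {
        /* 4096 bytes starting 8 bytes before a block boundary: 2 IDAWs */
        printf("%lu\n", idaw_count(0x1ff8, 4096));
        /* a single byte anywhere: 1 IDAW */
        printf("%lu\n", idaw_count(0x2000, 1));
        return 0;
    }
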
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index a4b74fb1aa57..7cdc38049033 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -16,11 +16,18 @@
#include "orb.h"
+/*
+ * Max length for ccw chain.
+ * XXX: Limit to 256, need to check more?
+ */
+#define CCWCHAIN_LEN_MAX 256
+
/**
* struct channel_program - manage information for channel program
* @ccwchain_list: list head of ccwchains
* @orb: orb for the currently processed ssch request
* @mdev: the mediated device to perform page pinning/unpinning
+ * @initialized: whether this instance is actually initialized
*
 * @ccwchain_list is the head of a ccwchain list, that contains the
 * translated result of the guest channel program pointed to by
@@ -30,6 +37,8 @@ struct channel_program {
struct list_head ccwchain_list;
union orb orb;
struct device *mdev;
+ bool initialized;
+ struct ccw1 *guest_cp;
};
extern int cp_init(struct channel_program *cp, struct device *mdev,
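
The 2K figure in the "Copy 2K (the most we support today)" comment earlier follows directly from these definitions: 256 chained CCWs of 8 bytes each. A hypothetical compile-time check making the relationship explicit (the format-1 CCW is an 8-byte command code/flags/count/data-address quadruple):

    #include <assert.h>
    #include <stdint.h>

    /* Format-1 CCW: 8 bytes */
    struct ccw1 {
        uint8_t  cmd_code;
        uint8_t  flags;
        uint16_t count;
        uint32_t cda;    /* 31-bit data address */
    };

    #define CCWCHAIN_LEN_MAX 256

    static_assert(CCWCHAIN_LEN_MAX * sizeof(struct ccw1) == 2048,
                  "guest_cp scratch buffer is 2K");
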
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 0b3b9de45c60..2b90a5ecaeb9 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -3,9 +3,11 @@
* VFIO based Physical Subchannel device driver
*
* Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
*/
#include <linux/module.h>
@@ -23,6 +25,7 @@
struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
+static struct kmem_cache *vfio_ccw_cmd_region;
/*
* Helpers
@@ -40,26 +43,30 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
if (ret != -EBUSY)
goto out_unlock;
+ iretry = 255;
do {
- iretry = 255;
ret = cio_cancel_halt_clear(sch, &iretry);
- while (ret == -EBUSY) {
- /*
- * Flush all I/O and wait for
- * cancel/halt/clear completion.
- */
- private->completion = &completion;
- spin_unlock_irq(sch->lock);
- wait_for_completion_timeout(&completion, 3*HZ);
+ if (ret == -EIO) {
+ pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ break;
+ }
+
+ /*
+ * Flush all I/O and wait for
+ * cancel/halt/clear completion.
+ */
+ private->completion = &completion;
+ spin_unlock_irq(sch->lock);
- spin_lock_irq(sch->lock);
- private->completion = NULL;
- flush_workqueue(vfio_ccw_work_q);
- ret = cio_cancel_halt_clear(sch, &iretry);
- };
+ if (ret == -EBUSY)
+ wait_for_completion_timeout(&completion, 3*HZ);
+ private->completion = NULL;
+ flush_workqueue(vfio_ccw_work_q);
+ spin_lock_irq(sch->lock);
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
out_unlock:
@@ -84,13 +91,15 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
if (is_final)
cp_free(&private->cp);
}
+ mutex_lock(&private->io_mutex);
memcpy(private->io_region->irb_area, irb, sizeof(*irb));
-
- if (private->io_trigger)
- eventfd_signal(private->io_trigger, 1);
+ mutex_unlock(&private->io_mutex);
if (private->mdev && is_final)
private->state = VFIO_CCW_STATE_IDLE;
+
+ if (private->io_trigger)
+ eventfd_signal(private->io_trigger, 1);
}
/*
@@ -108,7 +117,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
{
struct pmcw *pmcw = &sch->schib.pmcw;
struct vfio_ccw_private *private;
- int ret;
+ int ret = -ENOMEM;
if (pmcw->qf) {
dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
@@ -120,15 +129,24 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (!private)
return -ENOMEM;
+ private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
+ GFP_KERNEL);
+ if (!private->cp.guest_cp)
+ goto out_free;
+
private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
GFP_KERNEL | GFP_DMA);
- if (!private->io_region) {
- kfree(private);
- return -ENOMEM;
- }
+ if (!private->io_region)
+ goto out_free;
+
+ private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
+ GFP_KERNEL | GFP_DMA);
+ if (!private->cmd_region)
+ goto out_free;
private->sch = sch;
dev_set_drvdata(&sch->dev, private);
+ mutex_init(&private->io_mutex);
spin_lock_irq(sch->lock);
private->state = VFIO_CCW_STATE_NOT_OPER;
@@ -152,7 +170,11 @@ out_disable:
cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
- kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ if (private->cmd_region)
+ kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
+ if (private->io_region)
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ kfree(private->cp.guest_cp);
kfree(private);
return ret;
}
@@ -167,7 +189,9 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
dev_set_drvdata(&sch->dev, NULL);
+ kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ kfree(private->cp.guest_cp);
kfree(private);
return 0;
@@ -241,7 +265,7 @@ static struct css_driver vfio_ccw_sch_driver = {
static int __init vfio_ccw_sch_init(void)
{
- int ret;
+ int ret = -ENOMEM;
vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
if (!vfio_ccw_work_q)
@@ -251,20 +275,30 @@ static int __init vfio_ccw_sch_init(void)
sizeof(struct ccw_io_region), 0,
SLAB_ACCOUNT, 0,
sizeof(struct ccw_io_region), NULL);
- if (!vfio_ccw_io_region) {
- destroy_workqueue(vfio_ccw_work_q);
- return -ENOMEM;
- }
+ if (!vfio_ccw_io_region)
+ goto out_err;
+
+ vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
+ sizeof(struct ccw_cmd_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_cmd_region), NULL);
+ if (!vfio_ccw_cmd_region)
+ goto out_err;
isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver);
if (ret) {
isc_unregister(VFIO_CCW_ISC);
- kmem_cache_destroy(vfio_ccw_io_region);
- destroy_workqueue(vfio_ccw_work_q);
+ goto out_err;
}
return ret;
+
+out_err:
+ kmem_cache_destroy(vfio_ccw_cmd_region);
+ kmem_cache_destroy(vfio_ccw_io_region);
+ destroy_workqueue(vfio_ccw_work_q);
+ return ret;
}
static void __exit vfio_ccw_sch_exit(void)
@@ -272,6 +306,7 @@ static void __exit vfio_ccw_sch_exit(void)
css_driver_unregister(&vfio_ccw_sch_driver);
isc_unregister(VFIO_CCW_ISC);
kmem_cache_destroy(vfio_ccw_io_region);
+ kmem_cache_destroy(vfio_ccw_cmd_region);
destroy_workqueue(vfio_ccw_work_q);
}
module_init(vfio_ccw_sch_init);
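
The single out_err label in vfio_ccw_sch_init() above relies on kmem_cache_destroy() accepting a NULL pointer, so a cache that was never created is skipped without a guard. A minimal sketch of the same unwind pattern, with free() (also NULL-safe) standing in for the cache destructor and all names hypothetical:

    #include <stdlib.h>

    struct caches {
        void *io_region;
        void *cmd_region;
    };

    /* Returns 0 on success, -1 on failure; one unwind path suffices
     * because free(NULL) is a no-op. */
    static int caches_init(struct caches *c)
    {
        c->io_region = NULL;
        c->cmd_region = NULL;

        c->io_region = malloc(64);
        if (!c->io_region)
            goto out_err;
        c->cmd_region = malloc(64);
        if (!c->cmd_region)
            goto out_err;
        return 0;

    out_err:
        free(c->cmd_region);
        free(c->io_region);
        return -1;
    }
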
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index cab17865aafe..49d9d3da0282 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -3,8 +3,10 @@
* Finite state machine for vfio-ccw device handling
*
* Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
*/
#include <linux/vfio.h>
@@ -28,9 +30,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
sch = private->sch;
spin_lock_irqsave(sch->lock, flags);
- private->state = VFIO_CCW_STATE_BUSY;
orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
+ if (!orb) {
+ ret = -EIO;
+ goto out;
+ }
/* Issue "Start Subchannel" */
ccode = ssch(sch->schid, orb);
@@ -42,6 +47,7 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
*/
sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
ret = 0;
+ private->state = VFIO_CCW_STATE_CP_PENDING;
break;
case 1: /* Status pending */
case 2: /* Busy */
@@ -64,6 +70,76 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
default:
ret = ccode;
}
+out:
+ spin_unlock_irqrestore(sch->lock, flags);
+ return ret;
+}
+
+static int fsm_do_halt(struct vfio_ccw_private *private)
+{
+ struct subchannel *sch;
+ unsigned long flags;
+ int ccode;
+ int ret;
+
+ sch = private->sch;
+
+ spin_lock_irqsave(sch->lock, flags);
+
+ /* Issue "Halt Subchannel" */
+ ccode = hsch(sch->schid);
+
+ switch (ccode) {
+ case 0:
+ /*
+ * Initialize device status information
+ */
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
+ ret = 0;
+ break;
+ case 1: /* Status pending */
+ case 2: /* Busy */
+ ret = -EBUSY;
+ break;
+ case 3: /* Device not operational */
+ ret = -ENODEV;
+ break;
+ default:
+ ret = ccode;
+ }
+ spin_unlock_irqrestore(sch->lock, flags);
+ return ret;
+}
+
+static int fsm_do_clear(struct vfio_ccw_private *private)
+{
+ struct subchannel *sch;
+ unsigned long flags;
+ int ccode;
+ int ret;
+
+ sch = private->sch;
+
+ spin_lock_irqsave(sch->lock, flags);
+
+ /* Issue "Clear Subchannel" */
+ ccode = csch(sch->schid);
+
+ switch (ccode) {
+ case 0:
+ /*
+ * Initialize device status information
+ */
+ sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
+ /* TODO: check what else we might need to clear */
+ ret = 0;
+ break;
+ case 3: /* Device not operational */
+ ret = -ENODEV;
+ break;
+ default:
+ ret = ccode;
+ }
spin_unlock_irqrestore(sch->lock, flags);
return ret;
}
@@ -102,6 +178,30 @@ static void fsm_io_busy(struct vfio_ccw_private *private,
private->io_region->ret_code = -EBUSY;
}
+static void fsm_io_retry(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ private->io_region->ret_code = -EAGAIN;
+}
+
+static void fsm_async_error(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct ccw_cmd_region *cmd_region = private->cmd_region;
+
+ pr_err("vfio-ccw: FSM: %s request from state:%d\n",
+ cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
+ cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
+ "<unknown>", private->state);
+ cmd_region->ret_code = -EIO;
+}
+
+static void fsm_async_retry(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ private->cmd_region->ret_code = -EAGAIN;
+}
+
static void fsm_disabled_irq(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
@@ -130,8 +230,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
struct mdev_device *mdev = private->mdev;
char *errstr = "request";
- private->state = VFIO_CCW_STATE_BUSY;
-
+ private->state = VFIO_CCW_STATE_CP_PROCESSING;
memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
@@ -166,22 +265,42 @@ static void fsm_io_request(struct vfio_ccw_private *private,
}
return;
} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
- /* XXX: Handle halt. */
+ /* halt is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
- /* XXX: Handle clear. */
+ /* clear is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
}
err_out:
- private->state = VFIO_CCW_STATE_IDLE;
trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
io_region->ret_code, errstr);
}
/*
+ * Deal with an async request from userspace.
+ */
+static void fsm_async_request(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct ccw_cmd_region *cmd_region = private->cmd_region;
+
+ switch (cmd_region->command) {
+ case VFIO_CCW_ASYNC_CMD_HSCH:
+ cmd_region->ret_code = fsm_do_halt(private);
+ break;
+ case VFIO_CCW_ASYNC_CMD_CSCH:
+ cmd_region->ret_code = fsm_do_clear(private);
+ break;
+ default:
+ /* should not happen? */
+ cmd_region->ret_code = -EINVAL;
+ }
+}
+
+/*
* Got an interrupt for a normal io (state busy).
*/
static void fsm_irq(struct vfio_ccw_private *private,
@@ -204,21 +323,31 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
[VFIO_CCW_STATE_NOT_OPER] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_nop,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
},
[VFIO_CCW_STATE_STANDBY] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
},
[VFIO_CCW_STATE_IDLE] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+ [VFIO_CCW_STATE_CP_PROCESSING] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
},
- [VFIO_CCW_STATE_BUSY] = {
+ [VFIO_CCW_STATE_CP_PENDING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
},
};
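
The jump table above dispatches on a (state, event) pair, so adding the CP_PROCESSING and CP_PENDING states is just a matter of filling in two new rows; every event slot must be populated for every state. A toy version of the same dispatch shape (states, events, and handlers hypothetical):

    #include <stdio.h>

    enum state { ST_IDLE, ST_BUSY, NR_STATES };
    enum event { EV_REQ, EV_IRQ, NR_EVENTS };

    typedef void fsm_func_t(enum state *state);

    static void do_start(enum state *state) { *state = ST_BUSY; }
    static void do_retry(enum state *state) { printf("busy, retry\n"); }
    static void do_done(enum state *state)  { *state = ST_IDLE; }
    static void do_nop(enum state *state)   { }

    static fsm_func_t *jumptable[NR_STATES][NR_EVENTS] = {
        [ST_IDLE] = { [EV_REQ] = do_start, [EV_IRQ] = do_nop  },
        [ST_BUSY] = { [EV_REQ] = do_retry, [EV_IRQ] = do_done },
    };

    static void fsm_event(enum state *state, enum event event)
    {
        jumptable[*state][event](state);
    }

    int main(void)
    {
        enum state state = ST_IDLE;

        fsm_event(&state, EV_REQ);  /* IDLE -> BUSY */
        fsm_event(&state, EV_REQ);  /* prints "busy, retry" */
        fsm_event(&state, EV_IRQ);  /* BUSY -> IDLE */
        return 0;
    }
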
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index f673e106c041..5eb61116ca6f 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -3,13 +3,17 @@
* Physical device callbacks for vfio_ccw
*
* Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
*/
#include <linux/vfio.h>
#include <linux/mdev.h>
+#include <linux/nospec.h>
+#include <linux/slab.h>
#include "vfio_ccw_private.h"
@@ -130,11 +134,12 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
(private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_mdev_reset(mdev))
+ if (!vfio_ccw_sch_quiesce(private->sch))
private->state = VFIO_CCW_STATE_STANDBY;
/* The state will be NOT_OPER on error. */
}
+ cp_free(&private->cp);
private->mdev = NULL;
atomic_inc(&private->avail);
@@ -146,20 +151,66 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev)
struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev));
unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+ int ret;
private->nb.notifier_call = vfio_ccw_mdev_notifier;
- return vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
- &events, &private->nb);
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &events, &private->nb);
+ if (ret)
+ return ret;
+
+ ret = vfio_ccw_register_async_dev_regions(private);
+ if (ret)
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &private->nb);
+ return ret;
}
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev));
+ int i;
+ if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
+ (private->state != VFIO_CCW_STATE_STANDBY)) {
+ if (!vfio_ccw_mdev_reset(mdev))
+ private->state = VFIO_CCW_STATE_STANDBY;
+ /* The state will be NOT_OPER on error. */
+ }
+
+ cp_free(&private->cp);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
&private->nb);
+
+ for (i = 0; i < private->num_regions; i++)
+ private->region[i].ops->release(private, &private->region[i]);
+
+ private->num_regions = 0;
+ kfree(private->region);
+ private->region = NULL;
+}
+
+static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_io_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ mutex_lock(&private->io_mutex);
+ region = private->io_region;
+ if (copy_to_user(buf, (void *)region + pos, count))
+ ret = -EFAULT;
+ else
+ ret = count;
+ mutex_unlock(&private->io_mutex);
+ return ret;
}
static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
@@ -167,18 +218,54 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
size_t count,
loff_t *ppos)
{
+ unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
struct vfio_ccw_private *private;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
+
+ if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
+ return -EINVAL;
+
+ switch (index) {
+ case VFIO_CCW_CONFIG_REGION_INDEX:
+ return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
+ default:
+ index -= VFIO_CCW_NUM_REGIONS;
+ return private->region[index].ops->read(private, buf, count,
+ ppos);
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_io_region *region;
+ int ret;
- if (*ppos + count > sizeof(*region))
+ if (pos + count > sizeof(*region))
return -EINVAL;
- private = dev_get_drvdata(mdev_parent_dev(mdev));
+ if (!mutex_trylock(&private->io_mutex))
+ return -EAGAIN;
+
region = private->io_region;
- if (copy_to_user(buf, (void *)region + *ppos, count))
- return -EFAULT;
+ if (copy_from_user((void *)region + pos, buf, count)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
- return count;
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
+ if (region->ret_code != 0)
+ private->state = VFIO_CCW_STATE_IDLE;
+ ret = (region->ret_code != 0) ? region->ret_code : count;
+
+out_unlock:
+ mutex_unlock(&private->io_mutex);
+ return ret;
}
static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
@@ -186,42 +273,47 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
size_t count,
loff_t *ppos)
{
+ unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
struct vfio_ccw_private *private;
- struct ccw_io_region *region;
-
- if (*ppos + count > sizeof(*region))
- return -EINVAL;
private = dev_get_drvdata(mdev_parent_dev(mdev));
- if (private->state != VFIO_CCW_STATE_IDLE)
- return -EACCES;
- region = private->io_region;
- if (copy_from_user((void *)region + *ppos, buf, count))
- return -EFAULT;
+ if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
+ return -EINVAL;
- vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
- if (region->ret_code != 0) {
- private->state = VFIO_CCW_STATE_IDLE;
- return region->ret_code;
+ switch (index) {
+ case VFIO_CCW_CONFIG_REGION_INDEX:
+ return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
+ default:
+ index -= VFIO_CCW_NUM_REGIONS;
+ return private->region[index].ops->write(private, buf, count,
+ ppos);
}
- return count;
+ return -EINVAL;
}
-static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info)
+static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
+ struct mdev_device *mdev)
{
+ struct vfio_ccw_private *private;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
- info->num_regions = VFIO_CCW_NUM_REGIONS;
+ info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
info->num_irqs = VFIO_CCW_NUM_IRQS;
return 0;
}
static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
- u16 *cap_type_id,
- void **cap_type)
+ struct mdev_device *mdev,
+ unsigned long arg)
{
+ struct vfio_ccw_private *private;
+ int i;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
switch (info->index) {
case VFIO_CCW_CONFIG_REGION_INDEX:
info->offset = 0;
@@ -229,9 +321,55 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
info->flags = VFIO_REGION_INFO_FLAG_READ
| VFIO_REGION_INFO_FLAG_WRITE;
return 0;
- default:
- return -EINVAL;
+ default: /* all other regions are handled via capability chain */
+ {
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ struct vfio_region_info_cap_type cap_type = {
+ .header.id = VFIO_REGION_INFO_CAP_TYPE,
+ .header.version = 1 };
+ int ret;
+
+ if (info->index >=
+ VFIO_CCW_NUM_REGIONS + private->num_regions)
+ return -EINVAL;
+
+ info->index = array_index_nospec(info->index,
+ VFIO_CCW_NUM_REGIONS +
+ private->num_regions);
+
+ i = info->index - VFIO_CCW_NUM_REGIONS;
+
+ info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
+ info->size = private->region[i].size;
+ info->flags = private->region[i].flags;
+
+ cap_type.type = private->region[i].type;
+ cap_type.subtype = private->region[i].subtype;
+
+ ret = vfio_info_add_capability(&caps, &cap_type.header,
+ sizeof(cap_type));
+ if (ret)
+ return ret;
+
+ info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
+ if (info->argsz < sizeof(*info) + caps.size) {
+ info->argsz = sizeof(*info) + caps.size;
+ info->cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(*info));
+ if (copy_to_user((void __user *)arg + sizeof(*info),
+ caps.buf, caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info->cap_offset = sizeof(*info);
+ }
+
+ kfree(caps.buf);
+
}
+ }
+ return 0;
}
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
@@ -308,6 +446,32 @@ static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
}
}
+int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
+ unsigned int subtype,
+ const struct vfio_ccw_regops *ops,
+ size_t size, u32 flags, void *data)
+{
+ struct vfio_ccw_region *region;
+
+ region = krealloc(private->region,
+ (private->num_regions + 1) * sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ private->region = region;
+ private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
+ private->region[private->num_regions].subtype = subtype;
+ private->region[private->num_regions].ops = ops;
+ private->region[private->num_regions].size = size;
+ private->region[private->num_regions].flags = flags;
+ private->region[private->num_regions].data = data;
+
+ private->num_regions++;
+
+ return 0;
+}
+
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
unsigned int cmd,
unsigned long arg)
@@ -328,7 +492,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
if (info.argsz < minsz)
return -EINVAL;
- ret = vfio_ccw_mdev_get_device_info(&info);
+ ret = vfio_ccw_mdev_get_device_info(&info, mdev);
if (ret)
return ret;
@@ -337,8 +501,6 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
case VFIO_DEVICE_GET_REGION_INFO:
{
struct vfio_region_info info;
- u16 cap_type_id = 0;
- void *cap_type = NULL;
minsz = offsetofend(struct vfio_region_info, offset);
@@ -348,8 +510,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
if (info.argsz < minsz)
return -EINVAL;
- ret = vfio_ccw_mdev_get_region_info(&info, &cap_type_id,
- &cap_type);
+ ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
if (ret)
return ret;
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 08e9a7dc9176..f1092c3dc1b1 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -3,9 +3,11 @@
* Private stuff for vfio_ccw driver
*
* Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
*/
#ifndef _VFIO_CCW_PRIVATE_H_
@@ -19,6 +21,40 @@
#include "css.h"
#include "vfio_ccw_cp.h"
+#define VFIO_CCW_OFFSET_SHIFT 10
+#define VFIO_CCW_OFFSET_TO_INDEX(off) (off >> VFIO_CCW_OFFSET_SHIFT)
+#define VFIO_CCW_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_CCW_OFFSET_SHIFT)
+#define VFIO_CCW_OFFSET_MASK (((u64)(1) << VFIO_CCW_OFFSET_SHIFT) - 1)
+
+/* capability chain handling similar to vfio-pci */
+struct vfio_ccw_private;
+struct vfio_ccw_region;
+
+struct vfio_ccw_regops {
+ ssize_t (*read)(struct vfio_ccw_private *private, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*write)(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count, loff_t *ppos);
+ void (*release)(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region);
+};
+
+struct vfio_ccw_region {
+ u32 type;
+ u32 subtype;
+ const struct vfio_ccw_regops *ops;
+ void *data;
+ size_t size;
+ u32 flags;
+};
+
+int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
+ unsigned int subtype,
+ const struct vfio_ccw_regops *ops,
+ size_t size, u32 flags, void *data);
+
+int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private);
+
/**
* struct vfio_ccw_private
* @sch: pointer to the subchannel
@@ -28,6 +64,10 @@
* @mdev: pointer to the mediated device
* @nb: notifier for vfio events
* @io_region: MMIO region to input/output I/O arguments/results
+ * @io_mutex: protect against concurrent update of I/O regions
+ * @region: additional regions for other subchannel operations
+ * @cmd_region: MMIO region for asynchronous I/O commands other than START
+ * @num_regions: number of additional regions
* @cp: channel program for the current I/O operation
* @irb: irb info received from interrupt
* @scsw: scsw info
@@ -42,6 +82,10 @@ struct vfio_ccw_private {
struct mdev_device *mdev;
struct notifier_block nb;
struct ccw_io_region *io_region;
+ struct mutex io_mutex;
+ struct vfio_ccw_region *region;
+ struct ccw_cmd_region *cmd_region;
+ int num_regions;
struct channel_program cp;
struct irb irb;
@@ -63,7 +107,8 @@ enum vfio_ccw_state {
VFIO_CCW_STATE_NOT_OPER,
VFIO_CCW_STATE_STANDBY,
VFIO_CCW_STATE_IDLE,
- VFIO_CCW_STATE_BUSY,
+ VFIO_CCW_STATE_CP_PROCESSING,
+ VFIO_CCW_STATE_CP_PENDING,
/* last element! */
NR_VFIO_CCW_STATES
};
@@ -75,6 +120,7 @@ enum vfio_ccw_event {
VFIO_CCW_EVENT_NOT_OPER,
VFIO_CCW_EVENT_IO_REQ,
VFIO_CCW_EVENT_INTERRUPT,
+ VFIO_CCW_EVENT_ASYNC_REQ,
/* last element! */
NR_VFIO_CCW_EVENTS
};
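
The OFFSET/INDEX macros above split the 64-bit file offset so that the bits above bit 9 select a region and the low 10 bits address bytes within it; vfio_ccw_mdev_read()/write() recover both halves from *ppos. A quick round-trip check of that encoding:

    #include <stdint.h>
    #include <stdio.h>

    #define VFIO_CCW_OFFSET_SHIFT 10
    #define VFIO_CCW_OFFSET_TO_INDEX(off)  ((off) >> VFIO_CCW_OFFSET_SHIFT)
    #define VFIO_CCW_INDEX_TO_OFFSET(i)    ((uint64_t)(i) << VFIO_CCW_OFFSET_SHIFT)
    #define VFIO_CCW_OFFSET_MASK  (((uint64_t)1 << VFIO_CCW_OFFSET_SHIFT) - 1)

    int main(void)
    {
        uint64_t off = VFIO_CCW_INDEX_TO_OFFSET(1) + 24; /* region 1, byte 24 */

        printf("index=%llu pos=%llu\n",
               (unsigned long long)VFIO_CCW_OFFSET_TO_INDEX(off),
               (unsigned long long)(off & VFIO_CCW_OFFSET_MASK));
        return 0;   /* prints index=1 pos=24 */
    }
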
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 1546389d71db..a76b8a8bcbbb 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -116,7 +116,7 @@ static int user_set_domain;
static struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
-static void ap_interrupt_handler(struct airq_struct *airq);
+static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
static int ap_airq_flag;
@@ -208,7 +208,6 @@ static inline int ap_query_configuration(struct ap_config_info *info)
return -EINVAL;
return ap_qci(info);
}
-EXPORT_SYMBOL(ap_query_configuration);
/**
* ap_init_configuration(): Allocate and query configuration array.
@@ -254,19 +253,37 @@ static inline int ap_test_config_card_id(unsigned int id)
}
/*
- * ap_test_config_domain(): Test, whether an AP usage domain is configured.
+ * ap_test_config_usage_domain(): Test whether an AP usage domain
+ * is configured.
* @domain AP usage domain ID
*
* Returns 0 if the usage domain is not configured
* 1 if the usage domain is configured or
* if the configuration information is not available
*/
-static inline int ap_test_config_domain(unsigned int domain)
+int ap_test_config_usage_domain(unsigned int domain)
{
if (!ap_configuration) /* QCI not supported */
return domain < 16;
return ap_test_config(ap_configuration->aqm, domain);
}
+EXPORT_SYMBOL(ap_test_config_usage_domain);
+
+/*
+ * ap_test_config_ctrl_domain(): Test whether an AP control domain
+ * is configured.
+ * @domain AP control domain ID
+ *
+ * Returns 1 if the control domain is configured
+ * 0 in all other cases
+ */
+int ap_test_config_ctrl_domain(unsigned int domain)
+{
+ if (!ap_configuration) /* QCI not supported */
+ return 0;
+ return ap_test_config(ap_configuration->adm, domain);
+}
+EXPORT_SYMBOL(ap_test_config_ctrl_domain);
/**
* ap_query_queue(): Check if an AP queue is available.
@@ -393,7 +410,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
* ap_interrupt_handler() - Schedule ap_tasklet on interrupt
* @airq: pointer to adapter interrupt descriptor
*/
-static void ap_interrupt_handler(struct airq_struct *airq)
+static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
{
inc_irq_stat(IRQIO_APB);
if (!ap_suspend_flag)
@@ -1267,7 +1284,7 @@ static void ap_select_domain(void)
best_domain = -1;
max_count = 0;
for (i = 0; i < AP_DOMAINS; i++) {
- if (!ap_test_config_domain(i) ||
+ if (!ap_test_config_usage_domain(i) ||
!test_bit_inv(i, ap_perms.aqm))
continue;
count = 0;
@@ -1338,16 +1355,16 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
* Helper function to be used with bus_find_dev
* matches for the card device with the given id
*/
-static int __match_card_device_with_id(struct device *dev, void *data)
+static int __match_card_device_with_id(struct device *dev, const void *data)
{
- return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long) data;
+ return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *) data;
}
/*
* Helper function to be used with bus_find_dev
* matches for the queue device with a given qid
*/
-static int __match_queue_device_with_qid(struct device *dev, void *data)
+static int __match_queue_device_with_qid(struct device *dev, const void *data)
{
return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data;
}
@@ -1356,7 +1373,7 @@ static int __match_queue_device_with_qid(struct device *dev, void *data)
* Helper function to be used with bus_find_dev
* matches any queue device with given queue id
*/
-static int __match_queue_device_with_queue_id(struct device *dev, void *data)
+static int __match_queue_device_with_queue_id(struct device *dev, const void *data)
{
return is_queue_dev(dev)
&& AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long) data;
@@ -1442,7 +1459,7 @@ static void _ap_scan_bus_adapter(int id)
(void *)(long) qid,
__match_queue_device_with_qid);
aq = dev ? to_ap_queue(dev) : NULL;
- if (!ap_test_config_domain(dom)) {
+ if (!ap_test_config_usage_domain(dom)) {
if (dev) {
/* Queue device exists but has been
* removed from configuration.
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 15a98a673c5c..6f3cf37776ca 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -251,6 +251,9 @@ void ap_wait(enum ap_wait wait);
void ap_request_timeout(struct timer_list *t);
void ap_bus_force_rescan(void);
+int ap_test_config_usage_domain(unsigned int domain);
+int ap_test_config_ctrl_domain(unsigned int domain);
+
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
void ap_queue_prepare_remove(struct ap_queue *aq);
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 45eb0c14b880..7f418d2d8cdf 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -690,7 +690,7 @@ int pkey_clr2protkey(u32 keytype,
*/
if (!cpacf_test_func(&pckmo_functions, fc)) {
DEBUG_ERR("%s pckmo functions not available\n", __func__);
- return -EOPNOTSUPP;
+ return -ENODEV;
}
/* prepare param block */
@@ -1695,15 +1695,15 @@ static int __init pkey_init(void)
* are able to work with protected keys.
*/
if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
- return -EOPNOTSUPP;
+ return -ENODEV;
/* check for kmc instructions available */
if (!cpacf_query(CPACF_KMC, &kmc_functions))
- return -EOPNOTSUPP;
+ return -ENODEV;
if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256))
- return -EOPNOTSUPP;
+ return -ENODEV;
pkey_debug_init();
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index e9824c35c34f..003662aa8060 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -5,6 +5,7 @@
* Copyright IBM Corp. 2018
*
* Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
*/
#include <linux/module.h>
@@ -40,14 +41,45 @@ static struct ap_device_id ap_queue_ids[] = {
MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);
+/**
+ * vfio_ap_queue_dev_probe:
+ *
+ * Allocate a vfio_ap_queue structure and associate it
+ * with the device as driver_data.
+ */
static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
{
+ struct vfio_ap_queue *q;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+ dev_set_drvdata(&apdev->device, q);
+ q->apqn = to_ap_queue(&apdev->device)->qid;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
return 0;
}
+/**
+ * vfio_ap_queue_dev_remove:
+ *
+ * Takes the matrix lock to avoid actions on this device while removing.
+ * Frees the associated vfio_ap_queue structure.
+ */
static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
{
- /* Nothing to do yet */
+ struct vfio_ap_queue *q;
+ int apid, apqi;
+
+ mutex_lock(&matrix_dev->lock);
+ q = dev_get_drvdata(&apdev->device);
+ dev_set_drvdata(&apdev->device, NULL);
+ apid = AP_QID_CARD(q->apqn);
+ apqi = AP_QID_QUEUE(q->apqn);
+ vfio_ap_mdev_reset_queue(apid, apqi, 1);
+ vfio_ap_irq_disable(q);
+ kfree(q);
+ mutex_unlock(&matrix_dev->lock);
}
static void vfio_ap_matrix_dev_release(struct device *dev)
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 900b9cf20ca5..0604b49a4d32 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -24,6 +24,295 @@
#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
+static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
+
+static int match_apqn(struct device *dev, const void *data)
+{
+ struct vfio_ap_queue *q = dev_get_drvdata(dev);
+
+ return (q->apqn == *(int *)(data)) ? 1 : 0;
+}
+
+/**
+ * vfio_ap_get_queue: Retrieve a queue with a specific APQN from a list
+ * @matrix_mdev: the associated mediated matrix
+ * @apqn: The queue APQN
+ *
+ * Retrieve a queue with a specific APQN from the list of devices
+ * of the vfio_ap driver.
+ * Verify that the APID and the APQI are set in the matrix.
+ *
+ * Returns the pointer to the associated vfio_ap_queue
+ */
+static struct vfio_ap_queue *vfio_ap_get_queue(
+ struct ap_matrix_mdev *matrix_mdev,
+ int apqn)
+{
+ struct vfio_ap_queue *q;
+ struct device *dev;
+
+ if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
+ return NULL;
+ if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
+ return NULL;
+
+ dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
+ &apqn, match_apqn);
+ if (!dev)
+ return NULL;
+ q = dev_get_drvdata(dev);
+ q->matrix_mdev = matrix_mdev;
+ put_device(dev);
+
+ return q;
+}
+
+/**
+ * vfio_ap_wait_for_irqclear
+ * @apqn: The AP Queue number
+ *
+ * Checks the IRQ bit for the status of this APQN using ap_tapq.
+ * Returns when ap_tapq succeeds and the bit is clear, or when ap_tapq
+ * fails because the AP is invalid, deconfigured, or checkstopped.
+ * Otherwise retries up to 5 times, waiting 20ms between attempts.
+ *
+ */
+static void vfio_ap_wait_for_irqclear(int apqn)
+{
+ struct ap_queue_status status;
+ int retry = 5;
+
+ do {
+ status = ap_tapq(apqn, NULL);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ if (!status.irq_enabled)
+ return;
+ /* Fall through */
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ default:
+ WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
+ status.response_code, apqn);
+ return;
+ }
+ } while (--retry);
+
+ WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
+ __func__, status.response_code, apqn);
+}
+
+/**
+ * vfio_ap_free_aqic_resources
+ * @q: The vfio_ap_queue
+ *
+ * Unregisters the ISC in the GIB when the saved ISC is not invalid.
+ * Unpins the guest's page holding the NIB when it exists.
+ * Resets the saved_pfn and saved_isc to invalid values.
+ *
+ */
+static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
+{
+ if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev)
+ kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
+ if (q->saved_pfn && q->matrix_mdev)
+ vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
+ &q->saved_pfn, 1);
+ q->saved_pfn = 0;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+}
+
+/**
+ * vfio_ap_irq_disable
+ * @q: The vfio_ap_queue
+ *
+ * Uses ap_aqic to disable the interruption. On success (or when a reset
+ * is in progress, or the IRQ-disable command has already proceeded),
+ * calls vfio_ap_wait_for_irqclear() to wait for the IRQ bit to clear,
+ * then vfio_ap_free_aqic_resources() to free the resources associated
+ * with AP interrupt handling.
+ *
+ * In case the AP is busy, or a reset is in progress,
+ * retries after 20ms, up to 5 times.
+ *
+ * Returns the status if ap_aqic fails because the AP is invalid,
+ * deconfigured, or checkstopped.
+ */
+struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
+{
+ struct ap_qirq_ctrl aqic_gisa = {};
+ struct ap_queue_status status;
+ int retries = 5;
+
+ do {
+ status = ap_aqic(q->apqn, aqic_gisa, NULL);
+ switch (status.response_code) {
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ case AP_RESPONSE_NORMAL:
+ vfio_ap_wait_for_irqclear(q->apqn);
+ goto end_free;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_INVALID_ADDRESS:
+ default:
+			/* All cases here mean the AP is not operational */
+ WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+ status.response_code);
+ goto end_free;
+ }
+ } while (retries--);
+
+ WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+ status.response_code);
+end_free:
+ vfio_ap_free_aqic_resources(q);
+ q->matrix_mdev = NULL;
+ return status;
+}
+
+/**
+ * vfio_ap_irq_enable: Enable interruption for an APQN
+ *
+ * @dev: the device associated with the ap_queue
+ * @q: the vfio_ap_queue holding AQIC parameters
+ *
+ * Pin the NIB saved in *q
+ * Register the guest ISC to GIB interface and retrieve the
+ * host ISC to issue the host side PQAP/AQIC
+ *
+ * Response.status may be set to AP_RESPONSE_INVALID_ADDRESS in case
+ * vfio_pin_pages() fails.
+ *
+ * Otherwise returns the ap_queue_status returned by ap_aqic();
+ * all retry handling will be done by the guest.
+ */
+static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
+ int isc,
+ unsigned long nib)
+{
+ struct ap_qirq_ctrl aqic_gisa = {};
+ struct ap_queue_status status = {};
+ struct kvm_s390_gisa *gisa;
+ struct kvm *kvm;
+ unsigned long h_nib, g_pfn, h_pfn;
+ int ret;
+
+ g_pfn = nib >> PAGE_SHIFT;
+ ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &h_pfn);
+ switch (ret) {
+ case 1:
+ break;
+ default:
+ status.response_code = AP_RESPONSE_INVALID_ADDRESS;
+ return status;
+ }
+
+ kvm = q->matrix_mdev->kvm;
+ gisa = kvm->arch.gisa_int.origin;
+
+ h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
+ aqic_gisa.gisc = isc;
+ aqic_gisa.isc = kvm_s390_gisc_register(kvm, isc);
+ aqic_gisa.ir = 1;
+ aqic_gisa.gisa = (uint64_t)gisa >> 4;
+
+ status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+		/* Free resources from any older IRQ configuration */
+ vfio_ap_free_aqic_resources(q);
+ q->saved_pfn = g_pfn;
+ q->saved_isc = isc;
+ break;
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+		/* We could not modify IRQ settings: clear new configuration */
+ vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1);
+ kvm_s390_gisc_unregister(kvm, isc);
+ break;
+ default:
+ pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
+ status.response_code);
+ vfio_ap_irq_disable(q);
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * handle_pqap: PQAP instruction callback
+ *
+ * @vcpu: The vcpu on which we received the PQAP instruction
+ *
+ * Get the general register contents to initialize internal variables.
+ * REG[0]: APQN
+ * REG[1]: IR and ISC
+ * REG[2]: NIB
+ *
+ * Response.status may be set to one of the following response codes:
+ * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
+ * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
+ * - AP_RESPONSE_NORMAL (0): in case of success
+ * Check vfio_ap_irq_enable() and vfio_ap_irq_disable() for other
+ * possible response codes.
+ * We take the matrix_dev lock to ensure serialization on queues and
+ * mediated device access.
+ *
+ * Returns 0 if we could handle the request inside KVM;
+ * otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
+ */
+static int handle_pqap(struct kvm_vcpu *vcpu)
+{
+ uint64_t status;
+ uint16_t apqn;
+ struct vfio_ap_queue *q;
+ struct ap_queue_status qstatus = {
+ .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
+ struct ap_matrix_mdev *matrix_mdev;
+
+ /* If we do not use the AIV facility just go to userland */
+ if (!(vcpu->arch.sie_block->eca & ECA_AIV))
+ return -EOPNOTSUPP;
+
+ apqn = vcpu->run->s.regs.gprs[0] & 0xffff;
+ mutex_lock(&matrix_dev->lock);
+
+ if (!vcpu->kvm->arch.crypto.pqap_hook)
+ goto out_unlock;
+ matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
+ struct ap_matrix_mdev, pqap_hook);
+
+ q = vfio_ap_get_queue(matrix_mdev, apqn);
+ if (!q)
+ goto out_unlock;
+
+ status = vcpu->run->s.regs.gprs[1];
+
+ /* If IR bit(16) is set we enable the interrupt */
+ if ((status >> (63 - 16)) & 0x01)
+ qstatus = vfio_ap_irq_enable(q, status & 0x07,
+ vcpu->run->s.regs.gprs[2]);
+ else
+ qstatus = vfio_ap_irq_disable(q);
+
+out_unlock:
+ memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
+ vcpu->run->s.regs.gprs[1] >>= 32;
+ mutex_unlock(&matrix_dev->lock);
+ return 0;
+}
+
static void vfio_ap_matrix_init(struct ap_config_info *info,
struct ap_matrix *matrix)
{
@@ -45,8 +334,11 @@ static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
return -ENOMEM;
}
+ matrix_mdev->mdev = mdev;
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
mdev_set_drvdata(mdev, matrix_mdev);
+ matrix_mdev->pqap_hook.hook = handle_pqap;
+ matrix_mdev->pqap_hook.owner = THIS_MODULE;
mutex_lock(&matrix_dev->lock);
list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
mutex_unlock(&matrix_dev->lock);
@@ -62,6 +354,7 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
return -EBUSY;
mutex_lock(&matrix_dev->lock);
+ vfio_ap_mdev_reset_queues(mdev);
list_del(&matrix_mdev->node);
mutex_unlock(&matrix_dev->lock);
@@ -754,11 +1047,42 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
}
matrix_mdev->kvm = kvm;
+ kvm_get_kvm(kvm);
+ kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
mutex_unlock(&matrix_dev->lock);
return 0;
}
+/*
+ * vfio_ap_mdev_iommu_notifier: IOMMU notifier callback
+ *
+ * @nb: The notifier block
+ * @action: Action to be taken
+ * @data: data associated with the request
+ *
+ * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
+ * pinned before). Other requests are ignored.
+ *
+ */
+static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier);
+
+ if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
+ struct vfio_iommu_type1_dma_unmap *unmap = data;
+ unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;
+
+ vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -790,15 +1114,36 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
- unsigned int retry)
+static void vfio_ap_irq_disable_apqn(int apqn)
+{
+ struct device *dev;
+ struct vfio_ap_queue *q;
+
+ dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
+ &apqn, match_apqn);
+ if (dev) {
+ q = dev_get_drvdata(dev);
+ vfio_ap_irq_disable(q);
+ put_device(dev);
+ }
+}
+
+int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+ unsigned int retry)
{
struct ap_queue_status status;
+ int retry2 = 2;
+ int apqn = AP_MKQID(apid, apqi);
do {
- status = ap_zapq(AP_MKQID(apid, apqi));
+ status = ap_zapq(apqn);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
+ while (!status.queue_empty && retry2--) {
+ msleep(20);
+ status = ap_tapq(apqn, NULL);
+ }
+			WARN_ON_ONCE(retry2 <= 0);
return 0;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
@@ -832,6 +1177,7 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
*/
if (ret)
rc = ret;
+ vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi));
}
}
@@ -858,20 +1204,37 @@ static int vfio_ap_mdev_open(struct mdev_device *mdev)
return ret;
}
- return 0;
+ matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
+ events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &events, &matrix_mdev->iommu_notifier);
+ if (!ret)
+ return ret;
+
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &matrix_mdev->group_notifier);
+ module_put(THIS_MODULE);
+ return ret;
}
static void vfio_ap_mdev_release(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
- if (matrix_mdev->kvm)
+ mutex_lock(&matrix_dev->lock);
+ if (matrix_mdev->kvm) {
kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
+ vfio_ap_mdev_reset_queues(mdev);
+ kvm_put_kvm(matrix_mdev->kvm);
+ matrix_mdev->kvm = NULL;
+ }
+ mutex_unlock(&matrix_dev->lock);
- vfio_ap_mdev_reset_queues(mdev);
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &matrix_mdev->iommu_notifier);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
&matrix_mdev->group_notifier);
- matrix_mdev->kvm = NULL;
module_put(THIS_MODULE);
}
@@ -900,6 +1263,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
{
int ret;
+ mutex_lock(&matrix_dev->lock);
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
ret = vfio_ap_mdev_get_device_info(arg);
@@ -911,6 +1275,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
ret = -EOPNOTSUPP;
break;
}
+ mutex_unlock(&matrix_dev->lock);
return ret;
}
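
handle_pqap() above decodes general register 1 using the z/Architecture convention of numbering register bits 0..63 from the most significant end, so the IR bit "16" is bit 47 counted from the LSB, and the guest ISC occupies the low three bits. A standalone demonstration of that decoding:

    #include <stdint.h>
    #include <stdio.h>

    /* Bit 16 in z/Architecture (MSB-first) numbering is 63 - 16
     * counted from the least significant bit. */
    static int pqap_ir_bit(uint64_t gpr1)
    {
        return (gpr1 >> (63 - 16)) & 0x01;
    }

    static int pqap_isc(uint64_t gpr1)
    {
        return gpr1 & 0x07;  /* guest ISC: low 3 bits */
    }

    int main(void)
    {
        uint64_t gpr1 = (1ULL << (63 - 16)) | 0x05;  /* IR set, ISC 5 */

        printf("ir=%d isc=%d\n", pqap_ir_bit(gpr1), pqap_isc(gpr1));
        return 0;
    }
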
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index 76b7f98e47e9..f46dde56b464 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -4,6 +4,7 @@
*
* Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
* Halil Pasic <pasic@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
*
* Copyright IBM Corp. 2018
*/
@@ -16,6 +17,7 @@
#include <linux/mdev.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/kvm_host.h>
#include "ap_bus.h"
@@ -80,10 +82,23 @@ struct ap_matrix_mdev {
struct list_head node;
struct ap_matrix matrix;
struct notifier_block group_notifier;
+ struct notifier_block iommu_notifier;
struct kvm *kvm;
+ struct kvm_s390_module_hook pqap_hook;
+ struct mdev_device *mdev;
};
extern int vfio_ap_mdev_register(void);
extern void vfio_ap_mdev_unregister(void);
+int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+ unsigned int retry);
+struct vfio_ap_queue {
+ struct ap_matrix_mdev *matrix_mdev;
+ unsigned long saved_pfn;
+ int apqn;
+#define VFIO_AP_ISC_INVALID 0xff
+ unsigned char saved_isc;
+};
+struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q);
#endif /* _VFIO_AP_PRIVATE_H_ */
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 689c2af7026a..1058b4b5cc1e 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -525,7 +525,7 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
filp->private_data = (void *) perms;
atomic_inc(&zcrypt_open_count);
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
/**
@@ -659,6 +659,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
if (mex->outputdatalength < mex->inputdatalength) {
+ func_code = 0;
rc = -EINVAL;
goto out;
}
@@ -742,6 +743,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
trace_s390_zcrypt_req(crt, TP_ICARSACRT);
if (crt->outputdatalength < crt->inputdatalength) {
+ func_code = 0;
rc = -EINVAL;
goto out;
}
@@ -820,7 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
struct ap_message ap_msg;
unsigned int weight, pref_weight;
unsigned int func_code;
- unsigned short *domain;
+ unsigned short *domain, tdom;
int qid = 0, rc = -ENODEV;
struct module *mod;
@@ -832,6 +834,17 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
if (rc)
goto out;
+ /*
+ * If a valid target domain is set and this domain is NOT a usage
+ * domain but a control only domain, use the default domain as target.
+ */
+ tdom = *domain;
+ if (tdom >= 0 && tdom < AP_DOMAINS &&
+ !ap_test_config_usage_domain(tdom) &&
+ ap_test_config_ctrl_domain(tdom) &&
+ ap_domain_index >= 0)
+ tdom = ap_domain_index;
+
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
@@ -854,8 +867,8 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
/* check if device is online and eligible */
if (!zq->online ||
!zq->ops->send_cprb ||
- ((*domain != (unsigned short) AUTOSELECT) &&
- (*domain != AP_QID_QUEUE(zq->queue->qid))))
+ (tdom != (unsigned short) AUTOSELECT &&
+ tdom != AP_QID_QUEUE(zq->queue->qid)))
continue;
/* check if device node has admission for this queue */
if (!zcrypt_check_queue(perms,
@@ -951,6 +964,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
if (!targets) {
+ func_code = 0;
rc = -ENOMEM;
goto out;
}
@@ -958,6 +972,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
if (copy_from_user(targets, uptr,
target_num * sizeof(*targets))) {
+ func_code = 0;
rc = -EFAULT;
goto out_free;
}
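
The tdom logic above distinguishes usage domains, which can process requests, from control-only domains, which can merely be managed: a CPRB aimed at a configured control-only domain is redirected to the default domain instead of failing device selection. A reduced sketch of that decision, with hypothetical stub predicates standing in for ap_test_config_usage_domain() and ap_test_config_ctrl_domain():

    #include <stdio.h>

    #define AP_DOMAINS 256
    #define AUTOSELECT 0xFFFF

    /* Hypothetical stand-ins: domain 7 is control-only in this sketch */
    static int is_usage_domain(unsigned short dom) { return dom != 7; }
    static int is_ctrl_domain(unsigned short dom)  { return dom == 7; }

    static unsigned short pick_target_domain(unsigned short requested,
                                             int default_domain)
    {
        if (requested < AP_DOMAINS &&
            !is_usage_domain(requested) &&
            is_ctrl_domain(requested) &&
            default_domain >= 0)
            return (unsigned short)default_domain;
        return requested;  /* includes AUTOSELECT */
    }

    int main(void)
    {
        printf("%u\n", pick_target_domain(7, 3));  /* 3: redirected */
        printf("%u\n", pick_target_domain(4, 3));  /* 4: usage domain */
        return 0;
    }
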
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 0cbcc238ef98..12fe9deb265e 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -567,6 +567,10 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
*fcode = payload_hdr->func_val & 0xFFFF;
+	/* enable special processing based on the CPRB's flags special bit */
+ if (msg->cprbx.flags & 0x20)
+ ap_msg->special = 1;
+
return 0;
}
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 7c5a25ddf832..ced896d1534a 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -7,10 +7,10 @@ config LCS
prompt "Lan Channel Station Interface"
depends on CCW && NETDEVICES && (ETHERNET || FDDI)
help
- Select this option if you want to use LCS networking on IBM System z.
- This device driver supports FDDI (IEEE 802.7) and Ethernet.
- To compile as a module, choose M. The module name is lcs.
- If you do not know what it is, it's safe to choose Y.
+ Select this option if you want to use LCS networking on IBM System z.
+ This device driver supports FDDI (IEEE 802.7) and Ethernet.
+ To compile as a module, choose M. The module name is lcs.
+ If you do not know what it is, it's safe to choose Y.
config CTCM
def_tristate m
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 0aab90817326..66eac2b9704d 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <net/smc.h>
+#include <asm/pci_insn.h>
#define UTIL_STR_LEN 16
@@ -194,8 +195,6 @@ struct ism_dev {
struct pci_dev *pdev;
struct smcd_dev *smcd;
- void __iomem *ctl;
-
struct ism_sba *sba;
dma_addr_t sba_dma_addr;
DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
@@ -209,13 +208,37 @@ struct ism_dev {
#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
((dmb) | (idx) << 24 | (sf) << 23 | (offset))
+static inline void __ism_read_cmd(struct ism_dev *ism, void *data,
+ unsigned long offset, unsigned long len)
+{
+ struct zpci_dev *zdev = to_zpci(ism->pdev);
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 2, 8);
+
+ while (len > 0) {
+ __zpci_load(data, req, offset);
+ offset += 8;
+ data += 8;
+ len -= 8;
+ }
+}
+
+static inline void __ism_write_cmd(struct ism_dev *ism, void *data,
+ unsigned long offset, unsigned long len)
+{
+ struct zpci_dev *zdev = to_zpci(ism->pdev);
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 2, len);
+
+ if (len)
+ __zpci_store_block(data, req, offset);
+}
+
static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data,
unsigned int size)
{
struct zpci_dev *zdev = to_zpci(ism->pdev);
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size);
- return zpci_write_block(req, data, dmb_req);
+ return __zpci_store_block(data, req, dmb_req);
}
#endif /* S390_ISM_H */
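The helpers above replace the mapped control BAR with direct PCI load/store instructions: __ism_read_cmd() walks the response area in fixed 8-byte PCI loads, so callers must size command and response buffers in multiples of 8 bytes, while __ism_write_cmd() issues a single block store. A hypothetical wrapper that documents this contract (not part of the patch):

/* Minimal sketch, assuming ism_resp_hdr stays a multiple of 8 bytes:
 * the BUILD_BUG_ON makes the 8-byte stepping of __ism_read_cmd() explicit. */
static int ism_read_resp_hdr(struct ism_dev *ism, struct ism_resp_hdr *resp)
{
	BUILD_BUG_ON(sizeof(*resp) % 8);
	__ism_read_cmd(ism, resp, 0, sizeof(*resp));
	return resp->ret;
}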
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 3e132592c1fe..4fc2056bd227 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -38,19 +38,18 @@ static int ism_cmd(struct ism_dev *ism, void *cmd)
struct ism_req_hdr *req = cmd;
struct ism_resp_hdr *resp = cmd;
- memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req));
- memcpy_toio(ism->ctl, req, sizeof(*req));
+ __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
+ __ism_write_cmd(ism, req, 0, sizeof(*req));
WRITE_ONCE(resp->ret, ISM_ERROR);
- memcpy_fromio(resp, ism->ctl, sizeof(*resp));
+ __ism_read_cmd(ism, resp, 0, sizeof(*resp));
if (resp->ret) {
debug_text_event(ism_debug_info, 0, "cmd failure");
debug_event(ism_debug_info, 0, resp, sizeof(*resp));
goto out;
}
- memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp),
- resp->len - sizeof(*resp));
+ __ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
return resp->ret;
}
@@ -512,13 +511,9 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_disable;
- ism->ctl = pci_iomap(pdev, 2, 0);
- if (!ism->ctl)
- goto err_resource;
-
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (ret)
- goto err_unmap;
+ goto err_resource;
dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
dma_set_max_seg_size(&pdev->dev, SZ_1M);
@@ -527,7 +522,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
ISM_NR_DMBS);
if (!ism->smcd)
- goto err_unmap;
+ goto err_resource;
ism->smcd->priv = ism;
ret = ism_dev_init(ism);
@@ -538,8 +533,6 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_free:
smcd_free_dev(ism->smcd);
-err_unmap:
- pci_iounmap(pdev, ism->ctl);
err_resource:
pci_release_mem_regions(pdev);
err_disable:
@@ -568,7 +561,6 @@ static void ism_remove(struct pci_dev *pdev)
ism_dev_exit(ism);
smcd_free_dev(ism->smcd);
- pci_iounmap(pdev, ism->ctl);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
dev_set_drvdata(&pdev->dev, NULL);
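With the control-BAR mapping gone, ism_probe() loses one unwind step; what remains is the usual reverse-order goto chain, where each failure releases exactly what was acquired before it. Condensed sketch of the pattern (labels as in the patch):

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		goto err_resource;	/* memory regions were already requested */
	/* ... remaining setup; later failures jump to err_free ... */
	return 0;

err_free:
	smcd_free_dev(ism->smcd);
err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return ret;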
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index c851cf6e01c4..c7ee07ce3615 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -10,6 +10,7 @@
#ifndef __QETH_CORE_H__
#define __QETH_CORE_H__
+#include <linux/completion.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
@@ -21,8 +22,11 @@
#include <linux/hashtable.h>
#include <linux/ip.h>
#include <linux/refcount.h>
+#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <net/dst.h>
+#include <net/ip6_fib.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/addrconf.h>
@@ -58,7 +62,7 @@ struct qeth_dbf_info {
debug_info_t *id;
};
-#define QETH_DBF_CTRL_LEN 256
+#define QETH_DBF_CTRL_LEN 256U
#define QETH_DBF_TEXT(name, level, text) \
debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text)
@@ -163,6 +167,12 @@ struct qeth_vnicc_info {
bool rx_bcast_enabled;
};
+static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa,
+ enum qeth_ipa_setadp_cmd func)
+{
+ return (ipa->supported_funcs & func);
+}
+
static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
enum qeth_ipa_funcs func)
{
@@ -176,9 +186,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
}
#define qeth_adp_supported(c, f) \
- qeth_is_ipa_supported(&c->options.adp, f)
-#define qeth_adp_enabled(c, f) \
- qeth_is_ipa_enabled(&c->options.adp, f)
+ qeth_is_adp_supported(&c->options.adp, f)
#define qeth_is_supported(c, f) \
qeth_is_ipa_supported(&c->options.ipa4, f)
#define qeth_is_enabled(c, f) \
@@ -217,6 +225,9 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
/* QDIO queue and buffer handling */
/*****************************************************************************/
#define QETH_MAX_QUEUES 4
+#define QETH_IQD_MIN_TXQ 2 /* One for ucast, one for mcast. */
+#define QETH_IQD_MCAST_TXQ 0
+#define QETH_IQD_MIN_UCAST_TXQ 1
#define QETH_IN_BUF_SIZE_DEFAULT 65536
#define QETH_IN_BUF_COUNT_DEFAULT 64
#define QETH_IN_BUF_COUNT_HSDEFAULT 128
@@ -365,34 +376,6 @@ enum qeth_header_ids {
#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
#define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/
-enum qeth_qdio_buffer_states {
- /*
- * inbound: read out by driver; owned by hardware in order to be filled
- * outbound: owned by driver in order to be filled
- */
- QETH_QDIO_BUF_EMPTY,
- /*
- * inbound: filled by hardware; owned by driver in order to be read out
- * outbound: filled by driver; owned by hardware in order to be sent
- */
- QETH_QDIO_BUF_PRIMED,
- /*
- * inbound: not applicable
- * outbound: identified to be pending in TPQ
- */
- QETH_QDIO_BUF_PENDING,
- /*
- * inbound: not applicable
- * outbound: found in completion queue
- */
- QETH_QDIO_BUF_IN_CQ,
- /*
- * inbound: not applicable
- * outbound: handled via transfer pending / completion queue
- */
- QETH_QDIO_BUF_HANDLED_DELAYED,
-};
-
enum qeth_qdio_info_states {
QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED,
@@ -424,6 +407,19 @@ struct qeth_qdio_q {
int next_buf_to_init;
};
+enum qeth_qdio_out_buffer_state {
+ /* Owned by driver, in order to be filled. */
+ QETH_QDIO_BUF_EMPTY,
+ /* Filled by driver; owned by hardware in order to be sent. */
+ QETH_QDIO_BUF_PRIMED,
+ /* Identified to be pending in TPQ. */
+ QETH_QDIO_BUF_PENDING,
+ /* Found in completion queue. */
+ QETH_QDIO_BUF_IN_CQ,
+ /* Handled via transfer pending / completion queue. */
+ QETH_QDIO_BUF_HANDLED_DELAYED,
+};
+
struct qeth_qdio_out_buffer {
struct qdio_buffer *buffer;
atomic_t state;
@@ -462,7 +458,6 @@ struct qeth_card_stats {
u64 rx_errors;
u64 rx_dropped;
u64 rx_multicast;
- u64 tx_errors;
};
struct qeth_out_q_stats {
@@ -477,6 +472,7 @@ struct qeth_out_q_stats {
u64 skbs_linearized_fail;
u64 tso_bytes;
u64 packing_mode_switch;
+ u64 stopped;
/* rtnl_link_stats64 */
u64 tx_packets;
@@ -490,14 +486,12 @@ struct qeth_qdio_out_q {
struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_outbuf_state *bufstates; /* convenience pointer */
struct qeth_out_q_stats stats;
- int queue_no;
+ u8 next_buf_to_fill;
+ u8 max_elements;
+ u8 queue_no;
+ u8 do_pack;
struct qeth_card *card;
atomic_t state;
- int do_pack;
- /*
- * index of buffer to be filled by driver; state EMPTY or PACKING
- */
- int next_buf_to_fill;
/*
* number of buffers that are currently filled (PRIMED)
* -> these buffers are hardware-owned
@@ -507,6 +501,11 @@ struct qeth_qdio_out_q {
atomic_t set_pci_flags_count;
};
+static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
+{
+ return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
+}
+
struct qeth_qdio_info {
atomic_t state;
/* input */
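qeth_out_queue_is_full() compares used_buffers against the fixed QDIO ring size. Together with the new per-queue "stopped" counter above, this suggests the usual transmit-path pairing; a sketch of the assumed usage (the xmit hunks are not part of this file):

	/* assumed pairing: stop the netdev TX queue once the ring is full */
	if (qeth_out_queue_is_full(queue)) {
		netif_tx_stop_queue(txq);	/* txq: the matching netdev_queue */
		queue->stats.stopped++;		/* assumed accounting */
		return NETDEV_TX_BUSY;
	}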
@@ -528,21 +527,13 @@ struct qeth_qdio_info {
};
/**
- * buffer stuff for read channel
- */
-#define QETH_CMD_BUFFER_NO 8
-
-/**
* channel state machine
*/
enum qeth_channel_states {
CH_STATE_UP,
CH_STATE_DOWN,
- CH_STATE_ACTIVATING,
CH_STATE_HALTED,
CH_STATE_STOPPED,
- CH_STATE_RCD,
- CH_STATE_RCD_DONE,
};
/**
* card state machine
@@ -557,15 +548,11 @@ enum qeth_card_states {
* Protocol versions
*/
enum qeth_prot_versions {
+ QETH_PROT_NONE = 0x0000,
QETH_PROT_IPV4 = 0x0004,
QETH_PROT_IPV6 = 0x0006,
};
-enum qeth_cmd_buffer_state {
- BUF_STATE_FREE,
- BUF_STATE_LOCKED,
-};
-
enum qeth_cq {
QETH_CQ_DISABLED = 0,
QETH_CQ_ENABLED = 1,
@@ -579,36 +566,42 @@ struct qeth_ipato {
struct list_head entries;
};
-struct qeth_channel;
+struct qeth_channel {
+ struct ccw_device *ccwdev;
+ enum qeth_channel_states state;
+ atomic_t irq_pending;
+};
struct qeth_cmd_buffer {
- enum qeth_cmd_buffer_state state;
+ unsigned int length;
+ refcount_t ref_count;
struct qeth_channel *channel;
struct qeth_reply *reply;
+ long timeout;
unsigned char *data;
- void (*callback)(struct qeth_card *card, struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob);
+ void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob);
+ void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob);
};
+static inline void qeth_get_cmd(struct qeth_cmd_buffer *iob)
+{
+ refcount_inc(&iob->ref_count);
+}
+
static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
{
return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
}
-/**
- * definition of a qeth channel, used for read and write
- */
-struct qeth_channel {
- enum qeth_channel_states state;
- struct ccw1 *ccw;
- spinlock_t iob_lock;
- wait_queue_head_t wait_q;
- struct ccw_device *ccwdev;
-/*command buffer for control data*/
- struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
- atomic_t irq_pending;
- int io_buf_no;
-};
+static inline struct ccw1 *__ccw_from_cmd(struct qeth_cmd_buffer *iob)
+{
+ return (struct ccw1 *)(iob->data + ALIGN(iob->length, 8));
+}
+
+static inline bool qeth_trylock_channel(struct qeth_channel *channel)
+{
+ return atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0;
+}
/**
* OSA card related definitions
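qeth_trylock_channel() makes the irq_pending convention explicit: 0 means the channel is free, 1 means a CCW program owns it, and ownership is returned by the completion path. The release side is visible in the qeth_core_main.c hunks below; paired, the protocol looks like:

	/* acquire before starting channel I/O */
	if (!qeth_trylock_channel(channel))
		return -EBUSY;	/* another command is in flight */
	/* ... ccw_device_start(...) ... */

	/* completion/error path releases and wakes waiters: */
	atomic_set(&channel->irq_pending, 0);
	wake_up(&card->wait_q);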
@@ -631,17 +624,15 @@ struct qeth_seqno {
__u32 pdu_hdr;
__u32 pdu_hdr_ack;
__u16 ipa;
- __u32 pkt_seqno;
};
struct qeth_reply {
struct list_head list;
- wait_queue_head_t wait_q;
+ struct completion received;
int (*callback)(struct qeth_card *, struct qeth_reply *,
unsigned long);
u32 seqno;
unsigned long offset;
- atomic_t received;
int rc;
void *param;
refcount_t refcnt;
@@ -663,7 +654,8 @@ struct qeth_card_info {
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
u8 open_when_online:1;
- int guestlan;
+ u8 use_v1_blkt:1;
+ u8 is_vm_nic:1;
int mac_bits;
enum qeth_card_types type;
enum qeth_link_types link_type;
@@ -723,9 +715,6 @@ struct qeth_discipline {
void (*remove) (struct ccwgroup_device *);
int (*set_online) (struct ccwgroup_device *);
int (*set_offline) (struct ccwgroup_device *);
- int (*freeze)(struct ccwgroup_device *);
- int (*thaw) (struct ccwgroup_device *);
- int (*restore)(struct ccwgroup_device *);
int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
int (*control_event_handler)(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
@@ -762,6 +751,7 @@ struct qeth_card {
enum qeth_card_states state;
spinlock_t lock;
struct ccwgroup_device *gdev;
+ struct qeth_cmd_buffer *read_cmd;
struct qeth_channel read;
struct qeth_channel write;
struct qeth_channel data;
@@ -774,18 +764,19 @@ struct qeth_card {
struct qeth_card_options options;
struct workqueue_struct *event_wq;
+ struct workqueue_struct *cmd_wq;
wait_queue_head_t wait_q;
- spinlock_t mclock;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
+ struct mutex ip_lock;
DECLARE_HASHTABLE(ip_mc_htable, 4);
+ struct work_struct rx_mode_work;
struct work_struct kernel_thread_starter;
spinlock_t thread_mask_lock;
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
- spinlock_t ip_lock;
struct qeth_ipato ipato;
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
@@ -827,6 +818,15 @@ static inline bool qeth_netdev_is_registered(struct net_device *dev)
return dev->netdev_ops != NULL;
}
+static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
+{
+ if (txq == QETH_IQD_MCAST_TXQ)
+ return dev->num_tx_queues - 1;
+ if (txq == dev->num_tx_queues - 1)
+ return QETH_IQD_MCAST_TXQ;
+ return txq;
+}
+
static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
unsigned int elements)
{
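qeth_iqd_translate_txq() maps between the netdev numbering (multicast queue exposed last) and the device numbering (multicast on HW queue 0), and it is its own inverse. A quick self-check makes that visible (assuming any IQD net_device):

	u16 txq;

	/* translating twice must return the original queue id */
	for (txq = 0; txq < dev->num_tx_queues; txq++)
		WARN_ON(qeth_iqd_translate_txq(dev,
					       qeth_iqd_translate_txq(dev, txq)) != txq);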
@@ -869,6 +869,27 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
}
}
+static inline int qeth_get_ether_cast_type(struct sk_buff *skb)
+{
+ u8 *addr = eth_hdr(skb)->h_dest;
+
+ if (is_multicast_ether_addr(addr))
+ return is_broadcast_ether_addr(addr) ? RTN_BROADCAST :
+ RTN_MULTICAST;
+ return RTN_UNICAST;
+}
+
+static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct rt6_info *rt;
+
+ rt = (struct rt6_info *) dst;
+ if (dst)
+ dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0);
+ return dst;
+}
+
static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
u8 flags)
{
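qeth_dst_check_rcu() revalidates the skb's cached route, passing the IPv6 cookie when applicable so stale dst entries are rejected. Like any dst_check() user, the caller is expected to stay inside an RCU read-side section for as long as it dereferences the result; the assumed calling pattern:

	struct dst_entry *dst;

	rcu_read_lock();
	dst = qeth_dst_check_rcu(skb, ipv);
	if (dst) {
		/* safe to derive next-hop information from dst here */
	}
	rcu_read_unlock();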
@@ -903,12 +924,12 @@ static inline int qeth_is_diagass_supported(struct qeth_card *card,
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
- u16 cmd_code, long data,
+ u16 cmd_code, u32 *data,
enum qeth_prot_versions prot);
/* IPv4 variant */
static inline int qeth_send_simple_setassparms(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
- u16 cmd_code, long data)
+ u16 cmd_code, u32 *data)
{
return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
data, QETH_PROT_IPV4);
@@ -916,24 +937,13 @@ static inline int qeth_send_simple_setassparms(struct qeth_card *card,
static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
- u16 cmd_code, long data)
+ u16 cmd_code, u32 *data)
{
return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
data, QETH_PROT_IPV6);
}
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
- int ipv);
-static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card,
- struct sk_buff *skb,
- int ipv, int cast_type)
-{
- if (IS_IQD(card) && cast_type != RTN_UNICAST)
- return card->qdio.out_qs[card->qdio.no_out_queues - 1];
- if (!card->qdio.do_prio_queueing)
- return card->qdio.out_qs[card->qdio.default_out_queue];
- return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)];
-}
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
@@ -968,8 +978,23 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
void *);
-struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
- enum qeth_ipa_cmds, enum qeth_prot_versions);
+struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
+ enum qeth_ipa_cmds cmd_code,
+ enum qeth_prot_versions prot,
+ unsigned int data_length);
+struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
+ unsigned int length, unsigned int ccws,
+ long timeout);
+struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
+ enum qeth_ipa_funcs ipa_func,
+ u16 cmd_code,
+ unsigned int data_length,
+ enum qeth_prot_versions prot);
+struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
+ enum qeth_diags_cmds sub_cmd,
+ unsigned int data_length);
+void qeth_put_cmd(struct qeth_cmd_buffer *iob);
+
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **);
@@ -978,17 +1003,13 @@ int qeth_poll(struct napi_struct *napi, int budget);
void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
void qeth_clear_working_pool_list(struct qeth_card *);
-void qeth_clear_cmd_buffers(struct qeth_channel *);
-void qeth_clear_qdio_buffers(struct qeth_card *);
+void qeth_drain_output_queues(struct qeth_card *card);
void qeth_setadp_promisc_mode(struct qeth_card *);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *);
-void qeth_prepare_control_data(struct qeth_card *, int,
- struct qeth_cmd_buffer *);
-void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
+void qeth_notify_reply(struct qeth_reply *reply, int reason);
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
u16 cmd_length);
-struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
int qeth_query_card_info(struct qeth_card *card,
@@ -1005,10 +1026,6 @@ int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
void qeth_trace_features(struct qeth_card *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
-struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
- enum qeth_ipa_funcs,
- __u16, __u16,
- enum qeth_prot_versions);
int qeth_set_features(struct net_device *, netdev_features_t);
void qeth_enable_hw_features(struct net_device *dev);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
@@ -1016,16 +1033,17 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+ u8 cast_type, struct net_device *sb_dev);
int qeth_open(struct net_device *dev);
int qeth_stop(struct net_device *dev);
int qeth_vm_request_mac(struct qeth_card *card);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv, int cast_type,
+ struct qeth_qdio_out_q *queue, int ipv,
void (*fill_header)(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, int cast_type,
- unsigned int data_len));
+ int ipv, unsigned int data_len));
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
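The command-buffer API declared above is reference counted: qeth_alloc_cmd() returns an iob with ref_count 1, qeth_get_cmd() takes an extra reference (used below to keep the long-lived read command alive across completions), and qeth_put_cmd() frees data and iob once the last reference drops. Lifecycle in outline:

	iob = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!iob)
		return -ENOMEM;
	qeth_get_cmd(iob);	/* extra ref: survive the completion callback */
	/* ... submit; the completion path does qeth_put_cmd(iob) ... */
	qeth_put_cmd(iob);	/* drop our own reference */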
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 44bd6f04c145..4d0caeebc802 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -20,6 +20,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
+#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
@@ -61,13 +62,11 @@ static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
static struct lock_class_key qdio_out_skb_queue_key;
-static void qeth_send_control_data_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob);
-static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
+static void qeth_issue_next_read_cb(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
-static void qeth_free_qdio_buffers(struct qeth_card *);
+static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum iucv_tx_notify notification);
@@ -85,7 +84,7 @@ static void qeth_close_dev_handler(struct work_struct *work)
static const char *qeth_get_cardname(struct qeth_card *card)
{
- if (card->info.guestlan) {
+ if (IS_VM_NIC(card)) {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return " Virtual NIC QDIO";
@@ -120,7 +119,7 @@ static const char *qeth_get_cardname(struct qeth_card *card)
/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
- if (card->info.guestlan) {
+ if (IS_VM_NIC(card)) {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return "Virt.NIC QDIO";
@@ -292,7 +291,7 @@ static int qeth_cq_init(struct qeth_card *card)
int rc;
if (card->options.cq == QETH_CQ_ENABLED) {
- QETH_DBF_TEXT(SETUP, 2, "cqinit");
+ QETH_CARD_TEXT(card, 2, "cqinit");
qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
QDIO_MAX_BUFFERS_PER_Q);
card->qdio.c_q->next_buf_to_init = 127;
@@ -300,7 +299,7 @@ static int qeth_cq_init(struct qeth_card *card)
card->qdio.no_in_queues - 1, 0,
127);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
goto out;
}
}
@@ -317,7 +316,7 @@ static int qeth_alloc_cq(struct qeth_card *card)
int i;
struct qdio_outbuf_state *outbuf_states;
- QETH_DBF_TEXT(SETUP, 2, "cqon");
+ QETH_CARD_TEXT(card, 2, "cqon");
card->qdio.c_q = qeth_alloc_qdio_queue();
if (!card->qdio.c_q) {
rc = -1;
@@ -339,11 +338,11 @@ static int qeth_alloc_cq(struct qeth_card *card)
outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
}
} else {
- QETH_DBF_TEXT(SETUP, 2, "nocq");
+ QETH_CARD_TEXT(card, 2, "nocq");
card->qdio.c_q = NULL;
card->qdio.no_in_queues = 1;
}
- QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
+ QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
rc = 0;
out:
return rc;
@@ -486,40 +485,39 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
queue == card->qdio.no_in_queues - 1;
}
-static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data)
+static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
+ void *data)
{
ccw->cmd_code = cmd_code;
- ccw->flags = CCW_FLAG_SLI;
+ ccw->flags = flags | CCW_FLAG_SLI;
ccw->count = len;
ccw->cda = (__u32) __pa(data);
}
static int __qeth_issue_next_read(struct qeth_card *card)
{
- struct qeth_channel *channel = &card->read;
- struct qeth_cmd_buffer *iob;
+ struct qeth_cmd_buffer *iob = card->read_cmd;
+ struct qeth_channel *channel = iob->channel;
+ struct ccw1 *ccw = __ccw_from_cmd(iob);
int rc;
QETH_CARD_TEXT(card, 5, "issnxrd");
if (channel->state != CH_STATE_UP)
return -EIO;
- iob = qeth_get_buffer(channel);
- if (!iob) {
- dev_warn(&card->gdev->dev, "The qeth device driver "
- "failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
- CARD_DEVID(card));
- return -ENOMEM;
- }
- qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
+
+ memset(iob->data, 0, iob->length);
+ qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
+ iob->callback = qeth_issue_next_read_cb;
+ /* keep the cmd alive after completion: */
+ qeth_get_cmd(iob);
+
QETH_CARD_TEXT(card, 6, "noirqpnd");
- rc = ccw_device_start(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0);
+ rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
if (rc) {
QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
rc, CARD_DEVID(card));
atomic_set(&channel->irq_pending, 0);
- qeth_release_buffer(channel, iob);
+ qeth_put_cmd(iob);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
wake_up(&card->wait_q);
@@ -542,11 +540,10 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
struct qeth_reply *reply;
- reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
+ reply = kzalloc(sizeof(*reply), GFP_KERNEL);
if (reply) {
refcount_set(&reply->refcnt, 1);
- atomic_set(&reply->received, 0);
- init_waitqueue_head(&reply->wait_q);
+ init_completion(&reply->received);
}
return reply;
}
@@ -576,11 +573,12 @@ static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply)
spin_unlock_irq(&card->lock);
}
-static void qeth_notify_reply(struct qeth_reply *reply)
+void qeth_notify_reply(struct qeth_reply *reply, int reason)
{
- atomic_inc(&reply->received);
- wake_up(&reply->wait_q);
+ reply->rc = reason;
+ complete(&reply->received);
}
+EXPORT_SYMBOL_GPL(qeth_notify_reply);
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
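qeth_notify_reply() is now the single producer-side primitive: it records the reason code and completes the waiter, replacing the old atomic counter plus wait queue. The matching consumer side appears further down in qeth_send_control_data() as a bounded, interruptible wait:

	timeout = wait_for_completion_interruptible_timeout(&reply->received,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;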
@@ -664,10 +662,8 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
QETH_CARD_TEXT(card, 4, "clipalst");
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(reply, &card->cmd_waiter_list, list) {
- reply->rc = -EIO;
- qeth_notify_reply(reply);
- }
+ list_for_each_entry(reply, &card->cmd_waiter_list, list)
+ qeth_notify_reply(reply, -EIO);
spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
@@ -675,9 +671,6 @@ EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
static int qeth_check_idx_response(struct qeth_card *card,
unsigned char *buffer)
{
- if (!buffer)
- return 0;
-
QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
if ((buffer[2] & 0xc0) == 0xc0) {
QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
@@ -696,93 +689,62 @@ static int qeth_check_idx_response(struct qeth_card *card,
return 0;
}
-static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
+void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
- __u8 index;
-
- index = channel->io_buf_no;
- do {
- if (channel->iob[index].state == BUF_STATE_FREE) {
- channel->iob[index].state = BUF_STATE_LOCKED;
- channel->io_buf_no = (channel->io_buf_no + 1) %
- QETH_CMD_BUFFER_NO;
- memset(channel->iob[index].data, 0, QETH_BUFSIZE);
- return channel->iob + index;
- }
- index = (index + 1) % QETH_CMD_BUFFER_NO;
- } while (index != channel->io_buf_no);
-
- return NULL;
-}
-
-void qeth_release_buffer(struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&channel->iob_lock, flags);
- iob->state = BUF_STATE_FREE;
- iob->callback = qeth_send_control_data_cb;
- if (iob->reply) {
- qeth_put_reply(iob->reply);
- iob->reply = NULL;
+ if (refcount_dec_and_test(&iob->ref_count)) {
+ if (iob->reply)
+ qeth_put_reply(iob->reply);
+ kfree(iob->data);
+ kfree(iob);
}
- spin_unlock_irqrestore(&channel->iob_lock, flags);
- wake_up(&channel->wait_q);
}
-EXPORT_SYMBOL_GPL(qeth_release_buffer);
+EXPORT_SYMBOL_GPL(qeth_put_cmd);
static void qeth_release_buffer_cb(struct qeth_card *card,
- struct qeth_channel *channel,
struct qeth_cmd_buffer *iob)
{
- qeth_release_buffer(channel, iob);
+ qeth_put_cmd(iob);
}
static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
struct qeth_reply *reply = iob->reply;
- if (reply) {
- reply->rc = rc;
- qeth_notify_reply(reply);
- }
- qeth_release_buffer(iob->channel, iob);
+ if (reply)
+ qeth_notify_reply(reply, rc);
+ qeth_put_cmd(iob);
}
-static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
+struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
+ unsigned int length, unsigned int ccws,
+ long timeout)
{
- struct qeth_cmd_buffer *buffer = NULL;
- unsigned long flags;
+ struct qeth_cmd_buffer *iob;
- spin_lock_irqsave(&channel->iob_lock, flags);
- buffer = __qeth_get_buffer(channel);
- spin_unlock_irqrestore(&channel->iob_lock, flags);
- return buffer;
-}
+ if (length > QETH_BUFSIZE)
+ return NULL;
-struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
-{
- struct qeth_cmd_buffer *buffer;
- wait_event(channel->wait_q,
- ((buffer = qeth_get_buffer(channel)) != NULL));
- return buffer;
-}
-EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
+ iob = kzalloc(sizeof(*iob), GFP_KERNEL);
+ if (!iob)
+ return NULL;
-void qeth_clear_cmd_buffers(struct qeth_channel *channel)
-{
- int cnt;
+ iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
+ GFP_KERNEL | GFP_DMA);
+ if (!iob->data) {
+ kfree(iob);
+ return NULL;
+ }
- for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
- qeth_release_buffer(channel, &channel->iob[cnt]);
- channel->io_buf_no = 0;
+ refcount_set(&iob->ref_count, 1);
+ iob->channel = channel;
+ iob->timeout = timeout;
+ iob->length = length;
+ return iob;
}
-EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
+EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
-static void qeth_send_control_data_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
+static void qeth_issue_next_read_cb(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
{
struct qeth_ipa_cmd *cmd = NULL;
struct qeth_reply *reply = NULL;
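qeth_alloc_cmd() makes one DMA-capable allocation per command: the data area (rounded up to 8 bytes) followed by the requested number of channel command words, which __ccw_from_cmd() recovers from that layout:

	/* iob->data: [ length bytes, padded to 8 ][ ccws * sizeof(struct ccw1) ] */
	struct ccw1 *ccw = __ccw_from_cmd(iob);	/* iob->data + ALIGN(iob->length, 8) */

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, iob->length, iob->data);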
@@ -846,18 +808,16 @@ static void qeth_send_control_data_cb(struct qeth_card *card,
}
}
- if (rc <= 0) {
- reply->rc = rc;
- qeth_notify_reply(reply);
- }
-
+ if (rc <= 0)
+ qeth_notify_reply(reply, rc);
qeth_put_reply(reply);
out:
memcpy(&card->seqno.pdu_hdr_ack,
QETH_PDU_HEADER_SEQ_NO(iob->data),
QETH_SEQ_NO_LENGTH);
- qeth_release_buffer(channel, iob);
+ qeth_put_cmd(iob);
+ __qeth_issue_next_read(card);
}
static int qeth_set_thread_start_bit(struct qeth_card *card,
@@ -984,7 +944,7 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
}
static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
- unsigned long intparm, struct irb *irb)
+ struct irb *irb)
{
if (!IS_ERR(irb))
return 0;
@@ -1001,12 +961,6 @@ static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
" on the device\n");
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
- if (intparm == QETH_RCD_PARM) {
- if (card->data.ccwdev == cdev) {
- card->data.state = CH_STATE_DOWN;
- wake_up(&card->wait_q);
- }
- }
return -ETIMEDOUT;
default:
QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
@@ -1049,7 +1003,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
if (qeth_intparm_is_iob(intparm))
iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
- rc = qeth_check_irb_error(card, cdev, intparm, irb);
+ rc = qeth_check_irb_error(card, cdev, irb);
if (rc) {
/* IO was terminated, free its resources. */
if (iob)
@@ -1067,11 +1021,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
channel->state = CH_STATE_HALTED;
- /*let's wake up immediately on data channel*/
- if ((channel == &card->data) && (intparm != 0) &&
- (intparm != QETH_RCD_PARM))
- goto out;
-
if (intparm == QETH_CLEAR_CHANNEL_PARM) {
QETH_CARD_TEXT(card, 6, "clrchpar");
/* we don't have to handle this further */
@@ -1101,10 +1050,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
print_hex_dump(KERN_WARNING, "qeth: sense data ",
DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
}
- if (intparm == QETH_RCD_PARM) {
- channel->state = CH_STATE_DOWN;
- goto out;
- }
+
rc = qeth_get_problem(card, cdev, irb);
if (rc) {
card->read_or_write_problem = 1;
@@ -1116,18 +1062,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
}
- if (intparm == QETH_RCD_PARM) {
- channel->state = CH_STATE_RCD_DONE;
- goto out;
- }
- if (channel == &card->data)
- return;
- if (channel == &card->read &&
- channel->state == CH_STATE_UP)
- __qeth_issue_next_read(card);
-
if (iob && iob->callback)
- iob->callback(card, iob->channel, iob);
+ iob->callback(card, iob);
out:
wake_up(&card->wait_q);
@@ -1173,20 +1109,19 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
qeth_release_skbs(buf);
- for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
+ for (i = 0; i < queue->max_elements; ++i) {
if (buf->buffer->element[i].addr && buf->is_header[i])
kmem_cache_free(qeth_core_header_cache,
buf->buffer->element[i].addr);
buf->is_header[i] = 0;
}
- qeth_scrub_qdio_buffer(buf->buffer,
- QETH_MAX_BUFFER_ELEMENTS(queue->card));
+ qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
buf->next_element_to_fill = 0;
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
-static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
+static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
int j;
@@ -1202,19 +1137,18 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
}
}
-void qeth_clear_qdio_buffers(struct qeth_card *card)
+void qeth_drain_output_queues(struct qeth_card *card)
{
int i;
QETH_CARD_TEXT(card, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- if (card->qdio.out_qs[i]) {
- qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
- }
+ if (card->qdio.out_qs[i])
+ qeth_drain_output_queue(card->qdio.out_qs[i], false);
}
}
-EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
+EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
static void qeth_free_buffer_pool(struct qeth_card *card)
{
@@ -1232,121 +1166,90 @@ static void qeth_free_buffer_pool(struct qeth_card *card)
static void qeth_clean_channel(struct qeth_channel *channel)
{
struct ccw_device *cdev = channel->ccwdev;
- int cnt;
QETH_DBF_TEXT(SETUP, 2, "freech");
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));
-
- for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
- kfree(channel->iob[cnt].data);
- kfree(channel->ccw);
}
-static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
+static void qeth_setup_channel(struct qeth_channel *channel)
{
struct ccw_device *cdev = channel->ccwdev;
- int cnt;
QETH_DBF_TEXT(SETUP, 2, "setupch");
- channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
- if (!channel->ccw)
- return -ENOMEM;
channel->state = CH_STATE_DOWN;
atomic_set(&channel->irq_pending, 0);
- init_waitqueue_head(&channel->wait_q);
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = qeth_irq;
spin_unlock_irq(get_ccwdev_lock(cdev));
+}
- if (!alloc_buffers)
- return 0;
+static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
+{
+ unsigned int count = single ? 1 : card->dev->num_tx_queues;
+ int rc;
- for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
- channel->iob[cnt].data = kmalloc(QETH_BUFSIZE,
- GFP_KERNEL | GFP_DMA);
- if (channel->iob[cnt].data == NULL)
- break;
- channel->iob[cnt].state = BUF_STATE_FREE;
- channel->iob[cnt].channel = channel;
- channel->iob[cnt].callback = qeth_send_control_data_cb;
- }
- if (cnt < QETH_CMD_BUFFER_NO) {
- qeth_clean_channel(channel);
- return -ENOMEM;
- }
- channel->io_buf_no = 0;
- spin_lock_init(&channel->iob_lock);
+ rtnl_lock();
+ rc = netif_set_real_num_tx_queues(card->dev, count);
+ rtnl_unlock();
- return 0;
-}
+ if (rc)
+ return rc;
-static void qeth_set_single_write_queues(struct qeth_card *card)
-{
- if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
- (card->qdio.no_out_queues == 4))
- qeth_free_qdio_buffers(card);
+ if (card->qdio.no_out_queues == count)
+ return 0;
- card->qdio.no_out_queues = 1;
- if (card->qdio.default_out_queue != 0)
- dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
+ if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
+ qeth_free_qdio_queues(card);
- card->qdio.default_out_queue = 0;
-}
+ if (count == 1)
+ dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
-static void qeth_set_multiple_write_queues(struct qeth_card *card)
-{
- if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
- (card->qdio.no_out_queues == 1)) {
- qeth_free_qdio_buffers(card);
- card->qdio.default_out_queue = 2;
- }
- card->qdio.no_out_queues = 4;
+ card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
+ card->qdio.no_out_queues = count;
+ return 0;
}
-static void qeth_update_from_chp_desc(struct qeth_card *card)
+static int qeth_update_from_chp_desc(struct qeth_card *card)
{
struct ccw_device *ccwdev;
struct channel_path_desc_fmt0 *chp_dsc;
+ int rc = 0;
- QETH_DBF_TEXT(SETUP, 2, "chp_desc");
+ QETH_CARD_TEXT(card, 2, "chp_desc");
ccwdev = card->data.ccwdev;
chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
if (!chp_dsc)
- goto out;
+ return -ENOMEM;
card->info.func_level = 0x4100 + chp_dsc->desc;
- if (card->info.type == QETH_CARD_TYPE_IQD)
- goto out;
- /* CHPP field bit 6 == 1 -> single queue */
- if ((chp_dsc->chpp & 0x02) == 0x02)
- qeth_set_single_write_queues(card);
- else
- qeth_set_multiple_write_queues(card);
-out:
+ if (IS_OSD(card) || IS_OSX(card))
+ /* CHPP field bit 6 == 1 -> single queue */
+ rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
+
kfree(chp_dsc);
- QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
- QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
+ QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
+ QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
+ return rc;
}
static void qeth_init_qdio_info(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP, 4, "intqdinf");
+ QETH_CARD_TEXT(card, 4, "intqdinf");
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- card->qdio.no_out_queues = QETH_MAX_QUEUES;
/* inbound */
card->qdio.no_in_queues = 1;
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
- if (card->info.type == QETH_CARD_TYPE_IQD)
+ if (IS_IQD(card))
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
else
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
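qeth_osa_set_output_queues() now routes the single-vs-multi queue decision through the stack: it updates the netdev's real TX queue count under the rtnl lock and frees the QDIO queues only when the count actually changes. The CHPP probe in qeth_update_from_chp_desc() feeds it directly:

	/* CHPP bit 0x02 set -> adapter supports a single output queue only */
	bool single = chp_dsc->chpp & 0x02;

	rc = qeth_osa_set_output_queues(card, single);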
@@ -1404,14 +1307,11 @@ static void qeth_start_kernel_thread(struct work_struct *work)
static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP, 2, "setupcrd");
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "setupcrd");
card->info.type = CARD_RDEV(card)->id.driver_info;
card->state = CARD_STATE_DOWN;
- spin_lock_init(&card->mclock);
spin_lock_init(&card->lock);
- spin_lock_init(&card->ip_lock);
spin_lock_init(&card->thread_mask_lock);
mutex_init(&card->conf_mutex);
mutex_init(&card->discipline_mutex);
@@ -1451,24 +1351,23 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
CARD_WDEV(card) = gdev->cdev[1];
CARD_DDEV(card) = gdev->cdev[2];
- card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
+ card->event_wq = alloc_ordered_workqueue("%s_event", 0,
+ dev_name(&gdev->dev));
if (!card->event_wq)
goto out_wq;
- if (qeth_setup_channel(&card->read, true))
- goto out_ip;
- if (qeth_setup_channel(&card->write, true))
- goto out_channel;
- if (qeth_setup_channel(&card->data, false))
- goto out_data;
+
+ card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
+ if (!card->read_cmd)
+ goto out_read_cmd;
+
+ qeth_setup_channel(&card->read);
+ qeth_setup_channel(&card->write);
+ qeth_setup_channel(&card->data);
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
-out_data:
- qeth_clean_channel(&card->write);
-out_channel:
- qeth_clean_channel(&card->read);
-out_ip:
+out_read_cmd:
destroy_workqueue(card->event_wq);
out_wq:
dev_set_drvdata(&gdev->dev, NULL);
@@ -1571,7 +1470,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
- if (card->info.type == QETH_CARD_TYPE_IQD)
+ if (IS_IQD(card))
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_HALT);
else
@@ -1594,60 +1493,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
-static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
- int *length)
-{
- struct ciw *ciw;
- char *rcd_buf;
- int ret;
- struct qeth_channel *channel = &card->data;
-
- /*
- * scan for RCD command in extended SenseID data
- */
- ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
- if (!ciw || ciw->cmd == 0)
- return -EOPNOTSUPP;
- rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
- if (!rcd_buf)
- return -ENOMEM;
-
- qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
- channel->state = CH_STATE_RCD;
- spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
- ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- QETH_RCD_PARM, LPM_ANYPATH, 0,
- QETH_RCD_TIMEOUT);
- spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
- if (!ret)
- wait_event(card->wait_q,
- (channel->state == CH_STATE_RCD_DONE ||
- channel->state == CH_STATE_DOWN));
- if (channel->state == CH_STATE_DOWN)
- ret = -EIO;
- else
- channel->state = CH_STATE_DOWN;
- if (ret) {
- kfree(rcd_buf);
- *buffer = NULL;
- *length = 0;
- } else {
- *length = ciw->count;
- *buffer = rcd_buf;
- }
- return ret;
-}
-
-static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
-{
- QETH_DBF_TEXT(SETUP, 2, "cfgunit");
- card->info.chpid = prcd[30];
- card->info.unit_addr2 = prcd[31];
- card->info.cula = prcd[63];
- card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
- (prcd[0x11] == _ascebc['M']));
-}
-
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
@@ -1657,7 +1502,7 @@ static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
char userid[80];
int rc = 0;
- QETH_DBF_TEXT(SETUP, 2, "vmlayer");
+ QETH_CARD_TEXT(card, 2, "vmlayer");
cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
if (rc)
@@ -1700,7 +1545,7 @@ out:
kfree(response);
kfree(request);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
+ QETH_CARD_TEXT_(card, 2, "err%x", rc);
return disc;
}
@@ -1709,34 +1554,31 @@ static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
- if (card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSN)
+ if (IS_OSM(card) || IS_OSN(card))
disc = QETH_DISCIPLINE_LAYER2;
- else if (card->info.guestlan)
- disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
- QETH_DISCIPLINE_LAYER3 :
- qeth_vm_detect_layer(card);
+ else if (IS_VM_NIC(card))
+ disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
+ qeth_vm_detect_layer(card);
switch (disc) {
case QETH_DISCIPLINE_LAYER2:
- QETH_DBF_TEXT(SETUP, 3, "force l2");
+ QETH_CARD_TEXT(card, 3, "force l2");
break;
case QETH_DISCIPLINE_LAYER3:
- QETH_DBF_TEXT(SETUP, 3, "force l3");
+ QETH_CARD_TEXT(card, 3, "force l3");
break;
default:
- QETH_DBF_TEXT(SETUP, 3, "force no");
+ QETH_CARD_TEXT(card, 3, "force no");
}
return disc;
}
-static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
+static void qeth_set_blkt_defaults(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
+ QETH_CARD_TEXT(card, 2, "cfgblkt");
- if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
- prcd[76] >= 0xF1 && prcd[76] <= 0xF4) {
+ if (card->info.use_v1_blkt) {
card->info.blkt.time_total = 0;
card->info.blkt.inter_packet = 0;
card->info.blkt.inter_packet_jumbo = 0;
@@ -1771,121 +1613,13 @@ static void qeth_init_func_level(struct qeth_card *card)
}
}
-static int qeth_idx_activate_get_answer(struct qeth_card *card,
- struct qeth_channel *channel,
- void (*reply_cb)(struct qeth_card *,
- struct qeth_channel *,
- struct qeth_cmd_buffer *))
-{
- struct qeth_cmd_buffer *iob;
- int rc;
-
- QETH_DBF_TEXT(SETUP, 2, "idxanswr");
- iob = qeth_get_buffer(channel);
- if (!iob)
- return -ENOMEM;
- iob->callback = reply_cb;
- qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
-
- wait_event(card->wait_q,
- atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
- QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
- spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
- rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0, QETH_TIMEOUT);
- spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
-
- if (rc) {
- QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- atomic_set(&channel->irq_pending, 0);
- qeth_release_buffer(channel, iob);
- wake_up(&card->wait_q);
- return rc;
- }
- rc = wait_event_interruptible_timeout(card->wait_q,
- channel->state == CH_STATE_UP, QETH_TIMEOUT);
- if (rc == -ERESTARTSYS)
- return rc;
- if (channel->state != CH_STATE_UP) {
- rc = -ETIME;
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- } else
- rc = 0;
- return rc;
-}
-
-static int qeth_idx_activate_channel(struct qeth_card *card,
- struct qeth_channel *channel,
- void (*reply_cb)(struct qeth_card *,
- struct qeth_channel *,
- struct qeth_cmd_buffer *))
+static void qeth_idx_finalize_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
{
- struct qeth_cmd_buffer *iob;
- __u16 temp;
- __u8 tmp;
- int rc;
- struct ccw_dev_id temp_devid;
-
- QETH_DBF_TEXT(SETUP, 2, "idxactch");
-
- iob = qeth_get_buffer(channel);
- if (!iob)
- return -ENOMEM;
- iob->callback = reply_cb;
- qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE,
- iob->data);
- if (channel == &card->write) {
- memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
- memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
- &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+ memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
+ QETH_SEQ_NO_LENGTH);
+ if (iob->channel == &card->write)
card->seqno.trans_hdr++;
- } else {
- memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
- memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
- &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
- }
- tmp = ((u8)card->dev->dev_port) | 0x80;
- memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
- memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
- &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
- memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
- &card->info.func_level, sizeof(__u16));
- ccw_device_get_id(CARD_DDEV(card), &temp_devid);
- memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
- temp = (card->info.cula << 8) + card->info.unit_addr2;
- memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
-
- wait_event(card->wait_q,
- atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
- QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
- spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
- rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0, QETH_TIMEOUT);
- spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
-
- if (rc) {
- QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
- rc);
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- atomic_set(&channel->irq_pending, 0);
- qeth_release_buffer(channel, iob);
- wake_up(&card->wait_q);
- return rc;
- }
- rc = wait_event_interruptible_timeout(card->wait_q,
- channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
- if (rc == -ERESTARTSYS)
- return rc;
- if (channel->state != CH_STATE_ACTIVATING) {
- dev_warn(&channel->ccwdev->dev, "The qeth device driver"
- " failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
- CCW_DEVID(channel->ccwdev));
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
- return -ETIME;
- }
- return qeth_idx_activate_get_answer(card, channel, reply_cb);
}
static int qeth_peer_func_level(int level)
@@ -1897,117 +1631,41 @@ static int qeth_peer_func_level(int level)
return level;
}
-static void qeth_idx_write_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
+static void qeth_mpc_finalize_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
{
- __u16 temp;
+ qeth_idx_finalize_cmd(card, iob);
- QETH_DBF_TEXT(SETUP , 2, "idxwrcb");
-
- if (channel->state == CH_STATE_DOWN) {
- channel->state = CH_STATE_ACTIVATING;
- goto out;
- }
+ memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
+ &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
+ card->seqno.pdu_hdr++;
+ memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
+ &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
- if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
- dev_err(&channel->ccwdev->dev,
- "The adapter is used exclusively by another "
- "host\n");
- else
- QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
- CCW_DEVID(channel->ccwdev));
- goto out;
- }
- memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
- if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
- QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
- CCW_DEVID(channel->ccwdev),
- card->info.func_level, temp);
- goto out;
- }
- channel->state = CH_STATE_UP;
-out:
- qeth_release_buffer(channel, iob);
+ iob->reply->seqno = QETH_IDX_COMMAND_SEQNO;
+ iob->callback = qeth_release_buffer_cb;
}
-static void qeth_idx_read_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
+static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
+ void *data,
+ unsigned int data_length)
{
- __u16 temp;
-
- QETH_DBF_TEXT(SETUP , 2, "idxrdcb");
- if (channel->state == CH_STATE_DOWN) {
- channel->state = CH_STATE_ACTIVATING;
- goto out;
- }
-
- if (qeth_check_idx_response(card, iob->data))
- goto out;
-
- if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
- case QETH_IDX_ACT_ERR_EXCL:
- dev_err(&channel->ccwdev->dev,
- "The adapter is used exclusively by another "
- "host\n");
- break;
- case QETH_IDX_ACT_ERR_AUTH:
- case QETH_IDX_ACT_ERR_AUTH_USER:
- dev_err(&channel->ccwdev->dev,
- "Setting the device online failed because of "
- "insufficient authorization\n");
- break;
- default:
- QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
- CCW_DEVID(channel->ccwdev));
- }
- QETH_CARD_TEXT_(card, 2, "idxread%c",
- QETH_IDX_ACT_CAUSE_CODE(iob->data));
- goto out;
- }
-
- memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
- if (temp != qeth_peer_func_level(card->info.func_level)) {
- QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
- CCW_DEVID(channel->ccwdev),
- card->info.func_level, temp);
- goto out;
- }
- memcpy(&card->token.issuer_rm_r,
- QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
- QETH_MPC_TOKEN_LENGTH);
- memcpy(&card->info.mcl_level[0],
- QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
- channel->state = CH_STATE_UP;
-out:
- qeth_release_buffer(channel, iob);
-}
+ struct qeth_cmd_buffer *iob;
-void qeth_prepare_control_data(struct qeth_card *card, int len,
- struct qeth_cmd_buffer *iob)
-{
- qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data);
- iob->callback = qeth_release_buffer_cb;
+ iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
+ if (!iob)
+ return NULL;
- memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
- &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
- card->seqno.trans_hdr++;
- memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
- &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
- card->seqno.pdu_hdr++;
- memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
- &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
- QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN));
+ memcpy(iob->data, data, data_length);
+ qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
+ iob->data);
+ iob->finalize = qeth_mpc_finalize_cmd;
+ return iob;
}
-EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
/**
* qeth_send_control_data() - send control command to the card
* @card: qeth_card structure pointer
- * @len: size of the command buffer
* @iob: qeth_cmd_buffer pointer
* @reply_cb: callback function pointer
* @cb_card: pointer to the qeth_card structure
@@ -2027,7 +1685,7 @@ EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
* field 'param' of the structure qeth_reply.
*/
-static int qeth_send_control_data(struct qeth_card *card, int len,
+static int qeth_send_control_data(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
int (*reply_cb)(struct qeth_card *cb_card,
struct qeth_reply *cb_reply,
@@ -2035,50 +1693,43 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
void *reply_param)
{
struct qeth_channel *channel = iob->channel;
+ long timeout = iob->timeout;
int rc;
struct qeth_reply *reply = NULL;
- unsigned long timeout, event_timeout;
- struct qeth_ipa_cmd *cmd = NULL;
QETH_CARD_TEXT(card, 2, "sendctl");
- if (card->read_or_write_problem) {
- qeth_release_buffer(channel, iob);
- return -EIO;
- }
reply = qeth_alloc_reply(card);
if (!reply) {
- qeth_release_buffer(channel, iob);
+ qeth_put_cmd(iob);
return -ENOMEM;
}
reply->callback = reply_cb;
reply->param = reply_param;
- /* pairs with qeth_release_buffer(): */
+ /* pairs with qeth_put_cmd(): */
qeth_get_reply(reply);
iob->reply = reply;
- while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ;
-
- if (IS_IPA(iob->data)) {
- cmd = __ipa_cmd(iob);
- cmd->hdr.seqno = card->seqno.ipa++;
- reply->seqno = cmd->hdr.seqno;
- event_timeout = QETH_IPA_TIMEOUT;
- } else {
- reply->seqno = QETH_IDX_COMMAND_SEQNO;
- event_timeout = QETH_TIMEOUT;
+ timeout = wait_event_interruptible_timeout(card->wait_q,
+ qeth_trylock_channel(channel),
+ timeout);
+ if (timeout <= 0) {
+ qeth_put_reply(reply);
+ qeth_put_cmd(iob);
+ return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
}
- qeth_prepare_control_data(card, len, iob);
- qeth_enqueue_reply(card, reply);
+ if (iob->finalize)
+ iob->finalize(card, iob);
+ QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
- timeout = jiffies + event_timeout;
+ qeth_enqueue_reply(card, reply);
QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
- rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0, event_timeout);
+ rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
+ (addr_t) iob, 0, 0, timeout);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
@@ -2086,36 +1737,227 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
QETH_CARD_TEXT_(card, 2, " err%d", rc);
qeth_dequeue_reply(card, reply);
qeth_put_reply(reply);
- qeth_release_buffer(channel, iob);
+ qeth_put_cmd(iob);
atomic_set(&channel->irq_pending, 0);
wake_up(&card->wait_q);
return rc;
}
- /* we have only one long running ipassist, since we can ensure
- process context of this command we can sleep */
- if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
- cmd->hdr.prot_version == QETH_PROT_IPV4) {
- if (!wait_event_timeout(reply->wait_q,
- atomic_read(&reply->received), event_timeout))
- goto time_err;
- } else {
- while (!atomic_read(&reply->received)) {
- if (time_after(jiffies, timeout))
- goto time_err;
- cpu_relax();
- }
- }
+ timeout = wait_for_completion_interruptible_timeout(&reply->received,
+ timeout);
+ if (timeout <= 0)
+ rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
qeth_dequeue_reply(card, reply);
- rc = reply->rc;
+ if (!rc)
+ rc = reply->rc;
qeth_put_reply(reply);
return rc;
+}
-time_err:
- qeth_dequeue_reply(card, reply);
- qeth_put_reply(reply);
- return -ETIME;
+static void qeth_read_conf_data_cb(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
+{
+ unsigned char *prcd = iob->data;
+
+ QETH_CARD_TEXT(card, 2, "cfgunit");
+ card->info.chpid = prcd[30];
+ card->info.unit_addr2 = prcd[31];
+ card->info.cula = prcd[63];
+ card->info.is_vm_nic = ((prcd[0x10] == _ascebc['V']) &&
+ (prcd[0x11] == _ascebc['M']));
+ card->info.use_v1_blkt = prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
+ prcd[76] >= 0xF1 && prcd[76] <= 0xF4;
+
+ qeth_notify_reply(iob->reply, 0);
+ qeth_put_cmd(iob);
+}
+
+static int qeth_read_conf_data(struct qeth_card *card)
+{
+ struct qeth_channel *channel = &card->data;
+ struct qeth_cmd_buffer *iob;
+ struct ciw *ciw;
+
+ /* scan for RCD command in extended SenseID data */
+ ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
+ if (!ciw || ciw->cmd == 0)
+ return -EOPNOTSUPP;
+
+ iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
+ if (!iob)
+ return -ENOMEM;
+
+ iob->callback = qeth_read_conf_data_cb;
+ qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
+ iob->data);
+
+ return qeth_send_control_data(card, iob, NULL, NULL);
+}
+
+static int qeth_idx_check_activate_response(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
+{
+ int rc;
+
+ rc = qeth_check_idx_response(card, iob->data);
+ if (rc)
+ return rc;
+
+ if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
+ return 0;
+
+ /* negative reply: */
+ QETH_CARD_TEXT_(card, 2, "idxneg%c",
+ QETH_IDX_ACT_CAUSE_CODE(iob->data));
+
+ switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
+ case QETH_IDX_ACT_ERR_EXCL:
+ dev_err(&channel->ccwdev->dev,
+ "The adapter is used exclusively by another host\n");
+ return -EBUSY;
+ case QETH_IDX_ACT_ERR_AUTH:
+ case QETH_IDX_ACT_ERR_AUTH_USER:
+ dev_err(&channel->ccwdev->dev,
+ "Setting the device online failed because of insufficient authorization\n");
+ return -EPERM;
+ default:
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
+ CCW_DEVID(channel->ccwdev));
+ return -EIO;
+ }
+}
+
+static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
+{
+ struct qeth_channel *channel = iob->channel;
+ u16 peer_level;
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "idxrdcb");
+
+ rc = qeth_idx_check_activate_response(card, channel, iob);
+ if (rc)
+ goto out;
+
+ memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
+ if (peer_level != qeth_peer_func_level(card->info.func_level)) {
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+ CCW_DEVID(channel->ccwdev),
+ card->info.func_level, peer_level);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&card->token.issuer_rm_r,
+ QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
+ QETH_MPC_TOKEN_LENGTH);
+ memcpy(&card->info.mcl_level[0],
+ QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
+
+out:
+ qeth_notify_reply(iob->reply, rc);
+ qeth_put_cmd(iob);
+}
+
+static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
+{
+ struct qeth_channel *channel = iob->channel;
+ u16 peer_level;
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "idxwrcb");
+
+ rc = qeth_idx_check_activate_response(card, channel, iob);
+ if (rc)
+ goto out;
+
+ memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
+ if ((peer_level & ~0x0100) !=
+ qeth_peer_func_level(card->info.func_level)) {
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+ CCW_DEVID(channel->ccwdev),
+ card->info.func_level, peer_level);
+ rc = -EINVAL;
+ }
+
+out:
+ qeth_notify_reply(iob->reply, rc);
+ qeth_put_cmd(iob);
+}
+
+static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
+{
+ u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
+ u8 port = ((u8)card->dev->dev_port) | 0x80;
+ struct ccw1 *ccw = __ccw_from_cmd(iob);
+ struct ccw_dev_id dev_id;
+
+ qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
+ iob->data);
+ qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
+ ccw_device_get_id(CARD_DDEV(card), &dev_id);
+ iob->finalize = qeth_idx_finalize_cmd;
+
+ memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
+ memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
+ &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
+ memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
+ &card->info.func_level, 2);
+ memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
+ memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
+}
+
+static int qeth_idx_activate_read_channel(struct qeth_card *card)
+{
+ struct qeth_channel *channel = &card->read;
+ struct qeth_cmd_buffer *iob;
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "idxread");
+
+ iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
+ if (!iob)
+ return -ENOMEM;
+
+ memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
+ qeth_idx_setup_activate_cmd(card, iob);
+ iob->callback = qeth_idx_activate_read_channel_cb;
+
+ rc = qeth_send_control_data(card, iob, NULL, NULL);
+ if (rc)
+ return rc;
+
+ channel->state = CH_STATE_UP;
+ return 0;
+}
+
+static int qeth_idx_activate_write_channel(struct qeth_card *card)
+{
+ struct qeth_channel *channel = &card->write;
+ struct qeth_cmd_buffer *iob;
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "idxwrite");
+
+ iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
+ if (!iob)
+ return -ENOMEM;
+
+ memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
+ qeth_idx_setup_activate_cmd(card, iob);
+ iob->callback = qeth_idx_activate_write_channel_cb;
+
+ rc = qeth_send_control_data(card, iob, NULL, NULL);
+ if (rc)
+ return rc;
+
+ channel->state = CH_STATE_UP;
+ return 0;
}
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
@@ -2123,7 +1965,7 @@ static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
{
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "cmenblcb");
+ QETH_CARD_TEXT(card, 2, "cmenblcb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.cm_filter_r,
@@ -2134,21 +1976,20 @@ static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
static int qeth_cm_enable(struct qeth_card *card)
{
- int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "cmenable");
+ QETH_CARD_TEXT(card, 2, "cmenable");
+
+ iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
+ if (!iob)
+ return -ENOMEM;
- iob = qeth_wait_for_buffer(&card->write);
- memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
&card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
- rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
- qeth_cm_enable_cb, NULL);
- return rc;
+ return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}
static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
@@ -2156,7 +1997,7 @@ static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
{
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");
+ QETH_CARD_TEXT(card, 2, "cmsetpcb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.cm_connection_r,
@@ -2167,22 +2008,21 @@ static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
static int qeth_cm_setup(struct qeth_card *card)
{
- int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "cmsetup");
+ QETH_CARD_TEXT(card, 2, "cmsetup");
+
+ iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
+ if (!iob)
+ return -ENOMEM;
- iob = qeth_wait_for_buffer(&card->write);
- memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
&card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
&card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
- rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
- qeth_cm_setup_cb, NULL);
- return rc;
+ return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}
static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
@@ -2206,7 +2046,7 @@ static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
/* adjust RX buffer size to new max MTU: */
card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
if (dev->max_mtu && dev->max_mtu != max_mtu)
- qeth_free_qdio_buffers(card);
+ qeth_free_qdio_queues(card);
} else {
if (dev->mtu)
new_mtu = dev->mtu;
@@ -2247,13 +2087,13 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
__u8 link_type;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "ulpenacb");
+ QETH_CARD_TEXT(card, 2, "ulpenacb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.ulp_filter_r,
QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
- if (card->info.type == QETH_CARD_TYPE_IQD) {
+ if (IS_IQD(card)) {
memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
mtu = qeth_get_mtu_outof_framesize(framesize);
} else {
@@ -2268,7 +2108,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
card->info.link_type = link_type;
} else
card->info.link_type = 0;
- QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
+ QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
return 0;
}
@@ -2286,11 +2126,11 @@ static int qeth_ulp_enable(struct qeth_card *card)
u16 max_mtu;
int rc;
- /*FIXME: trace view callbacks*/
- QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
+ QETH_CARD_TEXT(card, 2, "ulpenabl");
- iob = qeth_wait_for_buffer(&card->write);
- memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
+ iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
+ if (!iob)
+ return -ENOMEM;
*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
@@ -2298,8 +2138,7 @@ static int qeth_ulp_enable(struct qeth_card *card)
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
&card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
- rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
- qeth_ulp_enable_cb, &max_mtu);
+ rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
if (rc)
return rc;
return qeth_update_max_mtu(card, max_mtu);
@@ -2310,7 +2149,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
{
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
+ QETH_CARD_TEXT(card, 2, "ulpstpcb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.ulp_connection_r,
@@ -2318,7 +2157,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
QETH_MPC_TOKEN_LENGTH);
if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
3)) {
- QETH_DBF_TEXT(SETUP, 2, "olmlimit");
+ QETH_CARD_TEXT(card, 2, "olmlimit");
dev_err(&card->gdev->dev, "A connection could not be "
"established because of an OLM limit\n");
return -EMLINK;
@@ -2328,15 +2167,15 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
static int qeth_ulp_setup(struct qeth_card *card)
{
- int rc;
__u16 temp;
struct qeth_cmd_buffer *iob;
struct ccw_dev_id dev_id;
- QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
+ QETH_CARD_TEXT(card, 2, "ulpsetup");
- iob = qeth_wait_for_buffer(&card->write);
- memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
+ iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
+ if (!iob)
+ return -ENOMEM;
memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
@@ -2349,9 +2188,7 @@ static int qeth_ulp_setup(struct qeth_card *card)
memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
temp = (card->info.cula << 8) + card->info.unit_addr2;
memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
- rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
- qeth_ulp_setup_cb, NULL);
- return rc;
+ return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
@@ -2377,12 +2214,12 @@ static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
if (!q)
return;
- qeth_clear_outq_buffers(q, 1);
+ qeth_drain_output_queue(q, true);
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
kfree(q);
}
-static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
+static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
@@ -2396,17 +2233,17 @@ static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
return q;
}
-static int qeth_alloc_qdio_buffers(struct qeth_card *card)
+static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
int i, j;
- QETH_DBF_TEXT(SETUP, 2, "allcqdbf");
+ QETH_CARD_TEXT(card, 2, "allcqdbf");
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
return 0;
- QETH_DBF_TEXT(SETUP, 2, "inq");
+ QETH_CARD_TEXT(card, 2, "inq");
card->qdio.in_q = qeth_alloc_qdio_queue();
if (!card->qdio.in_q)
goto out_nomem;
@@ -2417,11 +2254,12 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
/* outbound */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
+ card->qdio.out_qs[i] = qeth_alloc_output_queue();
if (!card->qdio.out_qs[i])
goto out_freeoutq;
- QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
- QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
+ QETH_CARD_TEXT_(card, 2, "outq %i", i);
+ QETH_CARD_HEX(card, 2, &card->qdio.out_qs[i], sizeof(void *));
+ card->qdio.out_qs[i]->card = card;
card->qdio.out_qs[i]->queue_no = i;
/* give outbound qeth_qdio_buffers their qdio_buffers */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
@@ -2458,7 +2296,7 @@ out_nomem:
return -ENOMEM;
}
-static void qeth_free_qdio_buffers(struct qeth_card *card)
+static void qeth_free_qdio_queues(struct qeth_card *card)
{
int i, j;
@@ -2511,84 +2349,83 @@ static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
static int qeth_qdio_activate(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP, 3, "qdioact");
+ QETH_CARD_TEXT(card, 3, "qdioact");
return qdio_activate(CARD_DDEV(card));
}
static int qeth_dm_act(struct qeth_card *card)
{
- int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "dmact");
+ QETH_CARD_TEXT(card, 2, "dmact");
- iob = qeth_wait_for_buffer(&card->write);
- memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
+ iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
+ if (!iob)
+ return -ENOMEM;
memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
- rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
- return rc;
+ return qeth_send_control_data(card, iob, NULL, NULL);
}
static int qeth_mpc_initialize(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(SETUP, 2, "mpcinit");
+ QETH_CARD_TEXT(card, 2, "mpcinit");
rc = qeth_issue_next_read(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
return rc;
}
rc = qeth_cm_enable(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "2err%d", rc);
goto out_qdio;
}
rc = qeth_cm_setup(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "3err%d", rc);
goto out_qdio;
}
rc = qeth_ulp_enable(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "4err%d", rc);
goto out_qdio;
}
rc = qeth_ulp_setup(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
goto out_qdio;
}
- rc = qeth_alloc_qdio_buffers(card);
+ rc = qeth_alloc_qdio_queues(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
goto out_qdio;
}
rc = qeth_qdio_establish(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
- qeth_free_qdio_buffers(card);
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
+ qeth_free_qdio_queues(card);
goto out_qdio;
}
rc = qeth_qdio_activate(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "7err%d", rc);
goto out_qdio;
}
rc = qeth_dm_act(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "8err%d", rc);
goto out_qdio;
}
return 0;
out_qdio:
- qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+ qeth_qdio_clear_card(card, !IS_IQD(card));
qdio_free(CARD_DDEV(card));
return rc;
}
@@ -2611,8 +2448,7 @@ void qeth_print_status_message(struct qeth_card *card)
}
/* fallthrough */
case QETH_CARD_TYPE_IQD:
- if ((card->info.guestlan) ||
- (card->info.mcl_level[0] & 0x80)) {
+ if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
card->info.mcl_level[0] = (char) _ebcasc[(__u8)
card->info.mcl_level[0]];
card->info.mcl_level[1] = (char) _ebcasc[(__u8)
@@ -2733,10 +2569,10 @@ static int qeth_init_input_buffer(struct qeth_card *card,
int qeth_init_qdio_queues(struct qeth_card *card)
{
- int i, j;
+ unsigned int i;
int rc;
- QETH_DBF_TEXT(SETUP, 2, "initqdqs");
+ QETH_CARD_TEXT(card, 2, "initqdqs");
/* inbound queue */
qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
@@ -2750,7 +2586,7 @@ int qeth_init_qdio_queues(struct qeth_card *card)
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
card->qdio.in_buf_pool.buf_count - 1);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
return rc;
}
@@ -2762,54 +2598,39 @@ int qeth_init_qdio_queues(struct qeth_card *card)
/* outbound queue */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs,
- QDIO_MAX_BUFFERS_PER_Q);
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
- qeth_clear_output_buffer(card->qdio.out_qs[i],
- card->qdio.out_qs[i]->bufs[j]);
- }
- card->qdio.out_qs[i]->card = card;
- card->qdio.out_qs[i]->next_buf_to_fill = 0;
- card->qdio.out_qs[i]->do_pack = 0;
- atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
- atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
- atomic_set(&card->qdio.out_qs[i]->state,
- QETH_OUT_Q_UNLOCKED);
+ struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
+
+ qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
+ queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+ queue->next_buf_to_fill = 0;
+ queue->do_pack = 0;
+ atomic_set(&queue->used_buffers, 0);
+ atomic_set(&queue->set_pci_flags_count, 0);
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
}
return 0;
}
EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
-static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
+static void qeth_ipa_finalize_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
{
- switch (link_type) {
- case QETH_LINK_TYPE_HSTR:
- return 2;
- default:
- return 1;
- }
-}
+ qeth_mpc_finalize_cmd(card, iob);
-static void qeth_fill_ipacmd_header(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd,
- enum qeth_ipa_cmds command,
- enum qeth_prot_versions prot)
-{
- cmd->hdr.command = command;
- cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
- /* cmd->hdr.seqno is set by qeth_send_control_data() */
- cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
- cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port;
- cmd->hdr.prim_version_no = IS_LAYER2(card) ? 2 : 1;
- cmd->hdr.param_count = 1;
- cmd->hdr.prot_version = prot;
+ /* override with IPA-specific values: */
+ __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa;
+ iob->reply->seqno = card->seqno.ipa++;
}
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
u16 cmd_length)
{
- u16 total_length = IPA_PDU_HEADER_SIZE + cmd_length;
u8 prot_type = qeth_mpc_select_prot_type(card);
+ u16 total_length = iob->length;
+
+ qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
+ iob->data);
+ iob->finalize = qeth_ipa_finalize_cmd;
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
@@ -2822,25 +2643,35 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
-struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
- enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
+struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
+ enum qeth_ipa_cmds cmd_code,
+ enum qeth_prot_versions prot,
+ unsigned int data_length)
{
+ enum qeth_link_types link_type = card->info.link_type;
struct qeth_cmd_buffer *iob;
+ struct qeth_ipacmd_hdr *hdr;
- iob = qeth_get_buffer(&card->write);
- if (iob) {
- qeth_prepare_ipa_cmd(card, iob, sizeof(struct qeth_ipa_cmd));
- qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot);
- } else {
- dev_warn(&card->gdev->dev,
- "The qeth driver ran out of channel command buffers\n");
- QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers",
- CARD_DEVID(card));
- }
+ data_length += offsetof(struct qeth_ipa_cmd, data);
+ iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
+ QETH_IPA_TIMEOUT);
+ if (!iob)
+ return NULL;
+ qeth_prepare_ipa_cmd(card, iob, data_length);
+
+ hdr = &__ipa_cmd(iob)->hdr;
+ hdr->command = cmd_code;
+ hdr->initiator = IPA_CMD_INITIATOR_HOST;
+ /* hdr->seqno is set by qeth_send_control_data() */
+ hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1;
+ hdr->rel_adapter_no = (u8) card->dev->dev_port;
+ hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
+ hdr->param_count = 1;
+ hdr->prot_version = prot;
return iob;
}
-EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
+EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
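
qeth_ipa_alloc_cmd() now derives the allocation size from offsetof(struct qeth_ipa_cmd, data) plus the caller's payload length, so the buffer covers exactly the IPA header and the variable-length payload behind it. A stand-alone sketch of this offsetof-based sizing, with a deliberately simplified, hypothetical struct layout:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* simplified stand-ins for the driver's command layout (hypothetical) */
struct ipa_hdr {
	unsigned char command;
	unsigned char prot_version;
	unsigned short param_count;
};

struct ipa_cmd {
	struct ipa_hdr hdr;
	unsigned char data[];	/* variable-length payload */
};

int main(void)
{
	size_t payload = 40;	/* the caller's data_length */
	/* header + payload, exactly as offsetof(..., data) + data_length */
	size_t total = offsetof(struct ipa_cmd, data) + payload;
	struct ipa_cmd *cmd = malloc(total);

	if (!cmd)
		return 1;
	printf("header %zu + payload %zu = %zu bytes allocated\n",
	       offsetof(struct ipa_cmd, data), payload, total);
	free(cmd);
	return 0;
}
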
static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -2861,15 +2692,18 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
unsigned long),
void *reply_param)
{
- u16 length;
int rc;
QETH_CARD_TEXT(card, 4, "sendipa");
+ if (card->read_or_write_problem) {
+ qeth_put_cmd(iob);
+ return -EIO;
+ }
+
if (reply_cb == NULL)
reply_cb = qeth_send_ipa_cmd_cb;
- memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2);
- rc = qeth_send_control_data(card, length, iob, reply_cb, reply_param);
+ rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
if (rc == -ETIME) {
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
@@ -2893,9 +2727,9 @@ static int qeth_send_startlan(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "strtlan");
+ QETH_CARD_TEXT(card, 2, "strtlan");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
if (!iob)
return -ENOMEM;
return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
@@ -2921,7 +2755,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
card->info.link_type =
cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
- QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
+ QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
}
card->options.adp.supported_funcs =
cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
@@ -2929,21 +2763,24 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
}
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
- __u32 command, __u32 cmdlen)
+ enum qeth_ipa_setadp_cmd adp_cmd,
+ unsigned int data_length)
{
+ struct qeth_ipacmd_setadpparms_hdr *hdr;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
- QETH_PROT_IPV4);
- if (iob) {
- cmd = __ipa_cmd(iob);
- cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
- cmd->data.setadapterparms.hdr.command_code = command;
- cmd->data.setadapterparms.hdr.used_total = 1;
- cmd->data.setadapterparms.hdr.seq_no = 1;
- }
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
+ data_length +
+ offsetof(struct qeth_ipacmd_setadpparms,
+ data));
+ if (!iob)
+ return NULL;
+ hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
+ hdr->cmdlength = sizeof(*hdr) + data_length;
+ hdr->command_code = adp_cmd;
+ hdr->used_total = 1;
+ hdr->seq_no = 1;
return iob;
}
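
The SETADP_DATA_SIZEOF() conversions in the callers that follow replace hand-added header sizes with the size of the specific payload member. That macro's definition is outside this diff; one common way to express such a macro is the sizeof-field idiom, sketched here with a hypothetical payload union:

#include <stdio.h>

/* sizeof-field idiom: size of one member without needing an instance;
 * sizeof does not evaluate its operand, so the null pointer is never
 * dereferenced */
#define SIZEOF_FIELD(type, member) sizeof(((type *)0)->member)

struct adp_data {
	union {
		unsigned int mode;
		unsigned char change_addr[12];
	} payload;
};

int main(void)
{
	printf("mode payload: %zu bytes\n",
	       SIZEOF_FIELD(struct adp_data, payload.mode));
	printf("change_addr payload: %zu bytes\n",
	       SIZEOF_FIELD(struct adp_data, payload.change_addr));
	return 0;
}
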
@@ -2954,7 +2791,7 @@ static int qeth_query_setadapterparms(struct qeth_card *card)
QETH_CARD_TEXT(card, 3, "queryadp");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
- sizeof(struct qeth_ipacmd_setadpparms));
+ SETADP_DATA_SIZEOF(query_cmds_supp));
if (!iob)
return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
@@ -2966,7 +2803,7 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(SETUP, 2, "qipasscb");
+ QETH_CARD_TEXT(card, 2, "qipasscb");
cmd = (struct qeth_ipa_cmd *) data;
@@ -2975,7 +2812,7 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
break;
case IPA_RC_NOTSUPP:
case IPA_RC_L2_UNSUPPORTED_CMD:
- QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
+ QETH_CARD_TEXT(card, 2, "ipaunsup");
card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
return -EOPNOTSUPP;
@@ -3003,8 +2840,8 @@ static int qeth_query_ipassists(struct qeth_card *card,
int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
+ QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
if (!iob)
return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
@@ -3041,14 +2878,32 @@ int qeth_query_switch_attributes(struct qeth_card *card,
return -EOPNOTSUPP;
if (!netif_carrier_ok(card->dev))
return -ENOMEDIUM;
- iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
- sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
if (!iob)
return -ENOMEM;
return qeth_send_ipa_cmd(card, iob,
qeth_query_switch_attributes_cb, sw_info);
}
+struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
+ enum qeth_diags_cmds sub_cmd,
+ unsigned int data_length)
+{
+ struct qeth_ipacmd_diagass *cmd;
+ struct qeth_cmd_buffer *iob;
+
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
+ DIAG_HDR_LEN + data_length);
+ if (!iob)
+ return NULL;
+
+ cmd = &__ipa_cmd(iob)->data.diagass;
+ cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
+ cmd->subcmd = sub_cmd;
+ return iob;
+}
+EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
+
static int qeth_query_setdiagass_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
@@ -3067,15 +2922,11 @@ static int qeth_query_setdiagass_cb(struct qeth_card *card,
static int qeth_query_setdiagass(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(SETUP, 2, "qdiagass");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ QETH_CARD_TEXT(card, 2, "qdiagass");
+ iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
if (!iob)
return -ENOMEM;
- cmd = __ipa_cmd(iob);
- cmd->data.diagass.subcmd_len = 16;
- cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}
@@ -3122,13 +2973,11 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(SETUP, 2, "diagtrap");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ QETH_CARD_TEXT(card, 2, "diagtrap");
+ iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
- cmd->data.diagass.subcmd_len = 80;
- cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
cmd->data.diagass.type = 1;
cmd->data.diagass.action = action;
switch (action) {
@@ -3251,13 +3100,6 @@ static void qeth_handle_send_error(struct qeth_card *card,
int sbalf15 = buffer->buffer->element[15].sflags;
QETH_CARD_TEXT(card, 6, "hdsnderr");
- if (card->info.type == QETH_CARD_TYPE_IQD) {
- if (sbalf15 == 0) {
- qdio_err = 0;
- } else {
- qdio_err = 1;
- }
- }
qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
if (!qdio_err)
@@ -3348,7 +3190,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (queue->bufstates)
queue->bufstates[bidx].user = buf;
- if (queue->card->info.type == QETH_CARD_TYPE_IQD)
+ if (IS_IQD(queue->card))
continue;
if (!queue->do_pack) {
@@ -3378,11 +3220,9 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
}
QETH_TXQ_STAT_ADD(queue, bufs, count);
- netif_trans_update(queue->card->dev);
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
- atomic_add(count, &queue->used_buffers);
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
if (rc) {
@@ -3422,7 +3262,6 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
* do_send_packet. So, we check if there is a
* packing buffer to be flushed here.
*/
- netif_stop_queue(queue->card->dev);
index = queue->next_buf_to_fill;
q_was_packing = queue->do_pack;
/* queue->do_pack may change */
@@ -3467,7 +3306,7 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
goto out;
}
- qeth_free_qdio_buffers(card);
+ qeth_free_qdio_queues(card);
card->options.cq = cq;
rc = 0;
}
@@ -3493,7 +3332,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
if (qdio_err) {
- netif_stop_queue(card->dev);
+ netif_tx_stop_all_queues(card->dev);
qeth_schedule_recovery(card);
return;
}
@@ -3549,12 +3388,14 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
struct qeth_card *card = (struct qeth_card *) card_ptr;
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
struct qeth_qdio_out_buffer *buffer;
+ struct net_device *dev = card->dev;
+ struct netdev_queue *txq;
int i;
QETH_CARD_TEXT(card, 6, "qdouhdl");
if (qdio_error & QDIO_ERROR_FATAL) {
QETH_CARD_TEXT(card, 2, "achkcond");
- netif_stop_queue(card->dev);
+ netif_tx_stop_all_queues(dev);
qeth_schedule_recovery(card);
return;
}
@@ -3580,7 +3421,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
/* prepare the queue slot for re-use: */
qeth_scrub_qdio_buffer(buffer->buffer,
- QETH_MAX_BUFFER_ELEMENTS(card));
+ queue->max_elements);
if (qeth_init_qdio_out_buf(queue, bidx)) {
QETH_CARD_TEXT(card, 2, "outofbuf");
qeth_schedule_recovery(card);
@@ -3600,33 +3441,32 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
}
atomic_sub(count, &queue->used_buffers);
/* check if we need to do something on this outbound queue */
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
qeth_check_outbound_queue(queue);
- netif_wake_queue(queue->card->dev);
-}
-
-/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
-static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
-{
- if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
- return 2;
- return queue_num;
+ if (IS_IQD(card))
+ __queue = qeth_iqd_translate_txq(dev, __queue);
+ txq = netdev_get_tx_queue(dev, __queue);
+ /* xmit may have observed the full-condition, but not yet stopped the
+ * txq. In that case the code below won't trigger; before returning,
+ * xmit re-checks the txq's fill level and wakes it up if needed.
+ */
+ if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
+ netif_tx_wake_queue(txq);
}
/**
* Note: Function assumes that we have 4 outbound queues.
*/
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
- int ipv)
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
- __be16 *tci;
+ struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
u8 tos;
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_TOS:
case QETH_PRIO_Q_ING_PREC:
- switch (ipv) {
+ switch (qeth_get_ip_version(skb)) {
case 4:
tos = ipv4_get_dsfield(ip_hdr(skb));
break;
@@ -3637,9 +3477,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
return card->qdio.default_out_queue;
}
if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
- return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
+ return ~tos >> 6 & 3;
if (tos & IPTOS_MINCOST)
- return qeth_cut_iqd_prio(card, 3);
+ return 3;
if (tos & IPTOS_RELIABILITY)
return 2;
if (tos & IPTOS_THROUGHPUT)
@@ -3650,12 +3490,11 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
case QETH_PRIO_Q_ING_SKB:
if (skb->priority > 5)
return 0;
- return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
+ return ~skb->priority >> 1 & 3;
case QETH_PRIO_Q_ING_VLAN:
- tci = &((struct ethhdr *)skb->data)->h_proto;
- if (be16_to_cpu(*tci) == ETH_P_8021Q)
- return qeth_cut_iqd_prio(card,
- ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3);
+ if (veth->h_vlan_proto == htons(ETH_P_8021Q))
+ return ~ntohs(veth->h_vlan_TCI) >>
+ (VLAN_PRIO_SHIFT + 1) & 3;
break;
default:
break;
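
All of these branches share one idiom: inverting the priority bits before shifting (~value >> shift & 3) maps the highest priorities to queue 0 and the lowest to queue 3, two adjacent priority values per queue; the old qeth_cut_iqd_prio() clamp is gone because IQD devices now translate queue numbers separately. A runnable sketch of the mapping on sample IP-precedence and VLAN-PCP values (function names are illustrative; like the driver, it relies on the compiler's arithmetic right shift of negative ints):

#include <stdio.h>

#define VLAN_PRIO_SHIFT 13

static unsigned int queue_from_precedence(unsigned char tos)
{
	return ~tos >> 6 & 3;	/* top two TOS bits, inverted */
}

static unsigned int queue_from_pcp(unsigned short tci)
{
	return ~tci >> (VLAN_PRIO_SHIFT + 1) & 3;	/* top two PCP bits */
}

int main(void)
{
	for (unsigned int prec = 0; prec < 8; prec++)
		printf("IP precedence %u -> queue %u\n", prec,
		       queue_from_precedence((unsigned char)(prec << 5)));
	for (unsigned int pcp = 0; pcp < 8; pcp++)
		printf("VLAN PCP %u -> queue %u\n", pcp,
		       queue_from_pcp((unsigned short)(pcp << VLAN_PRIO_SHIFT)));
	return 0;
}
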
@@ -3729,8 +3568,8 @@ static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
unsigned int hdr_len, unsigned int proto_len,
unsigned int *elements)
{
- const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(queue->card);
const unsigned int contiguous = proto_len ? proto_len : 1;
+ const unsigned int max_elements = queue->max_elements;
unsigned int __elements;
addr_t start, end;
bool push_ok;
@@ -3748,8 +3587,8 @@ check_layout:
__elements = 1 + qeth_count_elements(skb, proto_len);
else
__elements = qeth_count_elements(skb, 0);
- } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
- /* Push HW header into a new page. */
+ } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
+ /* Push HW header into preceding page, flush with skb->data. */
push_ok = true;
__elements = 1 + qeth_count_elements(skb, 0);
} else {
@@ -3803,18 +3642,16 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
int element = buf->next_element_to_fill;
int length = skb_headlen(skb) - offset;
char *data = skb->data + offset;
- int length_here, cnt;
+ unsigned int elem_length, cnt;
/* map linear part into buffer element(s) */
while (length > 0) {
- /* length_here is the remaining amount of data in this page */
- length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
- if (length < length_here)
- length_here = length;
+ elem_length = min_t(unsigned int, length,
+ PAGE_SIZE - offset_in_page(data));
buffer->element[element].addr = data;
- buffer->element[element].length = length_here;
- length -= length_here;
+ buffer->element[element].length = elem_length;
+ length -= elem_length;
if (is_first_elem) {
is_first_elem = false;
if (length || skb_is_nonlinear(skb))
@@ -3827,7 +3664,8 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
buffer->element[element].eflags =
SBAL_EFLAGS_MIDDLE_FRAG;
}
- data += length_here;
+
+ data += elem_length;
element++;
}
@@ -3838,17 +3676,16 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
data = skb_frag_address(frag);
length = skb_frag_size(frag);
while (length > 0) {
- length_here = PAGE_SIZE -
- ((unsigned long) data % PAGE_SIZE);
- if (length < length_here)
- length_here = length;
+ elem_length = min_t(unsigned int, length,
+ PAGE_SIZE - offset_in_page(data));
buffer->element[element].addr = data;
- buffer->element[element].length = length_here;
+ buffer->element[element].length = elem_length;
buffer->element[element].eflags =
SBAL_EFLAGS_MIDDLE_FRAG;
- length -= length_here;
- data += length_here;
+
+ length -= elem_length;
+ data += elem_length;
element++;
}
}
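
Both mapping loops above now use the same min_t()/offset_in_page() expression: a buffer element may cover at most the rest of the current page, so a linear area or fragment that crosses a page boundary is split into several elements. A stand-alone sketch of that chunking (4 KiB page size assumed for illustration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define offset_in_page(p) ((unsigned int)((uintptr_t)(p) & (PAGE_SIZE - 1)))

/* split [data, data + length) into elements that never cross a page */
static void map_elements(const char *data, unsigned int length)
{
	while (length > 0) {
		unsigned int elem_length = PAGE_SIZE - offset_in_page(data);

		if (elem_length > length)	/* min_t(unsigned int, ...) */
			elem_length = length;
		printf("element: addr=%p len=%u\n", (const void *)data,
		       elem_length);
		length -= elem_length;
		data += elem_length;
	}
}

int main(void)
{
	static char buf[4 * PAGE_SIZE];
	/* start 100 bytes before a page boundary inside buf: */
	char *start = (char *)((((uintptr_t)buf | (PAGE_SIZE - 1)) + 1)
			       + PAGE_SIZE - 100);

	map_elements(start, 5000);	/* splits into 100 + 4096 + 804 */
	return 0;
}
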
@@ -3867,11 +3704,13 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
* from qeth_core_header_cache.
* @offset: when mapping the skb, start at skb->data + offset
* @hd_len: if > 0, build a dedicated header element of this size
+ * @flush: prepare the buffer to be flushed, regardless of its fill level
*/
static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len)
+ unsigned int offset, unsigned int hd_len,
+ bool flush)
{
struct qdio_buffer *buffer = buf->buffer;
bool is_first_elem = true;
@@ -3900,8 +3739,8 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
QETH_TXQ_STAT_INC(queue, skbs_pack);
/* If the buffer still has free elements, keep using it. */
- if (buf->next_element_to_fill <
- QETH_MAX_BUFFER_ELEMENTS(queue->card))
+ if (!flush &&
+ buf->next_element_to_fill < queue->max_elements)
return 0;
}
@@ -3918,15 +3757,31 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
{
int index = queue->next_buf_to_fill;
struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
+ struct netdev_queue *txq;
+ bool stopped = false;
- /*
- * check if buffer is empty to make sure that we do not 'overtake'
- * ourselves and try to fill a buffer that is already primed
+ /* Just a sanity check; the wake/stop logic should ensure that we always
+ * get a free buffer.
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
- qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
+
+ txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb));
+
+ if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+ /* If a TX completion happens right _here_ and fails to wake
+ * the txq, then our re-check below will catch the race.
+ */
+ QETH_TXQ_STAT_INC(queue, stopped);
+ netif_tx_stop_queue(txq);
+ stopped = true;
+ }
+
+ qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped);
qeth_flush_buffers(queue, index, 1);
+
+ if (stopped && !qeth_out_queue_is_full(queue))
+ netif_tx_start_queue(txq);
return 0;
}
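
The new used_buffers accounting implements the standard stop/wake protocol for multiqueue TX: the sender stops the txq when its own claim fills the queue, and both the sender and the completion path re-check the fill level afterwards, so a completion racing with the stop cannot strand the queue. A simplified, single-threaded demonstration of those transitions with C11 atomics (queue depth and names are illustrative; the driver uses netif_tx_stop_queue() and netif_tx_start_queue()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_DEPTH 128

static atomic_uint used;
static atomic_bool stopped;

static bool queue_is_full(void)
{
	return atomic_load(&used) >= QUEUE_DEPTH;
}

/* xmit side: claim a buffer, stop the txq if that made the queue full,
 * then re-check so a completion that raced in between can't strand it */
static void xmit_one(void)
{
	if (atomic_fetch_add(&used, 1) + 1 >= QUEUE_DEPTH) {
		atomic_store(&stopped, true);
		if (!queue_is_full())	/* the completion won the race */
			atomic_store(&stopped, false);
	}
}

/* completion side: release buffers, then wake a stopped, non-full txq */
static void complete_some(unsigned int n)
{
	atomic_fetch_sub(&used, n);
	if (atomic_load(&stopped) && !queue_is_full())
		atomic_store(&stopped, false);
}

int main(void)
{
	for (int i = 0; i < QUEUE_DEPTH; i++)
		xmit_one();
	printf("stopped after fill: %d\n", atomic_load(&stopped));	 /* 1 */
	complete_some(16);
	printf("stopped after completion: %d\n", atomic_load(&stopped)); /* 0 */
	return 0;
}
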
@@ -3936,6 +3791,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
int elements_needed)
{
struct qeth_qdio_out_buffer *buffer;
+ struct netdev_queue *txq;
+ bool stopped = false;
int start_index;
int flush_count = 0;
int do_pack = 0;
@@ -3947,21 +3804,24 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
start_index = queue->next_buf_to_fill;
buffer = queue->bufs[queue->next_buf_to_fill];
- /*
- * check if buffer is empty to make sure that we do not 'overtake'
- * ourselves and try to fill a buffer that is already primed
+
+ /* Just a sanity check; the wake/stop logic should ensure that we always
+ * get a free buffer.
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
return -EBUSY;
}
+
+ txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
+
/* check if we need to switch packing state of this queue */
qeth_switch_to_packing_if_needed(queue);
if (queue->do_pack) {
do_pack = 1;
/* does packet fit in current buffer? */
- if ((QETH_MAX_BUFFER_ELEMENTS(card) -
- buffer->next_element_to_fill) < elements_needed) {
+ if (buffer->next_element_to_fill + elements_needed >
+ queue->max_elements) {
/* ... no -> set state PRIMED */
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
flush_count++;
@@ -3969,8 +3829,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
(queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
buffer = queue->bufs[queue->next_buf_to_fill];
- /* we did a step forward, so check buffer state
- * again */
+
+ /* We stepped forward, so sanity-check again: */
if (atomic_read(&buffer->state) !=
QETH_QDIO_BUF_EMPTY) {
qeth_flush_buffers(queue, start_index,
@@ -3983,8 +3843,18 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
}
- flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset,
- hd_len);
+ if (buffer->next_element_to_fill == 0 &&
+ atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+ /* If a TX completion happens right _here_ and fails to wake
+ * the txq, then our re-check below will catch the race.
+ */
+ QETH_TXQ_STAT_INC(queue, stopped);
+ netif_tx_stop_queue(txq);
+ stopped = true;
+ }
+
+ flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len,
+ stopped);
if (flush_count)
qeth_flush_buffers(queue, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
@@ -4015,6 +3885,8 @@ out:
if (do_pack)
QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
+ if (stopped && !qeth_out_queue_is_full(queue))
+ netif_tx_start_queue(txq);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);
@@ -4036,11 +3908,10 @@ static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
}
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv, int cast_type,
+ struct qeth_qdio_out_q *queue, int ipv,
void (*fill_header)(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, int cast_type,
- unsigned int data_len))
+ int ipv, unsigned int data_len))
{
unsigned int proto_len, hw_hdr_len;
unsigned int frame_len = skb->len;
@@ -4074,7 +3945,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
data_offset = push_len + proto_len;
}
memset(hdr, 0, hw_hdr_len);
- fill_header(queue, hdr, skb, ipv, cast_type, frame_len);
+ fill_header(queue, hdr, skb, ipv, frame_len);
if (is_tso)
qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
frame_len - proto_len, skb, proto_len);
@@ -4101,9 +3972,6 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
} else {
if (!push_len)
kmem_cache_free(qeth_core_header_cache, hdr);
- if (rc == -EBUSY)
- /* roll back to ETH header */
- skb_pull(skb, push_len);
}
return rc;
}
@@ -4146,7 +4014,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
- sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8);
+ SETADP_DATA_SIZEOF(mode));
if (!iob)
return;
cmd = __ipa_cmd(iob);
@@ -4186,8 +4054,7 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
QETH_CARD_TEXT(card, 4, "chgmac");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
- sizeof(struct qeth_ipacmd_setadpparms_hdr) +
- sizeof(struct qeth_change_addr));
+ SETADP_DATA_SIZEOF(change_addr));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -4214,10 +4081,8 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
qeth_setadpparms_inspect_rc(cmd);
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
- QETH_DBF_TEXT_(SETUP, 2, "setaccb");
- QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
- QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
- cmd->data.setadapterparms.hdr.return_code);
+ QETH_CARD_TEXT_(card, 2, "rc=%d",
+ cmd->data.setadapterparms.hdr.return_code);
if (cmd->data.setadapterparms.hdr.return_code !=
SET_ACCESS_CTRL_RC_SUCCESS)
QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
@@ -4297,12 +4162,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setacctl");
- QETH_DBF_TEXT_(SETUP, 2, "setacctl");
- QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
-
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
- sizeof(struct qeth_ipacmd_setadpparms_hdr) +
- sizeof(struct qeth_set_access_ctrl));
+ SETADP_DATA_SIZEOF(set_access_ctrl));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -4311,7 +4172,7 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
&fallback);
- QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
+ QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
return rc;
}
@@ -4321,9 +4182,8 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
QETH_CARD_TEXT(card, 4, "setactlo");
- if ((card->info.type == QETH_CARD_TYPE_OSD ||
- card->info.type == QETH_CARD_TYPE_OSX) &&
- qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
+ if ((IS_OSD(card) || IS_OSX(card)) &&
+ qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
rc = qeth_setadpparms_set_access_ctrl(card,
card->options.isolation, fallback);
if (rc) {
@@ -4348,7 +4208,6 @@ void qeth_tx_timeout(struct net_device *dev)
card = dev->ml_priv;
QETH_CARD_TEXT(card, 4, "txtimeo");
- QETH_CARD_STAT_INC(card, tx_errors);
qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
@@ -4460,18 +4319,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
return -ENOSPC;
}
QETH_CARD_TEXT_(card, 4, "snore%i",
- cmd->data.setadapterparms.hdr.used_total);
+ cmd->data.setadapterparms.hdr.used_total);
QETH_CARD_TEXT_(card, 4, "sseqn%i",
- cmd->data.setadapterparms.hdr.seq_no);
+ cmd->data.setadapterparms.hdr.seq_no);
/* copy entries to user buffer */
memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
qinfo->udata_offset += data_len;
- /* check if all replies received ... */
- QETH_CARD_TEXT_(card, 4, "srtot%i",
- cmd->data.setadapterparms.hdr.used_total);
- QETH_CARD_TEXT_(card, 4, "srseq%i",
- cmd->data.setadapterparms.hdr.seq_no);
if (cmd->data.setadapterparms.hdr.seq_no <
cmd->data.setadapterparms.hdr.used_total)
return 1;
@@ -4480,54 +4334,43 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
+ struct qeth_snmp_ureq __user *ureq;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
- struct qeth_snmp_ureq *ureq;
unsigned int req_len;
struct qeth_arp_query_info qinfo = {0, };
int rc = 0;
QETH_CARD_TEXT(card, 3, "snmpcmd");
- if (card->info.guestlan)
+ if (IS_VM_NIC(card))
return -EOPNOTSUPP;
if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
IS_LAYER3(card))
return -EOPNOTSUPP;
- /* skip 4 bytes (data_len struct member) to get req_len */
- if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
+ ureq = (struct qeth_snmp_ureq __user *) udata;
+ if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
+ get_user(req_len, &ureq->hdr.req_len))
+ return -EFAULT;
+
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
+ if (!iob)
+ return -ENOMEM;
+
+ if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
+ &ureq->cmd, req_len)) {
+ qeth_put_cmd(iob);
return -EFAULT;
- if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
- sizeof(struct qeth_ipacmd_hdr) -
- sizeof(struct qeth_ipacmd_setadpparms_hdr)))
- return -EINVAL;
- ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
- if (IS_ERR(ureq)) {
- QETH_CARD_TEXT(card, 2, "snmpnome");
- return PTR_ERR(ureq);
}
- qinfo.udata_len = ureq->hdr.data_len;
+
qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
if (!qinfo.udata) {
- kfree(ureq);
+ qeth_put_cmd(iob);
return -ENOMEM;
}
qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
- iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
- QETH_SNMP_SETADP_CMDLENGTH + req_len);
- if (!iob) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* for large requests, fix-up the length fields: */
- qeth_prepare_ipa_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len);
-
- cmd = __ipa_cmd(iob);
- memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
if (rc)
QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
@@ -4536,8 +4379,7 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
rc = -EFAULT;
}
-out:
- kfree(ureq);
+
kfree(qinfo.udata);
return rc;
}
@@ -4603,8 +4445,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
}
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
- sizeof(struct qeth_ipacmd_setadpparms_hdr) +
- sizeof(struct qeth_query_oat));
+ SETADP_DATA_SIZEOF(query_oat));
if (!iob) {
rc = -ENOMEM;
goto out_free;
@@ -4666,8 +4507,7 @@ int qeth_query_card_info(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "qcrdinfo");
if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
return -EOPNOTSUPP;
- iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
- sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
if (!iob)
return -ENOMEM;
return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
@@ -4689,7 +4529,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
struct ccw_dev_id id;
int rc;
- QETH_DBF_TEXT(SETUP, 2, "vmreqmac");
+ QETH_CARD_TEXT(card, 2, "vmreqmac");
request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
@@ -4714,13 +4554,13 @@ int qeth_vm_request_mac(struct qeth_card *card)
if (request->resp_buf_len < sizeof(*response) ||
response->version != request->resp_version) {
rc = -EIO;
- QETH_DBF_TEXT(SETUP, 2, "badresp");
- QETH_DBF_HEX(SETUP, 2, &request->resp_buf_len,
- sizeof(request->resp_buf_len));
+ QETH_CARD_TEXT(card, 2, "badresp");
+ QETH_CARD_HEX(card, 2, &request->resp_buf_len,
+ sizeof(request->resp_buf_len));
} else if (!is_valid_ether_addr(response->mac)) {
rc = -EINVAL;
- QETH_DBF_TEXT(SETUP, 2, "badmac");
- QETH_DBF_HEX(SETUP, 2, response->mac, ETH_ALEN);
+ QETH_CARD_TEXT(card, 2, "badmac");
+ QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
} else {
ether_addr_copy(card->dev->dev_addr, response->mac);
}
@@ -4732,54 +4572,40 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
-static int qeth_get_qdio_q_format(struct qeth_card *card)
-{
- if (card->info.type == QETH_CARD_TYPE_IQD)
- return QDIO_IQDIO_QFMT;
- else
- return QDIO_QETH_QFMT;
-}
-
static void qeth_determine_capabilities(struct qeth_card *card)
{
int rc;
- int length;
- char *prcd;
struct ccw_device *ddev;
int ddev_offline = 0;
- QETH_DBF_TEXT(SETUP, 2, "detcapab");
+ QETH_CARD_TEXT(card, 2, "detcapab");
ddev = CARD_DDEV(card);
if (!ddev->online) {
ddev_offline = 1;
rc = ccw_device_set_online(ddev);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "3err%d", rc);
goto out;
}
}
- rc = qeth_read_conf_data(card, (void **) &prcd, &length);
+ rc = qeth_read_conf_data(card);
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
CARD_DEVID(card), rc);
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
goto out_offline;
}
- qeth_configure_unitaddr(card, prcd);
- if (ddev_offline)
- qeth_configure_blkt_default(card, prcd);
- kfree(prcd);
rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
- QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
- QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1);
- QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2);
- QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3);
- QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
+ QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
+ QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
+ QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
+ QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
+ QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
@@ -4827,7 +4653,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
int i, j, k;
int rc = 0;
- QETH_DBF_TEXT(SETUP, 2, "qdioest");
+ QETH_CARD_TEXT(card, 2, "qdioest");
qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q,
GFP_KERNEL);
@@ -4878,7 +4704,8 @@ static int qeth_qdio_establish(struct qeth_card *card)
memset(&init_data, 0, sizeof(struct qdio_initialize));
init_data.cdev = CARD_DDEV(card);
- init_data.q_format = qeth_get_qdio_q_format(card);
+ init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
+ QDIO_QETH_QFMT;
init_data.qib_param_field_format = 0;
init_data.qib_param_field = qib_param_field;
init_data.no_input_qs = card->qdio.no_in_queues;
@@ -4890,8 +4717,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.input_sbal_addr_array = in_sbal_ptrs;
init_data.output_sbal_addr_array = out_sbal_ptrs;
init_data.output_sbal_state_array = card->qdio.out_bufstates;
- init_data.scan_threshold =
- (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32;
+ init_data.scan_threshold = IS_IQD(card) ? 1 : 32;
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
@@ -4931,13 +4757,13 @@ out_free_nothing:
static void qeth_core_free_card(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP, 2, "freecrd");
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "freecrd");
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
qeth_clean_channel(&card->data);
+ qeth_put_cmd(card->read_cmd);
destroy_workqueue(card->event_wq);
- qeth_free_qdio_buffers(card);
+ qeth_free_qdio_queues(card);
unregister_service_level(&card->qeth_service_level);
dev_set_drvdata(&card->gdev->dev, NULL);
kfree(card);
@@ -4984,14 +4810,16 @@ int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
int retries = 3;
int rc;
- QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
+ QETH_CARD_TEXT(card, 2, "hrdsetup");
atomic_set(&card->force_alloc_skb, 0);
- qeth_update_from_chp_desc(card);
+ rc = qeth_update_from_chp_desc(card);
+ if (rc)
+ return rc;
retry:
if (retries < 3)
QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
CARD_DEVID(card));
- rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+ rc = qeth_qdio_clear_card(card, !IS_IQD(card));
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
@@ -5007,10 +4835,10 @@ retry:
goto retriable;
retriable:
if (rc == -ERESTARTSYS) {
- QETH_DBF_TEXT(SETUP, 2, "break1");
+ QETH_CARD_TEXT(card, 2, "break1");
return rc;
} else if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
if (--retries < 0)
goto out;
else
@@ -5019,23 +4847,25 @@ retriable:
qeth_determine_capabilities(card);
qeth_init_tokens(card);
qeth_init_func_level(card);
- rc = qeth_idx_activate_channel(card, &card->read, qeth_idx_read_cb);
- if (rc == -ERESTARTSYS) {
- QETH_DBF_TEXT(SETUP, 2, "break2");
+
+ rc = qeth_idx_activate_read_channel(card);
+ if (rc == -EINTR) {
+ QETH_CARD_TEXT(card, 2, "break2");
return rc;
} else if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "3err%d", rc);
if (--retries < 0)
goto out;
else
goto retry;
}
- rc = qeth_idx_activate_channel(card, &card->write, qeth_idx_write_cb);
- if (rc == -ERESTARTSYS) {
- QETH_DBF_TEXT(SETUP, 2, "break3");
+
+ rc = qeth_idx_activate_write_channel(card);
+ if (rc == -EINTR) {
+ QETH_CARD_TEXT(card, 2, "break3");
return rc;
} else if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "4err%d", rc);
if (--retries < 0)
goto out;
else
@@ -5044,13 +4874,13 @@ retriable:
card->read_or_write_problem = 0;
rc = qeth_mpc_initialize(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
goto out;
}
rc = qeth_send_startlan(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
if (rc == -ENETDOWN) {
dev_warn(&card->gdev->dev, "The LAN is offline\n");
*carrier_ok = false;
@@ -5077,14 +4907,14 @@ retriable:
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "7err%d", rc);
goto out;
}
}
if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
rc = qeth_query_setdiagass(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "8err%d", rc);
goto out;
}
}
@@ -5171,7 +5001,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
return NULL;
if (((skb_len >= card->options.rx_sg_cb) &&
- (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
+ !IS_OSN(card) &&
(!atomic_read(&card->force_alloc_skb))) ||
(card->options.cq == QETH_CQ_ENABLED))
use_rx_sg = 1;
@@ -5344,42 +5174,47 @@ EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
- __u16 cmd_code, __u16 len,
+ u16 cmd_code,
+ unsigned int data_length,
enum qeth_prot_versions prot)
{
+ struct qeth_ipacmd_setassparms *setassparms;
+ struct qeth_ipacmd_setassparms_hdr *hdr;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 4, "getasscm");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
+ data_length +
+ offsetof(struct qeth_ipacmd_setassparms,
+ data));
+ if (!iob)
+ return NULL;
- if (iob) {
- cmd = __ipa_cmd(iob);
- cmd->data.setassparms.hdr.assist_no = ipa_func;
- cmd->data.setassparms.hdr.length = 8 + len;
- cmd->data.setassparms.hdr.command_code = cmd_code;
- }
+ setassparms = &__ipa_cmd(iob)->data.setassparms;
+ setassparms->assist_no = ipa_func;
+ hdr = &setassparms->hdr;
+ hdr->length = sizeof(*hdr) + data_length;
+ hdr->command_code = cmd_code;
return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
- u16 cmd_code, long data,
+ u16 cmd_code, u32 *data,
enum qeth_prot_versions prot)
{
- int length = 0;
+ unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
- if (data)
- length = sizeof(__u32);
iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
if (!iob)
return -ENOMEM;
- __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
+ if (data)
+ __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
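
qeth_send_simple_setassparms_prot() now takes u32 *data instead of long data: a NULL pointer means the command carries no flags payload at all, which the old scalar argument could not distinguish from a legitimate flags value of 0. A tiny sketch of this optional-by-pointer convention (hypothetical names; it only illustrates the calling convention):

#include <stdio.h>

/* NULL means "no payload element at all" -- something a plain integer
 * argument could not express */
static unsigned int payload_length(const unsigned int *flags)
{
	return flags ? sizeof(*flags) : 0;
}

int main(void)
{
	unsigned int flags = 0;	/* a real payload whose value happens to be 0 */

	printf("no payload   -> length %u\n", payload_length(NULL));
	printf("flags == 0x0 -> length %u\n", payload_length(&flags));
	return 0;
}
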
@@ -5562,13 +5397,17 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
- dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
+ dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
+ ether_setup, QETH_MAX_QUEUES, 1);
+ break;
+ case QETH_CARD_TYPE_OSM:
+ dev = alloc_etherdev(0);
break;
case QETH_CARD_TYPE_OSN:
dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
break;
default:
- dev = alloc_etherdev(0);
+ dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
}
if (!dev)
@@ -5590,8 +5429,14 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->hw_features |= NETIF_F_SG;
dev->vlan_features |= NETIF_F_SG;
- if (IS_IQD(card))
+ if (IS_IQD(card)) {
dev->features |= NETIF_F_SG;
+ if (netif_set_real_num_tx_queues(dev,
+ QETH_IQD_MIN_TXQ)) {
+ free_netdev(dev);
+ return NULL;
+ }
+ }
}
return dev;
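alloc_etherdev_mqs()/alloc_netdev_mqs() reserve the maximum queue count at allocation time; the active count is then trimmed with netif_set_real_num_tx_queues(), which can fail and must be checked, as above. A userspace analogue of the reserve-then-trim scheme (invented names, toy struct):

	#include <stdio.h>
	#include <stdlib.h>

	struct netdev {
		unsigned int num_tx_queues;	 /* reserved at allocation */
		unsigned int real_num_tx_queues; /* currently in use */
	};

	static struct netdev *alloc_netdev_mqs(unsigned int txqs)
	{
		struct netdev *dev = calloc(1, sizeof(*dev));

		if (dev)
			dev->num_tx_queues = dev->real_num_tx_queues = txqs;
		return dev;
	}

	static int set_real_num_tx_queues(struct netdev *dev, unsigned int n)
	{
		if (n < 1 || n > dev->num_tx_queues)
			return -1;	/* cannot exceed what was reserved */
		dev->real_num_tx_queues = n;
		return 0;
	}

	int main(void)
	{
		struct netdev *dev = alloc_netdev_mqs(4);

		if (!dev || set_real_num_tx_queues(dev, 2))
			return 1;
		printf("%u of %u TX queues active\n",
		       dev->real_num_tx_queues, dev->num_tx_queues);
		free(dev);
		return 0;
	}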
@@ -5641,15 +5486,19 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
}
qeth_setup_card(card);
- qeth_update_from_chp_desc(card);
-
card->dev = qeth_alloc_netdev(card);
if (!card->dev) {
rc = -ENOMEM;
goto err_card;
}
+ card->qdio.no_out_queues = card->dev->num_tx_queues;
+ rc = qeth_update_from_chp_desc(card);
+ if (rc)
+ goto err_chp_desc;
qeth_determine_capabilities(card);
+ qeth_set_blkt_defaults(card);
+
enforced_disc = qeth_enforce_discipline(card);
switch (enforced_disc) {
case QETH_DISCIPLINE_UNDETERMINED:
@@ -5661,9 +5510,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
if (rc)
goto err_load;
- gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
- ? card->discipline->devtype
- : &qeth_osn_devtype;
+ gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
+ card->discipline->devtype;
rc = card->discipline->setup(card->gdev);
if (rc)
goto err_disc;
@@ -5675,6 +5523,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
err_disc:
qeth_core_free_discipline(card);
err_load:
+err_chp_desc:
free_netdev(card->dev);
err_card:
qeth_core_free_card(card);
@@ -5687,7 +5536,7 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- QETH_DBF_TEXT(SETUP, 2, "removedv");
+ QETH_CARD_TEXT(card, 2, "removedv");
if (card->discipline) {
card->discipline->remove(gdev);
@@ -5706,10 +5555,8 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
enum qeth_discipline_id def_discipline;
if (!card->discipline) {
- if (card->info.type == QETH_CARD_TYPE_IQD)
- def_discipline = QETH_DISCIPLINE_LAYER3;
- else
- def_discipline = QETH_DISCIPLINE_LAYER2;
+ def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
+ QETH_DISCIPLINE_LAYER2;
rc = qeth_core_load_discipline(card, def_discipline);
if (rc)
goto err;
@@ -5737,32 +5584,34 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev)
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
- qeth_clear_qdio_buffers(card);
+ qeth_drain_output_queues(card);
qdio_free(CARD_DDEV(card));
}
-static int qeth_core_freeze(struct ccwgroup_device *gdev)
+static int qeth_suspend(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- if (card->discipline && card->discipline->freeze)
- return card->discipline->freeze(gdev);
- return 0;
-}
-static int qeth_core_thaw(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- if (card->discipline && card->discipline->thaw)
- return card->discipline->thaw(gdev);
+ qeth_set_allowed_threads(card, 0, 1);
+ wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+ if (gdev->state == CCWGROUP_OFFLINE)
+ return 0;
+
+ card->discipline->set_offline(gdev);
return 0;
}
-static int qeth_core_restore(struct ccwgroup_device *gdev)
+static int qeth_resume(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- if (card->discipline && card->discipline->restore)
- return card->discipline->restore(gdev);
- return 0;
+ int rc;
+
+ rc = card->discipline->set_online(gdev);
+
+ qeth_set_allowed_threads(card, 0xffffffff, 0);
+ if (rc)
+ dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n");
+ return rc;
}
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
@@ -5803,9 +5652,9 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.shutdown = qeth_core_shutdown,
.prepare = NULL,
.complete = NULL,
- .freeze = qeth_core_freeze,
- .thaw = qeth_core_thaw,
- .restore = qeth_core_restore,
+ .freeze = qeth_suspend,
+ .thaw = qeth_resume,
+ .restore = qeth_resume,
};
struct qeth_card *qeth_get_card_by_busid(char *bus_id)
@@ -5837,13 +5686,10 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
break;
case SIOC_QETH_GET_CARD_TYPE:
- if ((card->info.type == QETH_CARD_TYPE_OSD ||
- card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSX) &&
- !card->info.guestlan)
+ if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
+ !IS_VM_NIC(card))
return 1;
- else
- return 0;
+ return 0;
case SIOCGMIIPHY:
mii_data = if_mii(rq);
mii_data->phy_id = 0;
@@ -5887,8 +5733,8 @@ static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
enum qeth_prot_versions prot)
{
- return qeth_send_simple_setassparms_prot(card, cstype,
- IPA_CMD_ASS_STOP, 0, prot);
+ return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
+ NULL, prot);
}
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
@@ -5919,7 +5765,8 @@ static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
return -EOPNOTSUPP;
}
- iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 4,
+ iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
+ SETASS_DATA_SIZEOF(flags_32bit),
prot);
if (!iob) {
qeth_set_csum_off(card, cstype, prot);
@@ -5976,7 +5823,7 @@ static int qeth_set_tso_off(struct qeth_card *card,
enum qeth_prot_versions prot)
{
return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
- IPA_CMD_ASS_STOP, 0, prot);
+ IPA_CMD_ASS_STOP, NULL, prot);
}
static int qeth_set_tso_on(struct qeth_card *card,
@@ -6002,7 +5849,8 @@ static int qeth_set_tso_on(struct qeth_card *card,
}
iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
- IPA_CMD_ASS_ENABLE, sizeof(caps), prot);
+ IPA_CMD_ASS_ENABLE,
+ SETASS_DATA_SIZEOF(caps), prot);
if (!iob) {
qeth_set_tso_off(card, prot);
return -ENOMEM;
@@ -6089,8 +5937,8 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
netdev_features_t changed = dev->features ^ features;
int rc = 0;
- QETH_DBF_TEXT(SETUP, 2, "setfeat");
- QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
+ QETH_CARD_TEXT(card, 2, "setfeat");
+ QETH_CARD_HEX(card, 2, &features, sizeof(features));
if ((changed & NETIF_F_IP_CSUM)) {
rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
@@ -6136,7 +5984,7 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(SETUP, 2, "fixfeat");
+ QETH_CARD_TEXT(card, 2, "fixfeat");
if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
features &= ~NETIF_F_IP_CSUM;
if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
@@ -6149,7 +5997,7 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO6;
- QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
+ QETH_CARD_HEX(card, 2, &features, sizeof(features));
return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);
@@ -6193,7 +6041,6 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_errors = card->stats.rx_errors;
stats->rx_dropped = card->stats.rx_dropped;
stats->multicast = card->stats.rx_multicast;
- stats->tx_errors = card->stats.tx_errors;
for (i = 0; i < card->qdio.no_out_queues; i++) {
queue = card->qdio.out_qs[i];
@@ -6206,6 +6053,15 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);
+u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+ u8 cast_type, struct net_device *sb_dev)
+{
+ if (cast_type != RTN_UNICAST)
+ return QETH_IQD_MCAST_TXQ;
+ return QETH_IQD_MIN_UCAST_TXQ;
+}
+EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
+
int qeth_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
@@ -6216,7 +6072,7 @@ int qeth_open(struct net_device *dev)
return -EIO;
card->data.state = CH_STATE_UP;
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
napi_enable(&card->napi);
local_bh_disable();
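The qeth_iqd_select_queue() policy added in this file is deliberately static: anything that is not unicast lands on the one reserved multicast queue, and unicast traffic starts at the first general-purpose queue. A compilable sketch of that dispatch (illustrative constants, not the driver's):

	#include <stdio.h>

	enum cast_type { UNICAST, MULTICAST, BROADCAST };

	enum { MCAST_TXQ = 0, MIN_UCAST_TXQ = 1 };	/* stand-in queue numbers */

	static unsigned int iqd_select_queue(enum cast_type cast)
	{
		if (cast != UNICAST)
			return MCAST_TXQ;	/* mcast/bcast pinned to one queue */
		return MIN_UCAST_TXQ;
	}

	int main(void)
	{
		printf("ucast -> txq %u, mcast -> txq %u\n",
		       iqd_select_queue(UNICAST), iqd_select_queue(MULTICAST));
		return 0;
	}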
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index f8c5d4a9be13..75b5834ed28d 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -31,14 +31,12 @@ extern unsigned char IPA_PDU_HEADER[];
#define QETH_CLEAR_CHANNEL_PARM -10
#define QETH_HALT_CHANNEL_PARM -11
-#define QETH_RCD_PARM -12
static inline bool qeth_intparm_is_iob(unsigned long intparm)
{
switch (intparm) {
case QETH_CLEAR_CHANNEL_PARM:
case QETH_HALT_CHANNEL_PARM:
- case QETH_RCD_PARM:
case 0:
return false;
}
@@ -82,7 +80,7 @@ enum qeth_card_types {
#define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM)
#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
#define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX)
-#define IS_VM_NIC(card) ((card)->info.guestlan)
+#define IS_VM_NIC(card) ((card)->info.is_vm_nic)
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
/* only the first two bytes are looked at in qeth_get_cardname_short */
@@ -381,9 +379,7 @@ struct qeth_ipacmd_layer2setdelvlan {
__u16 vlan_id;
} __attribute__ ((packed));
-
struct qeth_ipacmd_setassparms_hdr {
- __u32 assist_no;
__u16 length;
__u16 command_code;
__u16 return_code;
@@ -428,6 +424,7 @@ struct qeth_tso_start_data {
/* SETASSPARMS IPA Command: */
struct qeth_ipacmd_setassparms {
+ u32 assist_no;
struct qeth_ipacmd_setassparms_hdr hdr;
union {
__u32 flags_32bit;
@@ -439,6 +436,8 @@ struct qeth_ipacmd_setassparms {
} data;
} __attribute__ ((packed));
+#define SETASS_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setassparms,\
+ data.field)
/* SETRTG IPA Command: ****************************************************/
struct qeth_set_routing {
@@ -526,8 +525,6 @@ struct qeth_query_switch_attributes {
#define QETH_SETADP_FLAGS_VIRTUAL_MAC 0x80 /* for CHANGE_ADDR_READ_MAC */
struct qeth_ipacmd_setadpparms_hdr {
- u32 supp_hw_cmds;
- u32 reserved1;
u16 cmdlength;
u16 reserved2;
u32 command_code;
@@ -539,6 +536,7 @@ struct qeth_ipacmd_setadpparms_hdr {
};
struct qeth_ipacmd_setadpparms {
+ struct qeth_ipa_caps hw_cmds;
struct qeth_ipacmd_setadpparms_hdr hdr;
union {
struct qeth_query_cmds_supp query_cmds_supp;
@@ -552,6 +550,9 @@ struct qeth_ipacmd_setadpparms {
} data;
} __attribute__ ((packed));
+#define SETADP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setadpparms,\
+ data.field)
+
/* CREATE_ADDR IPA Command: ***********************************************/
struct qeth_create_destroy_address {
__u8 unique_id[8];
@@ -598,6 +599,11 @@ struct qeth_ipacmd_diagass {
__u8 cdata[64];
} __attribute__ ((packed));
+#define DIAG_HDR_LEN offsetofend(struct qeth_ipacmd_diagass, ext)
+#define DIAG_SUB_HDR_LEN (offsetofend(struct qeth_ipacmd_diagass, ext) -\
+ offsetof(struct qeth_ipacmd_diagass, \
+ subcmd_len))
+
/* VNIC Characteristics IPA Command: *****************************************/
/* IPA commands/sub commands for VNICC */
#define IPA_VNICC_QUERY_CHARS 0x00000000L
@@ -624,12 +630,6 @@ struct qeth_ipacmd_diagass {
/* VNICC header */
struct qeth_ipacmd_vnicc_hdr {
- u32 sup;
- u32 cur;
-};
-
-/* VNICC sub command header */
-struct qeth_vnicc_sub_hdr {
u16 data_length;
u16 reserved;
u32 sub_command;
@@ -654,15 +654,18 @@ struct qeth_vnicc_getset_timeout {
/* complete VNICC IPA command message */
struct qeth_ipacmd_vnicc {
+ struct qeth_ipa_caps vnicc_cmds;
struct qeth_ipacmd_vnicc_hdr hdr;
- struct qeth_vnicc_sub_hdr sub_hdr;
union {
struct qeth_vnicc_query_cmds query_cmds;
struct qeth_vnicc_set_char set_char;
struct qeth_vnicc_getset_timeout getset_timeout;
- };
+ } data;
};
+#define VNICC_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_vnicc,\
+ data.field)
+
/* SETBRIDGEPORT IPA Command: *********************************************/
enum qeth_ipa_sbp_cmd {
IPA_SBP_QUERY_COMMANDS_SUPPORTED = 0x00000000L,
@@ -688,8 +691,6 @@ struct mac_addr_lnid {
} __packed;
struct qeth_ipacmd_sbp_hdr {
- __u32 supported_sbp_cmds;
- __u32 enabled_sbp_cmds;
__u16 cmdlength;
__u16 reserved1;
__u32 command_code;
@@ -704,16 +705,10 @@ struct qeth_sbp_query_cmds_supp {
__u32 reserved;
} __packed;
-struct qeth_sbp_reset_role {
-} __packed;
-
struct qeth_sbp_set_primary {
struct net_if_token token;
} __packed;
-struct qeth_sbp_set_secondary {
-} __packed;
-
struct qeth_sbp_port_entry {
__u8 role;
__u8 state;
@@ -739,17 +734,19 @@ struct qeth_sbp_state_change {
} __packed;
struct qeth_ipacmd_setbridgeport {
+ struct qeth_ipa_caps sbp_cmds;
struct qeth_ipacmd_sbp_hdr hdr;
union {
struct qeth_sbp_query_cmds_supp query_cmds_supp;
- struct qeth_sbp_reset_role reset_role;
struct qeth_sbp_set_primary set_primary;
- struct qeth_sbp_set_secondary set_secondary;
struct qeth_sbp_query_ports query_ports;
struct qeth_sbp_state_change state_change;
} data;
} __packed;
+#define SBP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setbridgeport,\
+ data.field)
+
/* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/
/* Bitmask for entry->change_code. Both bits may be raised. */
enum qeth_ipa_addr_change_code {
@@ -808,6 +805,8 @@ struct qeth_ipa_cmd {
} data;
} __attribute__ ((packed));
+#define IPA_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipa_cmd, data.field)
+
/*
* special command for ARP processing.
* this is not included in setassparms command before, because we get
@@ -825,10 +824,6 @@ enum qeth_ipa_arp_return_codes {
extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
-#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
- sizeof(struct qeth_ipacmd_setadpparms_hdr))
-#define QETH_SNMP_SETADP_CMDLENGTH 16
-
/* Helper functions */
#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
(cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
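The new SETASS/SETADP/VNICC/SBP/IPA *_DATA_SIZEOF macros above all lean on FIELD_SIZEOF, which yields the size of a struct member as a compile-time constant without needing an instance. A self-contained sketch of the idiom (toy struct, not the real command layout):

	#include <stdio.h>

	#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

	struct cmd {
		unsigned short hdr;
		union {
			unsigned int flags_32bit;
			char raw[64];
		} data;
	};

	int main(void)
	{
		/* evaluated at compile time; no struct cmd object exists */
		printf("flags payload: %zu bytes\n",
		       FIELD_SIZEOF(struct cmd, data.flags_32bit));
		return 0;
	}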
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 56deeb6f7bc0..9f392497d570 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -198,6 +198,9 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
if (!card)
return -EINVAL;
+ if (IS_IQD(card))
+ return -EOPNOTSUPP;
+
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -239,10 +242,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 2;
} else if (sysfs_streq(buf, "no_prio_queueing:3")) {
- if (card->info.type == QETH_CARD_TYPE_IQD) {
- rc = -EPERM;
- goto out;
- }
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 3;
} else if (sysfs_streq(buf, "no_prio_queueing")) {
@@ -480,8 +479,7 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if (card->info.type != QETH_CARD_TYPE_OSD &&
- card->info.type != QETH_CARD_TYPE_OSX) {
+ if (!IS_OSD(card) && !IS_OSX(card)) {
rc = -EOPNOTSUPP;
dev_err(&card->gdev->dev, "Adapter does not "
"support QDIO data connection isolation\n");
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 93a53fed4cf8..4166eb29f0bd 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -38,6 +38,7 @@ static const struct qeth_stats txq_stats[] = {
QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail),
QETH_TXQ_STAT("TSO bytes", tso_bytes),
QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
+ QETH_TXQ_STAT("Queue stopped", stopped),
};
static const struct qeth_stats card_stats[] = {
@@ -154,6 +155,21 @@ static void qeth_get_drvinfo(struct net_device *dev,
CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
}
+static void qeth_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ channels->max_rx = dev->num_rx_queues;
+ channels->max_tx = card->qdio.no_out_queues;
+ channels->max_other = 0;
+ channels->max_combined = 0;
+ channels->rx_count = dev->real_num_rx_queues;
+ channels->tx_count = dev->real_num_tx_queues;
+ channels->other_count = 0;
+ channels->combined_count = 0;
+}
+
/* Helper function to fill 'advertising' and 'supported' which are the same. */
/* Autoneg and full-duplex are supported and advertised unconditionally. */
/* Always advertise and support all speeds up to specified, and only one */
@@ -359,6 +375,7 @@ const struct ethtool_ops qeth_ethtool_ops = {
.get_ethtool_stats = qeth_get_ethtool_stats,
.get_sset_count = qeth_get_sset_count,
.get_drvinfo = qeth_get_drvinfo,
+ .get_channels = qeth_get_channels,
.get_link_ksettings = qeth_get_link_ksettings,
};
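With .get_channels wired up, userspace can read the reserved and active queue counts via `ethtool -l <interface>`, or directly through the ETHTOOL_GCHANNELS ioctl. A minimal sketch of the ioctl path, with error handling mostly trimmed:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <net/if.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(int argc, char **argv)
	{
		struct ethtool_channels ec = { .cmd = ETHTOOL_GCHANNELS };
		struct ifreq ifr = { 0 };
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		strncpy(ifr.ifr_name, argc > 1 ? argv[1] : "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ec;
		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
			printf("tx %u/%u, rx %u/%u\n", ec.tx_count, ec.max_tx,
			       ec.rx_count, ec.max_rx);
		close(fd);
		return 0;
	}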
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index c3067fd3bd9e..fd64bc3f4062 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -85,7 +85,8 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "L2sdmac");
- iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
+ IPA_DATA_SIZEOF(setdelmac));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -149,35 +150,23 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
return rc;
}
-static void qeth_l2_del_all_macs(struct qeth_card *card)
+static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
{
struct qeth_mac *mac;
struct hlist_node *tmp;
int i;
- spin_lock_bh(&card->mclock);
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
hash_del(&mac->hnode);
kfree(mac);
}
- spin_unlock_bh(&card->mclock);
-}
-
-static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
-{
- if (card->info.type == QETH_CARD_TYPE_OSN)
- return RTN_UNICAST;
- if (is_broadcast_ether_addr(skb->data))
- return RTN_BROADCAST;
- if (is_multicast_ether_addr(skb->data))
- return RTN_MULTICAST;
- return RTN_UNICAST;
}
static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, int cast_type, unsigned int data_len)
+ int ipv, unsigned int data_len)
{
+ int cast_type = qeth_get_ether_cast_type(skb);
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
hdr->hdr.l2.pkt_length = data_len;
@@ -253,7 +242,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
- iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
+ IPA_DATA_SIZEOF(setdelvlan));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -287,26 +277,23 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
static void qeth_l2_stop_card(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP , 2, "stopcard");
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "stopcard");
qeth_set_allowed_threads(card, 0, 1);
+ cancel_work_sync(&card->rx_mode_work);
+ qeth_l2_drain_rx_mode_cache(card);
+
if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_l2_del_all_macs(card);
qeth_clear_ipacmd_list(card);
card->state = CARD_STATE_HARDSETUP;
}
if (card->state == CARD_STATE_HARDSETUP) {
qeth_qdio_clear_card(card, 0);
- qeth_clear_qdio_buffers(card);
+ qeth_drain_output_queues(card);
qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
- if (card->state == CARD_STATE_DOWN) {
- qeth_clear_cmd_buffers(&card->read);
- qeth_clear_cmd_buffers(&card->write);
- }
flush_workqueue(card->event_wq);
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
@@ -334,13 +321,11 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
case QETH_HEADER_TYPE_LAYER2:
skb->protocol = eth_type_trans(skb, skb->dev);
qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
- if (skb->protocol == htons(ETH_P_802_2))
- *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
len = skb->len;
napi_gro_receive(&card->napi, skb);
break;
case QETH_HEADER_TYPE_OSN:
- if (card->info.type == QETH_CARD_TYPE_OSN) {
+ if (IS_OSN(card)) {
skb_push(skb, sizeof(struct qeth_hdr));
skb_copy_to_linear_data(skb, hdr,
sizeof(struct qeth_hdr));
@@ -367,8 +352,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(SETUP, 2, "l2reqmac");
- QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
+ QETH_CARD_TEXT(card, 2, "l2reqmac");
if (MACHINE_IS_VM) {
rc = qeth_vm_request_mac(card);
@@ -376,7 +360,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
goto out;
QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
CARD_DEVID(card), rc);
- QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "err%04x", rc);
/* fall back to alternative mechanism: */
}
@@ -386,18 +370,17 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
goto out;
QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
CARD_DEVID(card), rc);
- QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%04x", rc);
/* fall back once more: */
}
/* some devices don't support a custom MAC address: */
- if (card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSX)
+ if (IS_OSM(card) || IS_OSX(card))
return (rc) ? rc : -EADDRNOTAVAIL;
eth_hw_addr_random(card->dev);
out:
- QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, card->dev->addr_len);
+ QETH_CARD_HEX(card, 2, card->dev->dev_addr, card->dev->addr_len);
return 0;
}
@@ -481,7 +464,7 @@ static void qeth_promisc_to_bridge(struct qeth_card *card)
role = QETH_SBP_ROLE_NONE;
rc = qeth_bridgeport_setrole(card, role);
- QETH_DBF_TEXT_(SETUP, 2, "bpm%c%04x",
+ QETH_CARD_TEXT_(card, 2, "bpm%c%04x",
(promisc_mode == SET_PROMISC_MODE_ON) ? '+' : '-', rc);
if (!rc) {
card->options.sbp.role = role;
@@ -515,9 +498,11 @@ static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
hash_add(card->mac_htable, &mac->hnode, mac_hash);
}
-static void qeth_l2_set_rx_mode(struct net_device *dev)
+static void qeth_l2_rx_mode_work(struct work_struct *work)
{
- struct qeth_card *card = dev->ml_priv;
+ struct qeth_card *card = container_of(work, struct qeth_card,
+ rx_mode_work);
+ struct net_device *dev = card->dev;
struct netdev_hw_addr *ha;
struct qeth_mac *mac;
struct hlist_node *tmp;
@@ -526,12 +511,12 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
QETH_CARD_TEXT(card, 3, "setmulti");
- spin_lock_bh(&card->mclock);
-
+ netif_addr_lock_bh(dev);
netdev_for_each_mc_addr(ha, dev)
qeth_l2_add_mac(card, ha);
netdev_for_each_uc_addr(ha, dev)
qeth_l2_add_mac(card, ha);
+ netif_addr_unlock_bh(dev);
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
switch (mac->disp_flag) {
@@ -554,8 +539,6 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
}
}
- spin_unlock_bh(&card->mclock);
-
if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
qeth_setadp_promisc_mode(card);
else
@@ -586,7 +569,7 @@ static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
}
elements += qeth_count_elements(skb, hd_len);
- if (elements > QETH_MAX_BUFFER_ELEMENTS(card)) {
+ if (elements > queue->max_elements) {
rc = -E2BIG;
goto out;
}
@@ -603,37 +586,44 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- int cast_type = qeth_l2_get_cast_type(card, skb);
- int ipv = qeth_get_ip_version(skb);
+ u16 txq = skb_get_queue_mapping(skb);
struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
int rc;
- queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
-
- netif_stop_queue(dev);
+ if (IS_IQD(card))
+ txq = qeth_iqd_translate_txq(dev, txq);
+ queue = card->qdio.out_qs[txq];
if (IS_OSN(card))
rc = qeth_l2_xmit_osn(card, skb, queue);
else
- rc = qeth_xmit(card, skb, queue, ipv, cast_type,
+ rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
qeth_l2_fill_header);
if (!rc) {
QETH_TXQ_STAT_INC(queue, tx_packets);
QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
- netif_wake_queue(dev);
return NETDEV_TX_OK;
- } else if (rc == -EBUSY) {
- return NETDEV_TX_BUSY;
- } /* else fall through */
+ }
QETH_TXQ_STAT_INC(queue, tx_dropped);
kfree_skb(skb);
- netif_wake_queue(dev);
return NETDEV_TX_OK;
}
+static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (IS_IQD(card))
+ return qeth_iqd_select_queue(dev, skb,
+ qeth_get_ether_cast_type(skb),
+ sb_dev);
+ return qeth_get_priority_queue(card, skb);
+}
+
static const struct device_type qeth_l2_devtype = {
.name = "qeth_layer2",
.groups = qeth_l2_attr_groups,
@@ -653,6 +643,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
}
hash_init(card->mac_htable);
+ INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
return 0;
}
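.ndo_set_rx_mode runs with the device's address lists locked in a softirq-safe context, so the sleeping IPA commands have to be pushed to the rx_mode_work item initialized here. A userspace analogue of handing blocking work to a kicked worker (compile with -pthread):

	#include <pthread.h>
	#include <semaphore.h>
	#include <stdio.h>

	static sem_t kick;

	static void *rx_mode_work(void *arg)
	{
		(void)arg;
		sem_wait(&kick);			/* the worker may block... */
		printf("programmed address filters\n");	/* ...e.g. on a command reply */
		return NULL;
	}

	static void set_rx_mode(void)	/* caller holds locks, must not block */
	{
		sem_post(&kick);	/* only schedules the work */
	}

	int main(void)
	{
		pthread_t worker;

		sem_init(&kick, 0, 0);
		pthread_create(&worker, NULL, rx_mode_work, NULL);
		set_rx_mode();
		pthread_join(worker, NULL);
		return 0;
	}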
@@ -673,12 +664,20 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
unregister_netdev(card->dev);
}
+static void qeth_l2_set_rx_mode(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ schedule_work(&card->rx_mode_work);
+}
+
static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l2_hard_start_xmit,
.ndo_features_check = qeth_features_check,
+ .ndo_select_queue = qeth_l2_select_queue,
.ndo_validate_addr = qeth_l2_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
@@ -721,7 +720,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
- if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
+ if (IS_OSD(card) && !IS_VM_NIC(card)) {
card->dev->features |= NETIF_F_SG;
/* OSA 3S and earlier has no RX/TX support */
if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
@@ -793,12 +792,11 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
- QETH_DBF_TEXT(SETUP, 2, "setonlin");
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "setonlin");
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -829,10 +827,9 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
qeth_print_status_message(card);
/* softsetup */
- QETH_DBF_TEXT(SETUP, 2, "softsetp");
+ QETH_CARD_TEXT(card, 2, "softsetp");
- if ((card->info.type == QETH_CARD_TYPE_OSD) ||
- (card->info.type == QETH_CARD_TYPE_OSX)) {
+ if (IS_OSD(card) || IS_OSX(card)) {
rc = qeth_l2_start_ipassists(card);
if (rc)
goto out_remove;
@@ -840,7 +837,7 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
rc = qeth_init_qdio_queues(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -880,7 +877,6 @@ out_remove:
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
qdio_free(CARD_DDEV(card));
- card->state = CARD_STATE_DOWN;
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
@@ -895,8 +891,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
- QETH_DBF_TEXT(SETUP, 3, "setoffl");
- QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 3, "setoffl");
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
@@ -917,7 +912,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
if (!rc)
rc = (rc2) ? rc2 : rc3;
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
qdio_free(CARD_DDEV(card));
/* let user_space know that device is offline */
@@ -970,33 +965,6 @@ static void __exit qeth_l2_exit(void)
pr_info("unregister layer 2 discipline\n");
}
-static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-
- qeth_set_allowed_threads(card, 0, 1);
- wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
- if (gdev->state == CCWGROUP_OFFLINE)
- return 0;
-
- qeth_l2_set_offline(gdev);
- return 0;
-}
-
-static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- int rc;
-
- rc = qeth_l2_set_online(gdev);
-
- qeth_set_allowed_threads(card, 0xffffffff, 0);
- if (rc)
- dev_warn(&card->gdev->dev, "The qeth device driver "
- "failed to recover an error on the device\n");
- return rc;
-}
-
/* Returns zero if the command is successfully "consumed" */
static int qeth_l2_control_event(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
@@ -1026,50 +994,16 @@ struct qeth_discipline qeth_l2_discipline = {
.remove = qeth_l2_remove_device,
.set_online = qeth_l2_set_online,
.set_offline = qeth_l2_set_offline,
- .freeze = qeth_l2_pm_suspend,
- .thaw = qeth_l2_pm_resume,
- .restore = qeth_l2_pm_resume,
.do_ioctl = NULL,
.control_event_handler = qeth_l2_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l2_discipline);
-static int qeth_osn_send_control_data(struct qeth_card *card, int len,
- struct qeth_cmd_buffer *iob)
+static void qeth_osn_assist_cb(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
{
- struct qeth_channel *channel = iob->channel;
- int rc = 0;
-
- QETH_CARD_TEXT(card, 5, "osndctrd");
-
- wait_event(card->wait_q,
- atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
- qeth_prepare_control_data(card, len, iob);
- QETH_CARD_TEXT(card, 6, "osnoirqp");
- spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
- rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
- spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
- if (rc) {
- QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
- "ccw_device_start rc = %i\n", rc);
- QETH_CARD_TEXT_(card, 2, " err%d", rc);
- qeth_release_buffer(channel, iob);
- atomic_set(&channel->irq_pending, 0);
- wake_up(&card->wait_q);
- }
- return rc;
-}
-
-static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
- struct qeth_cmd_buffer *iob)
-{
- u16 length;
-
- QETH_CARD_TEXT(card, 4, "osndipa");
-
- memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2);
- return qeth_osn_send_control_data(card, length, iob);
+ qeth_notify_reply(iob->reply, 0);
+ qeth_put_cmd(iob);
}
int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
@@ -1077,6 +1011,8 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
struct qeth_cmd_buffer *iob;
struct qeth_card *card;
+ if (data_len < 0)
+ return -EINVAL;
if (!dev)
return -ENODEV;
card = dev->ml_priv;
@@ -1085,10 +1021,16 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
QETH_CARD_TEXT(card, 2, "osnsdmc");
if (!qeth_card_hw_is_reachable(card))
return -ENODEV;
- iob = qeth_wait_for_buffer(&card->write);
+
+ iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_len, 1,
+ QETH_IPA_TIMEOUT);
+ if (!iob)
+ return -ENOMEM;
+
qeth_prepare_ipa_cmd(card, iob, (u16) data_len);
memcpy(__ipa_cmd(iob), data, data_len);
- return qeth_osn_send_ipa_cmd(card, iob);
+ iob->callback = qeth_osn_assist_cb;
+ return qeth_send_ipa_cmd(card, iob, NULL, NULL);
}
EXPORT_SYMBOL(qeth_osn_assist);
@@ -1454,23 +1396,25 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card,
enum qeth_ipa_sbp_cmd sbp_cmd,
- unsigned int cmd_length)
+ unsigned int data_length)
{
- enum qeth_ipa_cmds ipa_cmd = (card->info.type == QETH_CARD_TYPE_IQD) ?
- IPA_CMD_SETBRIDGEPORT_IQD :
- IPA_CMD_SETBRIDGEPORT_OSA;
+ enum qeth_ipa_cmds ipa_cmd = IS_IQD(card) ? IPA_CMD_SETBRIDGEPORT_IQD :
+ IPA_CMD_SETBRIDGEPORT_OSA;
+ struct qeth_ipacmd_sbp_hdr *hdr;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
- iob = qeth_get_ipacmd_buffer(card, ipa_cmd, 0);
+ iob = qeth_ipa_alloc_cmd(card, ipa_cmd, QETH_PROT_NONE,
+ data_length +
+ offsetof(struct qeth_ipacmd_setbridgeport,
+ data));
if (!iob)
return iob;
- cmd = __ipa_cmd(iob);
- cmd->data.sbp.hdr.cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
- cmd_length;
- cmd->data.sbp.hdr.command_code = sbp_cmd;
- cmd->data.sbp.hdr.used_total = 1;
- cmd->data.sbp.hdr.seq_no = 1;
+
+ hdr = &__ipa_cmd(iob)->data.sbp.hdr;
+ hdr->cmdlength = sizeof(*hdr) + data_length;
+ hdr->command_code = sbp_cmd;
+ hdr->used_total = 1;
+ hdr->seq_no = 1;
return iob;
}
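The rebuilt constructor sizes every command as offsetof(cmd, data) plus the payload length, which is why zero-payload sub-commands (such as resetting the bridge-port role) no longer need empty marker structs. A sketch of that sizing scheme on a toy layout:

	#include <stddef.h>
	#include <stdio.h>

	struct sbp_cmd {
		unsigned int caps[2];
		unsigned short cmdlength;
		unsigned char data[];	/* variable-length payload */
	};

	static size_t cmd_size(size_t data_length)
	{
		return offsetof(struct sbp_cmd, data) + data_length;
	}

	int main(void)
	{
		printf("empty payload: %zu bytes, 4-byte payload: %zu bytes\n",
		       cmd_size(0), cmd_size(4));
		return 0;
	}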
@@ -1505,7 +1449,7 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "brqsuppo");
iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED,
- sizeof(struct qeth_sbp_query_cmds_supp));
+ SBP_DATA_SIZEOF(query_cmds_supp));
if (!iob)
return;
@@ -1597,23 +1541,21 @@ static int qeth_bridgeport_set_cb(struct qeth_card *card,
*/
int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
{
- int cmdlength;
struct qeth_cmd_buffer *iob;
enum qeth_ipa_sbp_cmd setcmd;
+ unsigned int cmdlength = 0;
QETH_CARD_TEXT(card, 2, "brsetrol");
switch (role) {
case QETH_SBP_ROLE_NONE:
setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE;
- cmdlength = sizeof(struct qeth_sbp_reset_role);
break;
case QETH_SBP_ROLE_PRIMARY:
setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT;
- cmdlength = sizeof(struct qeth_sbp_set_primary);
+ cmdlength = SBP_DATA_SIZEOF(set_primary);
break;
case QETH_SBP_ROLE_SECONDARY:
setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT;
- cmdlength = sizeof(struct qeth_sbp_set_secondary);
break;
default:
return -EINVAL;
@@ -1679,7 +1621,7 @@ static void qeth_bridgeport_an_set_cb(void *priv,
l2entry = (struct qdio_brinfo_entry_l2 *)entry;
code = IPA_ADDR_CHANGE_CODE_MACADDR;
- if (l2entry->addr_lnid.lnid)
+ if (l2entry->addr_lnid.lnid < VLAN_N_VID)
code |= IPA_ADDR_CHANGE_CODE_VLANID;
qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
(struct net_if_token *)&l2entry->nit,
@@ -1763,10 +1705,6 @@ static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc)
struct _qeth_l2_vnicc_request_cbctl {
u32 sub_cmd;
struct {
- u32 vnic_char;
- u32 timeout;
- } param;
- struct {
union{
u32 *sup_cmds;
u32 *timeout;
@@ -1788,80 +1726,52 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
if (cmd->hdr.return_code)
return qeth_l2_vnicc_makerc(card, cmd->hdr.return_code);
/* return results to caller */
- card->options.vnicc.sup_chars = rep->hdr.sup;
- card->options.vnicc.cur_chars = rep->hdr.cur;
+ card->options.vnicc.sup_chars = rep->vnicc_cmds.supported;
+ card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;
if (cbctl->sub_cmd == IPA_VNICC_QUERY_CMDS)
- *cbctl->result.sup_cmds = rep->query_cmds.sup_cmds;
+ *cbctl->result.sup_cmds = rep->data.query_cmds.sup_cmds;
if (cbctl->sub_cmd == IPA_VNICC_GET_TIMEOUT)
- *cbctl->result.timeout = rep->getset_timeout.timeout;
+ *cbctl->result.timeout = rep->data.getset_timeout.timeout;
return 0;
}
-/* generic VNICC request */
-static int qeth_l2_vnicc_request(struct qeth_card *card,
- struct _qeth_l2_vnicc_request_cbctl *cbctl)
+static struct qeth_cmd_buffer *qeth_l2_vnicc_build_cmd(struct qeth_card *card,
+ u32 vnicc_cmd,
+ unsigned int data_length)
{
- struct qeth_ipacmd_vnicc *req;
+ struct qeth_ipacmd_vnicc_hdr *hdr;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
-
- QETH_CARD_TEXT(card, 2, "vniccreq");
- /* get new buffer for request */
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_VNICC, 0);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_VNICC, QETH_PROT_NONE,
+ data_length +
+ offsetof(struct qeth_ipacmd_vnicc, data));
if (!iob)
- return -ENOMEM;
-
- /* create header for request */
- cmd = __ipa_cmd(iob);
- req = &cmd->data.vnicc;
+ return NULL;
- /* create sub command header for request */
- req->sub_hdr.data_length = sizeof(req->sub_hdr);
- req->sub_hdr.sub_command = cbctl->sub_cmd;
-
- /* create sub command specific request fields */
- switch (cbctl->sub_cmd) {
- case IPA_VNICC_QUERY_CHARS:
- break;
- case IPA_VNICC_QUERY_CMDS:
- req->sub_hdr.data_length += sizeof(req->query_cmds);
- req->query_cmds.vnic_char = cbctl->param.vnic_char;
- break;
- case IPA_VNICC_ENABLE:
- case IPA_VNICC_DISABLE:
- req->sub_hdr.data_length += sizeof(req->set_char);
- req->set_char.vnic_char = cbctl->param.vnic_char;
- break;
- case IPA_VNICC_SET_TIMEOUT:
- req->getset_timeout.timeout = cbctl->param.timeout;
- /* fallthrough */
- case IPA_VNICC_GET_TIMEOUT:
- req->sub_hdr.data_length += sizeof(req->getset_timeout);
- req->getset_timeout.vnic_char = cbctl->param.vnic_char;
- break;
- default:
- qeth_release_buffer(iob->channel, iob);
- return -EOPNOTSUPP;
- }
-
- /* send request */
- return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, cbctl);
+ hdr = &__ipa_cmd(iob)->data.vnicc.hdr;
+ hdr->data_length = sizeof(*hdr) + data_length;
+ hdr->sub_command = vnicc_cmd;
+ return iob;
}
/* VNICC query VNIC characteristics request */
static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
{
struct _qeth_l2_vnicc_request_cbctl cbctl;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "vniccqch");
+ iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CHARS, 0);
+ if (!iob)
+ return -ENOMEM;
/* prepare callback control */
cbctl.sub_cmd = IPA_VNICC_QUERY_CHARS;
- QETH_CARD_TEXT(card, 2, "vniccqch");
- return qeth_l2_vnicc_request(card, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
}
/* VNICC query sub commands request */
@@ -1869,14 +1779,21 @@ static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
u32 *sup_cmds)
{
struct _qeth_l2_vnicc_request_cbctl cbctl;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "vniccqcm");
+ iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CMDS,
+ VNICC_DATA_SIZEOF(query_cmds));
+ if (!iob)
+ return -ENOMEM;
+
+ __ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;
/* prepare callback control */
cbctl.sub_cmd = IPA_VNICC_QUERY_CMDS;
- cbctl.param.vnic_char = vnic_char;
cbctl.result.sup_cmds = sup_cmds;
- QETH_CARD_TEXT(card, 2, "vniccqcm");
- return qeth_l2_vnicc_request(card, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
}
/* VNICC enable/disable characteristic request */
@@ -1884,31 +1801,47 @@ static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char,
u32 cmd)
{
struct _qeth_l2_vnicc_request_cbctl cbctl;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "vniccedc");
+ iob = qeth_l2_vnicc_build_cmd(card, cmd, VNICC_DATA_SIZEOF(set_char));
+ if (!iob)
+ return -ENOMEM;
+
+ __ipa_cmd(iob)->data.vnicc.data.set_char.vnic_char = vnic_char;
/* prepare callback control */
cbctl.sub_cmd = cmd;
- cbctl.param.vnic_char = vnic_char;
- QETH_CARD_TEXT(card, 2, "vniccedc");
- return qeth_l2_vnicc_request(card, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
}
/* VNICC get/set timeout for characteristic request */
static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
u32 cmd, u32 *timeout)
{
+ struct qeth_vnicc_getset_timeout *getset_timeout;
struct _qeth_l2_vnicc_request_cbctl cbctl;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "vniccgst");
+ iob = qeth_l2_vnicc_build_cmd(card, cmd,
+ VNICC_DATA_SIZEOF(getset_timeout));
+ if (!iob)
+ return -ENOMEM;
+
+ getset_timeout = &__ipa_cmd(iob)->data.vnicc.data.getset_timeout;
+ getset_timeout->vnic_char = vnicc;
+
+ if (cmd == IPA_VNICC_SET_TIMEOUT)
+ getset_timeout->timeout = *timeout;
/* prepare callback control */
cbctl.sub_cmd = cmd;
- cbctl.param.vnic_char = vnicc;
- if (cmd == IPA_VNICC_SET_TIMEOUT)
- cbctl.param.timeout = *timeout;
if (cmd == IPA_VNICC_GET_TIMEOUT)
cbctl.result.timeout = timeout;
- QETH_CARD_TEXT(card, 2, "vniccgst");
- return qeth_l2_vnicc_request(card, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
}
/* set current VNICC flag state; called from sysfs store function */
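All four VNICC entry points now share one builder plus qeth_l2_vnicc_request_cb, with a small stack-allocated control block telling the callback where to deposit results. A toy version of that callback-control pattern (invented names, userspace illustration):

	#include <stdio.h>

	struct cbctl {
		unsigned int sub_cmd;
		unsigned int *result;	/* NULL when the caller wants no data back */
	};

	static int reply_cb(const void *reply, void *priv)
	{
		struct cbctl *ctl = priv;

		if (ctl->result)
			*ctl->result = *(const unsigned int *)reply;
		return 0;
	}

	int main(void)
	{
		unsigned int sup_cmds = 0, reply = 0x2a;
		struct cbctl ctl = { .sub_cmd = 1, .result = &sup_cmds };

		reply_cb(&reply, &ctl);	/* stand-in for the IPA reply path */
		printf("sup_cmds=0x%x\n", sup_cmds);
		return 0;
	}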
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 53712cf26406..2dd99f103671 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -32,7 +32,6 @@
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
-#include <net/ip6_fib.h>
#include <net/iucv/af_iucv.h>
#include <linux/hashtable.h>
@@ -246,9 +245,9 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
*/
if (addr->proto == QETH_PROT_IPV4) {
addr->in_progress = 1;
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
rc = qeth_l3_register_addr_entry(card, addr);
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
addr->in_progress = 0;
} else
rc = qeth_l3_register_addr_entry(card, addr);
@@ -268,6 +267,30 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
return rc;
}
+static int qeth_l3_modify_ip(struct qeth_card *card, struct qeth_ipaddr *addr,
+ bool add)
+{
+ int rc;
+
+ mutex_lock(&card->ip_lock);
+ rc = add ? qeth_l3_add_ip(card, addr) : qeth_l3_delete_ip(card, addr);
+ mutex_unlock(&card->ip_lock);
+
+ return rc;
+}
+
+static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card)
+{
+ struct qeth_ipaddr *addr;
+ struct hlist_node *tmp;
+ int i;
+
+ hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+ hash_del(&addr->hnode);
+ kfree(addr);
+ }
+}
+
static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
{
struct qeth_ipaddr *addr;
@@ -276,7 +299,7 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
QETH_CARD_TEXT(card, 4, "clearip");
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (!recover) {
@@ -287,19 +310,9 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
addr->disp_flag = QETH_DISP_ADDR_ADD;
}
- spin_unlock_bh(&card->ip_lock);
-
- spin_lock_bh(&card->mclock);
-
- hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
- hash_del(&addr->hnode);
- kfree(addr);
- }
-
- spin_unlock_bh(&card->mclock);
-
-
+ mutex_unlock(&card->ip_lock);
}
+
static void qeth_l3_recover_ip(struct qeth_card *card)
{
struct qeth_ipaddr *addr;
@@ -309,15 +322,15 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
QETH_CARD_TEXT(card, 4, "recovrip");
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
if (addr->proto == QETH_PROT_IPV4) {
addr->in_progress = 1;
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
rc = qeth_l3_register_addr_entry(card, addr);
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
addr->in_progress = 0;
} else
rc = qeth_l3_register_addr_entry(card, addr);
@@ -333,8 +346,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
}
}
- spin_unlock_bh(&card->ip_lock);
-
+ mutex_unlock(&card->ip_lock);
}
static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply,
@@ -364,7 +376,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setdelmc");
- iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto,
+ IPA_DATA_SIZEOF(setdelipm));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -416,7 +429,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setdelip");
- iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto,
+ IPA_DATA_SIZEOF(setdelip6));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -448,7 +462,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 4, "setroutg");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETRTG, prot,
+ IPA_DATA_SIZEOF(setrtg));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -461,7 +476,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
static int qeth_l3_correct_routing_type(struct qeth_card *card,
enum qeth_routing_types *type, enum qeth_prot_versions prot)
{
- if (card->info.type == QETH_CARD_TYPE_IQD) {
+ if (IS_IQD(card)) {
switch (*type) {
case NO_ROUTER:
case PRIMARY_CONNECTOR:
@@ -559,7 +574,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
{
struct qeth_ipato_entry *ipatoe, *tmp;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
list_del(&ipatoe->entry);
@@ -567,7 +582,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
}
qeth_l3_update_ipato(card);
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
}
int qeth_l3_add_ipato_entry(struct qeth_card *card,
@@ -578,7 +593,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "addipato");
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != new->proto)
@@ -596,7 +611,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
qeth_l3_update_ipato(card);
}
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
return rc;
}
@@ -610,7 +625,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "delipato");
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
@@ -625,7 +640,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
}
}
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
return rc;
}
@@ -634,7 +649,6 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr addr;
- int rc;
qeth_l3_init_ipaddr(&addr, type, proto);
if (proto == QETH_PROT_IPV4)
@@ -642,16 +656,13 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
else
memcpy(&addr.u.a6.addr, ip, 16);
- spin_lock_bh(&card->ip_lock);
- rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr);
- spin_unlock_bh(&card->ip_lock);
- return rc;
+ return qeth_l3_modify_ip(card, &addr, add);
}
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
{
struct qeth_ipaddr addr;
- int rc, i;
+ unsigned int i;
qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
addr.u.a6.addr.s6_addr[0] = 0xfe;
@@ -659,10 +670,7 @@ int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
for (i = 0; i < 8; i++)
addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i];
- spin_lock_bh(&card->ip_lock);
- rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr);
- spin_unlock_bh(&card->ip_lock);
- return rc;
+ return qeth_l3_modify_ip(card, &addr, add);
}
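Registering an address waits for an IPA reply, which sleeps; that is why ip_lock can now be a mutex held across the whole add/delete instead of the old drop-and-retake spinlock dance. A userspace illustration of a sleepable critical section under a mutex (compile with -pthread):

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t ip_lock = PTHREAD_MUTEX_INITIALIZER;

	static int register_addr(void)
	{
		usleep(1000);	/* stand-in for waiting on a command reply */
		return 0;
	}

	static int modify_ip(void)
	{
		int rc;

		pthread_mutex_lock(&ip_lock);	/* sleeping inside is legal here */
		rc = register_addr();
		pthread_mutex_unlock(&ip_lock);
		return rc;
	}

	int main(void)
	{
		printf("rc=%d\n", modify_ip());
		return 0;
	}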
static int qeth_l3_register_addr_entry(struct qeth_card *card,
@@ -736,7 +744,7 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(SETUP, 2, "setadprm");
+ QETH_CARD_TEXT(card, 2, "setadprm");
if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
rc = qeth_setadpparms_change_macaddr(card);
@@ -761,7 +769,7 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
return 0;
}
rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_START, 0);
+ IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
"Starting ARP processing support for %s failed\n",
@@ -784,7 +792,7 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
}
rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
- IPA_CMD_ASS_START, 0);
+ IPA_CMD_ASS_START, NULL);
if (rc)
dev_warn(&card->gdev->dev,
"Starting source MAC-address support for %s failed\n",
@@ -805,7 +813,7 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
}
rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
- IPA_CMD_ASS_START, 0);
+ IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
"Starting VLAN support for %s failed\n",
@@ -830,7 +838,7 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
}
rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
- IPA_CMD_ASS_START, 0);
+ IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
"Starting multicast support for %s failed\n",
@@ -844,23 +852,24 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
{
+ u32 ipv6_data = 3;
int rc;
QETH_CARD_TEXT(card, 3, "softipv6");
- if (card->info.type == QETH_CARD_TYPE_IQD)
+ if (IS_IQD(card))
goto out;
- rc = qeth_send_simple_setassparms(card, IPA_IPV6,
- IPA_CMD_ASS_START, 3);
+ rc = qeth_send_simple_setassparms(card, IPA_IPV6, IPA_CMD_ASS_START,
+ &ipv6_data);
if (rc) {
dev_err(&card->gdev->dev,
"Activating IPv6 support for %s failed\n",
QETH_CARD_IFNAME(card));
return rc;
}
- rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6,
- IPA_CMD_ASS_START, 0);
+ rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, IPA_CMD_ASS_START,
+ NULL);
if (rc) {
dev_err(&card->gdev->dev,
"Activating IPv6 support for %s failed\n",
@@ -868,7 +877,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
return rc;
}
rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
- IPA_CMD_ASS_START, 0);
+ IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
"Enabling the passthrough mode for %s failed\n",
@@ -894,6 +903,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
{
+ u32 filter_data = 1;
int rc;
QETH_CARD_TEXT(card, 3, "stbrdcst");
@@ -906,7 +916,7 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
goto out;
}
rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
- IPA_CMD_ASS_START, 0);
+ IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev, "Enabling broadcast filtering for "
"%s failed\n", QETH_CARD_IFNAME(card));
@@ -914,7 +924,7 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
}
rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
- IPA_CMD_ASS_CONFIGURE, 1);
+ IPA_CMD_ASS_CONFIGURE, &filter_data);
if (rc) {
dev_warn(&card->gdev->dev,
"Setting up broadcast filtering for %s failed\n",
@@ -924,7 +934,7 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
dev_info(&card->gdev->dev, "Broadcast enabled\n");
rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
- IPA_CMD_ASS_ENABLE, 1);
+ IPA_CMD_ASS_ENABLE, &filter_data);
if (rc) {
dev_warn(&card->gdev->dev, "Setting up broadcast echo "
"filtering for %s failed\n", QETH_CARD_IFNAME(card));
@@ -973,10 +983,10 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(SETUP, 2, "hsrmac");
+ QETH_CARD_TEXT(card, 2, "hsrmac");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
- QETH_PROT_IPV6);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
+ IPA_DATA_SIZEOF(create_destroy_addr));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -1011,7 +1021,7 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(SETUP, 2, "guniqeid");
+ QETH_CARD_TEXT(card, 2, "guniqeid");
if (!qeth_is_supported(card, IPA_IPV6)) {
card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
@@ -1019,8 +1029,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
return 0;
}
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
- QETH_PROT_IPV6);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
+ IPA_DATA_SIZEOF(create_destroy_addr));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -1038,7 +1048,7 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
struct qeth_ipa_cmd *cmd;
__u16 rc;
- QETH_DBF_TEXT(SETUP, 2, "diastrcb");
+ QETH_CARD_TEXT(card, 2, "diastrcb");
cmd = (struct qeth_ipa_cmd *)data;
rc = cmd->hdr.return_code;
@@ -1094,14 +1104,12 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(SETUP, 2, "diagtrac");
+ QETH_CARD_TEXT(card, 2, "diagtrac");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRACE, 0);
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
- cmd->data.diagass.subcmd_len = 16;
- cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
cmd->data.diagass.action = diags_cmd;
return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
@@ -1303,6 +1311,15 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr)
{
+ struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
+ struct net_device *dev = skb->dev;
+
+ if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
+ dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
+ "FAKELL", skb->len);
+ return;
+ }
+
if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
ETH_P_IP;
@@ -1336,8 +1353,6 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
tg_addr, "FAKELL", skb->len);
}
- skb->protocol = eth_type_trans(skb, card->dev);
-
/* copy VLAN tag from hdr into skb */
if (!card->options.sniffer &&
(hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
@@ -1354,12 +1369,10 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
int budget, int *done)
{
- struct net_device *dev = card->dev;
int work_done = 0;
struct sk_buff *skb;
struct qeth_hdr *hdr;
unsigned int len;
- __u16 magic;
*done = 0;
WARN_ON_ONCE(!budget);
@@ -1373,24 +1386,12 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
}
switch (hdr->hdr.l3.id) {
case QETH_HEADER_TYPE_LAYER3:
- magic = *(__u16 *)skb->data;
- if ((card->info.type == QETH_CARD_TYPE_IQD) &&
- (magic == ETH_P_AF_IUCV)) {
- len = skb->len;
- dev_hard_header(skb, dev, ETH_P_AF_IUCV,
- dev->dev_addr, "FAKELL", len);
- skb->protocol = eth_type_trans(skb, dev);
- netif_receive_skb(skb);
- } else {
- qeth_l3_rebuild_skb(card, skb, hdr);
- len = skb->len;
- napi_gro_receive(&card->napi, skb);
- }
- break;
+ qeth_l3_rebuild_skb(card, skb, hdr);
+ /* fall through */
case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
skb->protocol = eth_type_trans(skb, skb->dev);
len = skb->len;
- netif_receive_skb(skb);
+ napi_gro_receive(&card->napi, skb);
break;
default:
dev_kfree_skb_any(skb);
@@ -1408,11 +1409,13 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
static void qeth_l3_stop_card(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP, 2, "stopcard");
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "stopcard");
qeth_set_allowed_threads(card, 0, 1);
+ cancel_work_sync(&card->rx_mode_work);
+ qeth_l3_drain_rx_mode_cache(card);
+
if (card->options.sniffer &&
(card->info.promisc_mode == SET_PROMISC_MODE_ON))
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
@@ -1424,14 +1427,10 @@ static void qeth_l3_stop_card(struct qeth_card *card)
}
if (card->state == CARD_STATE_HARDSETUP) {
qeth_qdio_clear_card(card, 0);
- qeth_clear_qdio_buffers(card);
+ qeth_drain_output_queues(card);
qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
- if (card->state == CARD_STATE_DOWN) {
- qeth_clear_cmd_buffers(&card->read);
- qeth_clear_cmd_buffers(&card->write);
- }
flush_workqueue(card->event_wq);
}
@@ -1451,7 +1450,7 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
(card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
return;
- if (card->info.guestlan) { /* Guestlan trace */
+ if (IS_VM_NIC(card)) { /* Guestlan trace */
if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
qeth_setadp_promisc_mode(card);
} else if (card->options.sniffer && /* HiperSockets trace */
@@ -1466,9 +1465,10 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
}
}
-static void qeth_l3_set_rx_mode(struct net_device *dev)
+static void qeth_l3_rx_mode_work(struct work_struct *work)
{
- struct qeth_card *card = dev->ml_priv;
+ struct qeth_card *card = container_of(work, struct qeth_card,
+ rx_mode_work);
struct qeth_ipaddr *addr;
struct hlist_node *tmp;
int i, rc;
@@ -1476,8 +1476,6 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
QETH_CARD_TEXT(card, 3, "setmulti");
if (!card->options.sniffer) {
- spin_lock_bh(&card->mclock);
-
qeth_l3_add_multicast_ipv4(card);
qeth_l3_add_multicast_ipv6(card);
@@ -1505,8 +1503,6 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
}
}
- spin_unlock_bh(&card->mclock);
-
if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
return;
}
@@ -1551,14 +1547,15 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
* IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
* thus we say EOPNOTSUPP for this ARP function
*/
- if (card->info.guestlan)
+ if (IS_VM_NIC(card))
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
}
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_ARP_SET_NO_ENTRIES, 4,
+ IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
+ SETASS_DATA_SIZEOF(flags_32bit),
QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
@@ -1704,9 +1701,7 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_QUERY_INFO,
- sizeof(struct qeth_arp_query_data)
- - sizeof(char),
- prot);
+ SETASS_DATA_SIZEOF(query_arp), prot);
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -1783,14 +1778,15 @@ static int qeth_l3_arp_modify_entry(struct qeth_card *card,
* IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
* thus we say EOPNOTSUPP for this ARP function
*/
- if (card->info.guestlan)
+ if (IS_VM_NIC(card))
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
}
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd,
- sizeof(*cmd_entry), QETH_PROT_IPV4);
+ SETASS_DATA_SIZEOF(arp_entry),
+ QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
@@ -1816,7 +1812,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
* IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
* thus we say EOPNOTSUPP for this ARP function
*/
- if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
+ if (IS_VM_NIC(card) || IS_IQD(card))
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
@@ -1881,19 +1877,17 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return rc;
}
-static int qeth_l3_get_cast_type(struct sk_buff *skb)
+static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst,
+ int ipv)
{
struct neighbour *n = NULL;
- struct dst_entry *dst;
- rcu_read_lock();
- dst = skb_dst(skb);
if (dst)
n = dst_neigh_lookup_skb(dst, skb);
+
if (n) {
int cast_type = n->type;
- rcu_read_unlock();
neigh_release(n);
if ((cast_type == RTN_BROADCAST) ||
(cast_type == RTN_MULTICAST) ||
@@ -1901,11 +1895,12 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
return cast_type;
return RTN_UNICAST;
}
- rcu_read_unlock();
/* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
- switch (qeth_get_ip_version(skb)) {
+ switch (ipv) {
case 4:
+ if (ipv4_is_lbcast(ip_hdr(skb)->daddr))
+ return RTN_BROADCAST;
return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
RTN_MULTICAST : RTN_UNICAST;
case 6:
@@ -1913,16 +1908,24 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
RTN_MULTICAST : RTN_UNICAST;
default:
/* ... and MAC address */
- if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
- skb->dev->broadcast))
- return RTN_BROADCAST;
- if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
- return RTN_MULTICAST;
- /* default to unicast */
- return RTN_UNICAST;
+ return qeth_get_ether_cast_type(skb);
}
}
+static int qeth_l3_get_cast_type(struct sk_buff *skb)
+{
+ int ipv = qeth_get_ip_version(skb);
+ struct dst_entry *dst;
+ int cast_type;
+
+ rcu_read_lock();
+ dst = qeth_dst_check_rcu(skb, ipv);
+ cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv);
+ rcu_read_unlock();
+
+ return cast_type;
+}
+
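The refactoring above splits the cast-type lookup so the RCU read-side critical section is entered exactly once per skb, with the resolved dst handed to a helper that never drops the lock itself. A minimal sketch of that lock-once shape, with illustrative names (classify/classify_rcu) standing in for the qeth helpers and skb_dst() standing in for qeth_dst_check_rcu():

#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/dst.h>

static int classify_rcu(struct sk_buff *skb, struct dst_entry *dst)
{
	/* inspect dst/neighbour here; caller holds rcu_read_lock() */
	return dst ? 1 : 0;
}

static int classify(struct sk_buff *skb)
{
	struct dst_entry *dst;
	int type;

	rcu_read_lock();
	dst = skb_dst(skb);
	type = classify_rcu(skb, dst);
	rcu_read_unlock();	/* dst must not be dereferenced past here */

	return type;
}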
static u8 qeth_l3_cast_type_to_flag(int cast_type)
{
if (cast_type == RTN_MULTICAST)
@@ -1936,11 +1939,13 @@ static u8 qeth_l3_cast_type_to_flag(int cast_type)
static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, int cast_type, unsigned int data_len)
+ int ipv, unsigned int data_len)
{
struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
struct qeth_card *card = queue->card;
+ struct dst_entry *dst;
+ int cast_type;
hdr->hdr.l3.length = data_len;
@@ -1977,29 +1982,23 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI);
}
- /* OSA only: */
- if (!ipv) {
- hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
- if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
- skb->dev->broadcast))
- hdr->hdr.l3.flags |= QETH_CAST_BROADCAST;
- else
- hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ?
- QETH_CAST_MULTICAST : QETH_CAST_UNICAST;
- return;
- }
-
- hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type);
rcu_read_lock();
+ dst = qeth_dst_check_rcu(skb, ipv);
+
+ if (IS_IQD(card) && skb_get_queue_mapping(skb) != QETH_IQD_MCAST_TXQ)
+ cast_type = RTN_UNICAST;
+ else
+ cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv);
+ l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type);
+
if (ipv == 4) {
- struct rtable *rt = skb_rtable(skb);
+ struct rtable *rt = (struct rtable *) dst;
*((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
rt_nexthop(rt, ip_hdr(skb)->daddr) :
ip_hdr(skb)->daddr;
- } else {
- /* IPv6 */
- const struct rt6_info *rt = skb_rt6_info(skb);
+ } else if (ipv == 6) {
+ struct rt6_info *rt = (struct rt6_info *) dst;
if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
@@ -2007,8 +2006,11 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr;
hdr->hdr.l3.flags |= QETH_HDR_IPV6;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
+ } else {
+ /* OSA only: */
+ l3_hdr->flags |= QETH_HDR_PASSTHRU;
}
rcu_read_unlock();
}
@@ -2028,9 +2030,8 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
}
static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv, int cast_type)
+ struct qeth_qdio_out_q *queue, int ipv)
{
- unsigned char eth_hdr[ETH_HLEN];
unsigned int hw_hdr_len;
int rc;
@@ -2040,67 +2041,63 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
if (rc)
return rc;
- skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
skb_pull(skb, ETH_HLEN);
qeth_l3_fixup_headers(skb);
- rc = qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
- if (rc == -EBUSY) {
- /* roll back to ETH header */
- skb_push(skb, ETH_HLEN);
- skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
- }
- return rc;
+ return qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
}
static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- int cast_type = qeth_l3_get_cast_type(skb);
struct qeth_card *card = dev->ml_priv;
+ u16 txq = skb_get_queue_mapping(skb);
int ipv = qeth_get_ip_version(skb);
struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
int rc;
- queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
-
if (IS_IQD(card)) {
+ queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
+
if (card->options.sniffer)
goto tx_drop;
if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
(card->options.cq == QETH_CQ_ENABLED &&
skb->protocol != htons(ETH_P_AF_IUCV)))
goto tx_drop;
+ } else {
+ queue = card->qdio.out_qs[txq];
}
- if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
+ if (!(dev->flags & IFF_BROADCAST) &&
+ qeth_l3_get_cast_type(skb) == RTN_BROADCAST)
goto tx_drop;
- netif_stop_queue(dev);
-
if (ipv == 4 || IS_IQD(card))
- rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
+ rc = qeth_l3_xmit(card, skb, queue, ipv);
else
- rc = qeth_xmit(card, skb, queue, ipv, cast_type,
- qeth_l3_fill_header);
+ rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
if (!rc) {
QETH_TXQ_STAT_INC(queue, tx_packets);
QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
- netif_wake_queue(dev);
return NETDEV_TX_OK;
- } else if (rc == -EBUSY) {
- return NETDEV_TX_BUSY;
- } /* else fall through */
+ }
tx_drop:
QETH_TXQ_STAT_INC(queue, tx_dropped);
kfree_skb(skb);
- netif_wake_queue(dev);
return NETDEV_TX_OK;
}
+static void qeth_l3_set_rx_mode(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ schedule_work(&card->rx_mode_work);
+}
+
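The new qeth_l3_set_rx_mode() above only schedules rx_mode_work: .ndo_set_rx_mode runs with the netdev address-list lock held and must not sleep, while the IPA registration commands can. A hedged sketch of the deferral pattern, under hypothetical names (my_card, my_rx_mode_work):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_card {
	struct work_struct rx_mode_work;
};

static void my_rx_mode_work(struct work_struct *work)
{
	struct my_card *card = container_of(work, struct my_card,
					    rx_mode_work);

	/* process context: sleeping hardware I/O is allowed here */
	(void)card;
}

static void my_set_rx_mode(struct net_device *dev)
{
	struct my_card *card = netdev_priv(dev);

	schedule_work(&card->rx_mode_work);	/* never sleeps */
}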
/*
* we need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting
* NOARP on the netdevice is no option because it also turns off neighbor
@@ -2134,11 +2131,27 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
return qeth_features_check(skb, dev, features);
}
+static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb),
+ sb_dev);
+}
+
+static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ return qeth_get_priority_queue(card, skb);
+}
+
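The two .ndo_select_queue callbacks above move the queue decision out of the xmit path: once the stack has picked a queue, qeth_l3_hard_start_xmit() can trust skb_get_queue_mapping(). An illustrative-only sketch of such a callback (assumes the device was registered with at least two TX queues; the mapping policy here is made up):

#include <linux/if_packet.h>
#include <linux/netdevice.h>

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
			   struct net_device *sb_dev)
{
	/* e.g. multicast to a dedicated queue, everything else to 0 */
	return skb->pkt_type == PACKET_MULTICAST ? 1 : 0;
}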
static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
+ .ndo_select_queue = qeth_l3_iqd_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
@@ -2155,6 +2168,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_features_check = qeth_l3_osa_features_check,
+ .ndo_select_queue = qeth_l3_osa_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
@@ -2171,8 +2185,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
unsigned int headroom;
int rc;
- if (card->info.type == QETH_CARD_TYPE_OSD ||
- card->info.type == QETH_CARD_TYPE_OSX) {
+ if (IS_OSD(card) || IS_OSX(card)) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
(card->info.link_type == QETH_LINK_TYPE_HSTR)) {
pr_info("qeth_l3: ignoring TR device\n");
@@ -2186,7 +2199,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
card->dev->dev_id = card->info.unique_id & 0xffff;
- if (!card->info.guestlan) {
+ if (!IS_VM_NIC(card)) {
card->dev->features |= NETIF_F_SG;
card->dev->hw_features |= NETIF_F_TSO |
NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
@@ -2210,7 +2223,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
headroom = sizeof(struct qeth_hdr_tso);
else
headroom = sizeof(struct qeth_hdr) + VLAN_HLEN;
- } else if (card->info.type == QETH_CARD_TYPE_IQD) {
+ } else if (IS_IQD(card)) {
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
@@ -2253,14 +2266,22 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
int rc;
hash_init(card->ip_htable);
+ mutex_init(&card->ip_lock);
+ card->cmd_wq = alloc_ordered_workqueue("%s_cmd", 0,
+ dev_name(&gdev->dev));
+ if (!card->cmd_wq)
+ return -ENOMEM;
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l3_create_device_attributes(&gdev->dev);
- if (rc)
+ if (rc) {
+ destroy_workqueue(card->cmd_wq);
return rc;
+ }
}
hash_init(card->ip_mc_htable);
+ INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work);
return 0;
}
@@ -2280,6 +2301,9 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
+
+ flush_workqueue(card->cmd_wq);
+ destroy_workqueue(card->cmd_wq);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
}
@@ -2293,12 +2317,11 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
- QETH_DBF_TEXT(SETUP, 2, "setonlin");
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "setonlin");
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -2314,28 +2337,28 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
qeth_print_status_message(card);
/* softsetup */
- QETH_DBF_TEXT(SETUP, 2, "softsetp");
+ QETH_CARD_TEXT(card, 2, "softsetp");
rc = qeth_l3_setadapter_parms(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
if (!card->options.sniffer) {
rc = qeth_l3_start_ipassists(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "3err%d", rc);
goto out_remove;
}
rc = qeth_l3_setrouting_v4(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "4err%04x", rc);
rc = qeth_l3_setrouting_v6(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "5err%04x", rc);
}
rc = qeth_init_qdio_queues(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -2376,7 +2399,6 @@ out_remove:
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
qdio_free(CARD_DDEV(card));
- card->state = CARD_STATE_DOWN;
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
@@ -2391,8 +2413,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
- QETH_DBF_TEXT(SETUP, 3, "setoffl");
- QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 3, "setoffl");
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
@@ -2418,7 +2439,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
if (!rc)
rc = (rc2) ? rc2 : rc3;
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
qdio_free(CARD_DDEV(card));
/* let user_space know that device is offline */
@@ -2461,33 +2482,6 @@ static int qeth_l3_recover(void *ptr)
return 0;
}
-static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-
- qeth_set_allowed_threads(card, 0, 1);
- wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
- if (gdev->state == CCWGROUP_OFFLINE)
- return 0;
-
- qeth_l3_set_offline(gdev);
- return 0;
-}
-
-static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- int rc;
-
- rc = qeth_l3_set_online(gdev);
-
- qeth_set_allowed_threads(card, 0xffffffff, 0);
- if (rc)
- dev_warn(&card->gdev->dev, "The qeth device driver "
- "failed to recover an error on the device\n");
- return rc;
-}
-
/* Returns zero if the command is successfully "consumed" */
static int qeth_l3_control_event(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
@@ -2503,9 +2497,6 @@ struct qeth_discipline qeth_l3_discipline = {
.remove = qeth_l3_remove_device,
.set_online = qeth_l3_set_online,
.set_offline = qeth_l3_set_offline,
- .freeze = qeth_l3_pm_suspend,
- .thaw = qeth_l3_pm_resume,
- .restore = qeth_l3_pm_resume,
.do_ioctl = qeth_l3_do_ioctl,
.control_event_handler = qeth_l3_control_event,
};
@@ -2517,20 +2508,40 @@ static int qeth_l3_handle_ip_event(struct qeth_card *card,
{
switch (event) {
case NETDEV_UP:
- spin_lock_bh(&card->ip_lock);
- qeth_l3_add_ip(card, addr);
- spin_unlock_bh(&card->ip_lock);
+ qeth_l3_modify_ip(card, addr, true);
return NOTIFY_OK;
case NETDEV_DOWN:
- spin_lock_bh(&card->ip_lock);
- qeth_l3_delete_ip(card, addr);
- spin_unlock_bh(&card->ip_lock);
+ qeth_l3_modify_ip(card, addr, false);
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
+struct qeth_l3_ip_event_work {
+ struct work_struct work;
+ struct qeth_card *card;
+ struct qeth_ipaddr addr;
+};
+
+#define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work)
+
+static void qeth_l3_add_ip_worker(struct work_struct *work)
+{
+ struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
+
+ qeth_l3_modify_ip(ip_work->card, &ip_work->addr, true);
+ kfree(work);
+}
+
+static void qeth_l3_delete_ip_worker(struct work_struct *work)
+{
+ struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
+
+ qeth_l3_modify_ip(ip_work->card, &ip_work->addr, false);
+ kfree(work);
+}
+
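The IPv6 notifier runs in atomic context, so the address change is packaged into a heap-allocated work item that the worker frees after the sleeping qeth_l3_modify_ip() call. The pattern in isolation, under assumed names (addr_work, on_event):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct addr_work {
	struct work_struct work;
	int add;	/* event payload, copied at notification time */
};

static void addr_worker(struct work_struct *work)
{
	struct addr_work *aw = container_of(work, struct addr_work, work);

	/* ... sleeping I/O using aw->add ... */
	kfree(aw);	/* the handler owns and frees the work item */
}

static int on_event(int add)
{
	struct addr_work *aw = kmalloc(sizeof(*aw), GFP_ATOMIC);

	if (!aw)
		return -ENOMEM;
	INIT_WORK(&aw->work, addr_worker);
	aw->add = add;
	schedule_work(&aw->work);
	return 0;
}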
static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
{
if (is_vlan_dev(dev))
@@ -2575,9 +2586,12 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
{
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
struct net_device *dev = ifa->idev->dev;
- struct qeth_ipaddr addr;
+ struct qeth_l3_ip_event_work *ip_work;
struct qeth_card *card;
+ if (event != NETDEV_UP && event != NETDEV_DOWN)
+ return NOTIFY_DONE;
+
card = qeth_l3_get_card_from_dev(dev);
if (!card)
return NOTIFY_DONE;
@@ -2585,11 +2599,23 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
if (!qeth_is_supported(card, IPA_IPV6))
return NOTIFY_DONE;
- qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
- addr.u.a6.addr = ifa->addr;
- addr.u.a6.pfxlen = ifa->prefix_len;
+ ip_work = kmalloc(sizeof(*ip_work), GFP_ATOMIC);
+ if (!ip_work)
+ return NOTIFY_DONE;
- return qeth_l3_handle_ip_event(card, &addr, event);
+ if (event == NETDEV_UP)
+ INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker);
+ else
+ INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker);
+
+ ip_work->card = card;
+ qeth_l3_init_ipaddr(&ip_work->addr, QETH_IP_TYPE_NORMAL,
+ QETH_PROT_IPV6);
+ ip_work->addr.u.a6.addr = ifa->addr;
+ ip_work->addr.u.a6.pfxlen = ifa->prefix_len;
+
+ queue_work(card->cmd_wq, &ip_work->work);
+ return NOTIFY_OK;
}
static struct notifier_block qeth_l3_ip6_notifier = {
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index cff518b0f904..2f73b33c9347 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -206,7 +206,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
if (!card)
return -EINVAL;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
return -EPERM;
if (card->options.cq == QETH_CQ_ENABLED)
return -EPERM;
@@ -258,7 +258,7 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
if (!card)
return -EINVAL;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
return -EPERM;
memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
@@ -276,7 +276,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
if (!card)
return -EINVAL;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
return -EPERM;
if (card->state != CARD_STATE_DOWN)
return -EPERM;
@@ -367,9 +367,9 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
if (card->ipato.enabled != enable) {
card->ipato.enabled = enable;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
}
out:
mutex_unlock(&card->conf_mutex);
@@ -412,9 +412,9 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
if (card->ipato.invert4 != invert) {
card->ipato.invert4 = invert;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
}
out:
mutex_unlock(&card->conf_mutex);
@@ -436,7 +436,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
/* add strlen for "/<mask>\n" */
entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
continue;
@@ -449,7 +449,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
i += snprintf(buf + i, PAGE_SIZE - i,
"%s/%i\n", addr_str, ipatoe->mask_bits);
}
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
i += snprintf(buf + i, PAGE_SIZE - i, "\n");
return i;
@@ -598,9 +598,9 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
if (card->ipato.invert6 != invert) {
card->ipato.invert6 = invert;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
}
out:
mutex_unlock(&card->conf_mutex);
@@ -684,7 +684,7 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto || ipaddr->type != type)
continue;
@@ -698,7 +698,7 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
addr_str);
}
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
return str_len;
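The qeth_l3_sys.c hunks above convert card->ip_lock from a BH spinlock to a mutex; the practical effect is that code traversing the IP tables may now sleep, which a spin_lock_bh() section forbids. A simplified sketch with assumed fields:

#include <linux/list.h>
#include <linux/mutex.h>

struct ipato_ctx {
	struct mutex ip_lock;
	struct list_head entries;
};

static void update_all(struct ipato_ctx *ctx)
{
	mutex_lock(&ctx->ip_lock);	/* may sleep, unlike spin_lock_bh() */
	/* walk ctx->entries, possibly calling functions that sleep */
	mutex_unlock(&ctx->ip_lock);
}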
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c6acca521ffe..31e8a7240fd7 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -167,6 +167,7 @@ extern const struct attribute_group *zfcp_port_attr_groups[];
extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
/* zfcp_unit.c */
extern int zfcp_unit_add(struct zfcp_port *, u64);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 33eddb02ee30..b018b61bd168 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -620,7 +620,7 @@ static void zfcp_fc_sg_free_table(struct scatterlist *sg, int count)
{
int i;
- for (i = 0; i < count; i++, sg++)
+ for (i = 0; i < count; i++, sg = sg_next(sg))
if (sg)
free_page((unsigned long) sg_virt(sg));
else
@@ -641,7 +641,7 @@ static int zfcp_fc_sg_setup_table(struct scatterlist *sg, int count)
int i;
sg_init_table(sg, count);
- for (i = 0; i < count; i++, sg++) {
+ for (i = 0; i < count; i++, sg = sg_next(sg)) {
addr = (void *) get_zeroed_page(GFP_KERNEL);
if (!addr) {
zfcp_fc_sg_free_table(sg, i);
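For context on the zfcp_fc.c change above: these tables can be chained, and sg_next() follows the chain link at the end of each chunk, while plain pointer arithmetic (sg++) runs past the chunk into the link entry itself. The corrected iteration in isolation (fill_pages is a hypothetical caller; sgl is assumed initialized, possibly chained, elsewhere):

#include <linux/scatterlist.h>

static void fill_pages(struct scatterlist *sgl, int count,
		       struct page **pages)
{
	struct scatterlist *sg = sgl;
	int i;

	for (i = 0; i < count; i++, sg = sg_next(sg))	/* not sg++ */
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
}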
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 221d0dfb8493..e9ded2befa0d 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -129,6 +129,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
zfcp_sdev->erp_action.port = port;
+ mutex_lock(&zfcp_sysfs_port_units_mutex);
+ if (zfcp_sysfs_port_is_removing(port)) {
+ /* port is already gone */
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
+ return -ENXIO;
+ }
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
if (unit)
put_device(&unit->dev);
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index b277be6f7611..af197e2b3e69 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -235,6 +235,53 @@ static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
+static void zfcp_sysfs_port_set_removing(struct zfcp_port *const port)
+{
+ lockdep_assert_held(&zfcp_sysfs_port_units_mutex);
+ atomic_set(&port->units, -1);
+}
+
+bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port)
+{
+ lockdep_assert_held(&zfcp_sysfs_port_units_mutex);
+ return atomic_read(&port->units) == -1;
+}
+
+static bool zfcp_sysfs_port_in_use(struct zfcp_port *const port)
+{
+ struct zfcp_adapter *const adapter = port->adapter;
+ unsigned long flags;
+ struct scsi_device *sdev;
+ bool in_use = true;
+
+ mutex_lock(&zfcp_sysfs_port_units_mutex);
+ if (atomic_read(&port->units) > 0)
+ goto unlock_port_units_mutex; /* zfcp_unit(s) under port */
+
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host) {
+ const struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
+
+ if (sdev->sdev_state == SDEV_DEL ||
+ sdev->sdev_state == SDEV_CANCEL)
+ continue;
+ if (zsdev->port != port)
+ continue;
+ /* alive scsi_device under port of interest */
+ goto unlock_host_lock;
+ }
+
+ /* port is about to be removed, so no more unit_add or slave_alloc */
+ zfcp_sysfs_port_set_removing(port);
+ in_use = false;
+
+unlock_host_lock:
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+unlock_port_units_mutex:
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ return in_use;
+}
+
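The helpers above encode their locking contract with lockdep_assert_held(), so a caller that forgets zfcp_sysfs_port_units_mutex is flagged on lockdep-enabled debug kernels instead of racing silently. The idiom in isolation, under hypothetical names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(units_mutex);
static int units_state;

static void set_removing(void)
{
	lockdep_assert_held(&units_mutex);	/* caller must hold it */
	units_state = -1;
}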
static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -257,15 +304,11 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
else
retval = 0;
- mutex_lock(&zfcp_sysfs_port_units_mutex);
- if (atomic_read(&port->units) > 0) {
+ if (zfcp_sysfs_port_in_use(port)) {
retval = -EBUSY;
- mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
goto out;
}
- /* port is about to be removed, so no more unit_add */
- atomic_set(&port->units, -1);
- mutex_unlock(&zfcp_sysfs_port_units_mutex);
write_lock_irq(&adapter->port_list_lock);
list_del(&port->list);
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 1bf0a0984a09..e67bf7388cae 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -124,7 +124,7 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
int retval = 0;
mutex_lock(&zfcp_sysfs_port_units_mutex);
- if (atomic_read(&port->units) == -1) {
+ if (zfcp_sysfs_port_is_removing(port)) {
/* port is already gone */
retval = -ENODEV;
goto out;
@@ -168,8 +168,14 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
write_lock_irq(&port->unit_list_lock);
list_add_tail(&unit->list, &port->unit_list);
write_unlock_irq(&port->unit_list_lock);
+ /*
+ * lock order: shost->scan_mutex before zfcp_sysfs_port_units_mutex
+ * due to zfcp_unit_scsi_scan() => zfcp_scsi_slave_alloc()
+ */
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
zfcp_unit_scsi_scan(unit);
+ return retval;
out:
mutex_unlock(&zfcp_sysfs_port_units_mutex);
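The lock-order comment above is the whole point of moving the unlock: shost->scan_mutex is taken before zfcp_sysfs_port_units_mutex elsewhere, so holding the units mutex across zfcp_unit_scsi_scan() would invert that order. A generic ABBA-avoidance sketch (locks a and b are stand-ins for scan_mutex and port_units_mutex):

#include <linux/mutex.h>

static DEFINE_MUTEX(a);	/* outer lock in the established order */
static DEFINE_MUTEX(b);	/* inner lock in the established order */

static void scan_like_path(void)
{
	mutex_lock(&a);
	mutex_lock(&b);		/* established order: a, then b */
	mutex_unlock(&b);
	mutex_unlock(&a);
}

static void add_like_path(void)
{
	mutex_lock(&b);
	/* ... updates under b ... */
	mutex_unlock(&b);	/* drop b first, or the call below inverts a/b */
	scan_like_path();
}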
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 74c328321889..1a55e5942d36 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -46,9 +46,15 @@ struct vq_config_block {
#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */
+struct vcdev_dma_area {
+ unsigned long indicators;
+ unsigned long indicators2;
+ struct vq_config_block config_block;
+ __u8 status;
+};
+
struct virtio_ccw_device {
struct virtio_device vdev;
- __u8 *status;
__u8 config[VIRTIO_CCW_CONFIG_SIZE];
struct ccw_device *cdev;
__u32 curr_io;
@@ -58,16 +64,24 @@ struct virtio_ccw_device {
spinlock_t lock;
struct mutex io_lock; /* Serializes I/O requests */
struct list_head virtqueues;
- unsigned long indicators;
- unsigned long indicators2;
- struct vq_config_block *config_block;
bool is_thinint;
bool going_away;
bool device_lost;
unsigned int config_ready;
void *airq_info;
+ struct vcdev_dma_area *dma_area;
};
+static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
+{
+ return &vcdev->dma_area->indicators;
+}
+
+static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
+{
+ return &vcdev->dma_area->indicators2;
+}
+
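The vcdev_dma_area introduced above gathers every field the device accesses through channel I/O into one structure allocated from the CCW device's DMA pool, so each member is guaranteed to sit in 31-bit addressable memory. A hedged sketch of the same shape, mirroring the two-argument ccw_device_dma_zalloc()/ccw_device_dma_free() calls used throughout this diff (s390-only API; my_dma_area is illustrative):

#include <asm/ccwdev.h>

struct my_dma_area {
	unsigned long indicators;
	__u8 status;
};

static struct my_dma_area *alloc_area(struct ccw_device *cdev)
{
	/* one allocation replaces several GFP_DMA | GFP_KERNEL kzallocs */
	return ccw_device_dma_zalloc(cdev, sizeof(struct my_dma_area));
}

static void free_area(struct ccw_device *cdev, struct my_dma_area *area)
{
	ccw_device_dma_free(cdev, area, sizeof(*area));
}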
struct vq_info_block_legacy {
__u64 queue;
__u32 align;
@@ -108,7 +122,6 @@ struct virtio_rev_info {
struct virtio_ccw_vq_info {
struct virtqueue *vq;
int num;
- void *queue;
union {
struct vq_info_block s;
struct vq_info_block_legacy l;
@@ -127,11 +140,17 @@ static int virtio_ccw_use_airq = 1;
struct airq_info {
rwlock_t lock;
- u8 summary_indicator;
+ u8 summary_indicator_idx;
struct airq_struct airq;
struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
+static u8 *summary_indicators;
+
+static inline u8 *get_summary_indicator(struct airq_info *info)
+{
+ return summary_indicators + info->summary_indicator_idx;
+}
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
@@ -182,7 +201,7 @@ static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
write_unlock_irqrestore(&info->lock, flags);
}
-static void virtio_airq_handler(struct airq_struct *airq)
+static void virtio_airq_handler(struct airq_struct *airq, bool floating)
{
struct airq_info *info = container_of(airq, struct airq_info, airq);
unsigned long ai;
@@ -196,7 +215,7 @@ static void virtio_airq_handler(struct airq_struct *airq)
break;
vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
}
- info->summary_indicator = 0;
+ *(get_summary_indicator(info)) = 0;
smp_wmb();
/* Walk through indicators field, summary indicator not active. */
for (ai = 0;;) {
@@ -208,7 +227,7 @@ static void virtio_airq_handler(struct airq_struct *airq)
read_unlock(&info->lock);
}
-static struct airq_info *new_airq_info(void)
+static struct airq_info *new_airq_info(int index)
{
struct airq_info *info;
int rc;
@@ -217,13 +236,15 @@ static struct airq_info *new_airq_info(void)
if (!info)
return NULL;
rwlock_init(&info->lock);
- info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
+ info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR
+ | AIRQ_IV_CACHELINE);
if (!info->aiv) {
kfree(info);
return NULL;
}
info->airq.handler = virtio_airq_handler;
- info->airq.lsi_ptr = &info->summary_indicator;
+ info->summary_indicator_idx = index;
+ info->airq.lsi_ptr = get_summary_indicator(info);
info->airq.lsi_mask = 0xff;
info->airq.isc = VIRTIO_AIRQ_ISC;
rc = register_adapter_interrupt(&info->airq);
@@ -245,7 +266,7 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
if (!airq_areas[i])
- airq_areas[i] = new_airq_info();
+ airq_areas[i] = new_airq_info(i);
info = airq_areas[i];
if (!info)
return 0;
@@ -326,29 +347,29 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
struct airq_info *airq_info = vcdev->airq_info;
if (vcdev->is_thinint) {
- thinint_area = kzalloc(sizeof(*thinint_area),
- GFP_DMA | GFP_KERNEL);
+ thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*thinint_area));
if (!thinint_area)
return;
thinint_area->summary_indicator =
- (unsigned long) &airq_info->summary_indicator;
+ (unsigned long) get_summary_indicator(airq_info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->count = sizeof(*thinint_area);
ccw->cda = (__u32)(unsigned long) thinint_area;
} else {
/* payload is the address of the indicators */
- indicatorp = kmalloc(sizeof(&vcdev->indicators),
- GFP_DMA | GFP_KERNEL);
+ indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(indicators(vcdev)));
if (!indicatorp)
return;
*indicatorp = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
- ccw->count = sizeof(&vcdev->indicators);
+ ccw->count = sizeof(indicators(vcdev));
ccw->cda = (__u32)(unsigned long) indicatorp;
}
/* Deregister indicators from host. */
- vcdev->indicators = 0;
+ *indicators(vcdev) = 0;
ccw->flags = 0;
ret = ccw_io_helper(vcdev, ccw,
vcdev->is_thinint ?
@@ -359,8 +380,8 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
"Failed to deregister indicators (%d)\n", ret);
else if (vcdev->is_thinint)
virtio_ccw_drop_indicators(vcdev);
- kfree(indicatorp);
- kfree(thinint_area);
+ ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}
static inline long __do_kvm_notify(struct subchannel_id schid,
@@ -407,15 +428,15 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
{
int ret;
- vcdev->config_block->index = index;
+ vcdev->dma_area->config_block.index = index;
ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
ccw->flags = 0;
ccw->count = sizeof(struct vq_config_block);
- ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
+ ccw->cda = (__u32)(unsigned long)(&vcdev->dma_area->config_block);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
if (ret)
return ret;
- return vcdev->config_block->num ?: -ENOENT;
+ return vcdev->dma_area->config_block.num ?: -ENOENT;
}
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
@@ -423,7 +444,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
struct virtio_ccw_vq_info *info = vq->priv;
unsigned long flags;
- unsigned long size;
int ret;
unsigned int index = vq->index;
@@ -461,9 +481,8 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
ret, index);
vring_del_virtqueue(vq);
- size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
- free_pages_exact(info->queue, size);
- kfree(info->info_block);
+ ccw_device_dma_free(vcdev->cdev, info->info_block,
+ sizeof(*info->info_block));
kfree(info);
}
@@ -473,7 +492,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
struct ccw1 *ccw;
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
@@ -482,7 +501,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
virtio_ccw_del_vq(vq, ccw);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
@@ -494,8 +513,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
int err;
struct virtqueue *vq = NULL;
struct virtio_ccw_vq_info *info;
- unsigned long size = 0; /* silence the compiler */
+ u64 queue;
unsigned long flags;
+ bool may_reduce;
/* Allocate queue. */
info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
@@ -504,8 +524,8 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
err = -ENOMEM;
goto out_err;
}
- info->info_block = kzalloc(sizeof(*info->info_block),
- GFP_DMA | GFP_KERNEL);
+ info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*info->info_block));
if (!info->info_block) {
dev_warn(&vcdev->cdev->dev, "no info block\n");
err = -ENOMEM;
@@ -516,37 +536,34 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
err = info->num;
goto out_err;
}
- size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
- info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
- if (info->queue == NULL) {
- dev_warn(&vcdev->cdev->dev, "no queue\n");
- err = -ENOMEM;
- goto out_err;
- }
+ may_reduce = vcdev->revision > 0;
+ vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
+ vdev, true, may_reduce, ctx,
+ virtio_ccw_kvm_notify, callback, name);
- vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
- true, ctx, info->queue, virtio_ccw_kvm_notify,
- callback, name);
if (!vq) {
/* For now, we fail if we can't get the requested size. */
dev_warn(&vcdev->cdev->dev, "no vq\n");
err = -ENOMEM;
goto out_err;
}
+ /* it may have been reduced */
+ info->num = virtqueue_get_vring_size(vq);
/* Register it with the host. */
+ queue = virtqueue_get_desc_addr(vq);
if (vcdev->revision == 0) {
- info->info_block->l.queue = (__u64)info->queue;
+ info->info_block->l.queue = queue;
info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
info->info_block->l.index = i;
info->info_block->l.num = info->num;
ccw->count = sizeof(info->info_block->l);
} else {
- info->info_block->s.desc = (__u64)info->queue;
+ info->info_block->s.desc = queue;
info->info_block->s.index = i;
info->info_block->s.num = info->num;
- info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
- info->info_block->s.used = (__u64)virtqueue_get_used(vq);
+ info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
+ info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
ccw->count = sizeof(info->info_block->s);
}
ccw->cmd_code = CCW_CMD_SET_VQ;
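The setup path above stops allocating ring pages by hand: vring_create_virtqueue() lets the ring core allocate (and, with may_reduce_num, possibly shrink) the ring, after which the final geometry is read back with virtqueue_get_vring_size() and the address accessors. A minimal sketch under assumed parameters (4096-byte alignment; notify/cb are hypothetical callbacks):

#include <linux/virtio.h>
#include <linux/virtio_ring.h>

static struct virtqueue *make_vq(struct virtio_device *vdev, int index,
				 unsigned int num,
				 bool (*notify)(struct virtqueue *),
				 void (*cb)(struct virtqueue *),
				 const char *name)
{
	struct virtqueue *vq;

	vq = vring_create_virtqueue(index, num, 4096, vdev,
				    true,	/* weak_barriers */
				    true,	/* may_reduce_num */
				    false,	/* no per-buffer context */
				    notify, cb, name);
	if (vq)
		num = virtqueue_get_vring_size(vq);	/* may be smaller */
	return vq;
}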
@@ -572,9 +589,8 @@ out_err:
if (vq)
vring_del_virtqueue(vq);
if (info) {
- if (info->queue)
- free_pages_exact(info->queue, size);
- kfree(info->info_block);
+ ccw_device_dma_free(vcdev->cdev, info->info_block,
+ sizeof(*info->info_block));
}
kfree(info);
return ERR_PTR(err);
@@ -588,7 +604,8 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
struct virtio_thinint_area *thinint_area = NULL;
struct airq_info *info;
- thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
+ thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*thinint_area));
if (!thinint_area) {
ret = -ENOMEM;
goto out;
@@ -603,7 +620,7 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
}
info = vcdev->airq_info;
thinint_area->summary_indicator =
- (unsigned long) &info->summary_indicator;
+ (unsigned long) get_summary_indicator(info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->flags = CCW_FLAG_SLI;
@@ -624,7 +641,7 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
virtio_ccw_drop_indicators(vcdev);
}
out:
- kfree(thinint_area);
+ ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
return ret;
}
@@ -640,7 +657,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
int ret, i, queue_idx = 0;
struct ccw1 *ccw;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
@@ -664,10 +681,11 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
* We need a data area under 2G to communicate. Our payload is
* the address of the indicators.
*/
- indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
+ indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(indicators(vcdev)));
if (!indicatorp)
goto out;
- *indicatorp = (unsigned long) &vcdev->indicators;
+ *indicatorp = (unsigned long) indicators(vcdev);
if (vcdev->is_thinint) {
ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
if (ret)
@@ -676,32 +694,36 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
}
if (!vcdev->is_thinint) {
/* Register queue indicators with host. */
- vcdev->indicators = 0;
+ *indicators(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->flags = 0;
- ccw->count = sizeof(&vcdev->indicators);
+ ccw->count = sizeof(indicators(vcdev));
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
if (ret)
goto out;
}
/* Register indicators2 with host for config changes */
- *indicatorp = (unsigned long) &vcdev->indicators2;
- vcdev->indicators2 = 0;
+ *indicatorp = (unsigned long) indicators2(vcdev);
+ *indicators2(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_CONF_IND;
ccw->flags = 0;
- ccw->count = sizeof(&vcdev->indicators2);
+ ccw->count = sizeof(indicators2(vcdev));
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
if (ret)
goto out;
- kfree(indicatorp);
- kfree(ccw);
+ if (indicatorp)
+ ccw_device_dma_free(vcdev->cdev, indicatorp,
+ sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return 0;
out:
- kfree(indicatorp);
- kfree(ccw);
+ if (indicatorp)
+ ccw_device_dma_free(vcdev->cdev, indicatorp,
+ sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
virtio_ccw_del_vqs(vdev);
return ret;
}
@@ -711,12 +733,12 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct ccw1 *ccw;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
/* Zero status bits. */
- *vcdev->status = 0;
+ vcdev->dma_area->status = 0;
/* Send a reset ccw on device. */
ccw->cmd_code = CCW_CMD_VDEV_RESET;
@@ -724,7 +746,7 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
ccw->count = 0;
ccw->cda = 0;
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static u64 virtio_ccw_get_features(struct virtio_device *vdev)
@@ -735,11 +757,11 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
u64 rc;
struct ccw1 *ccw;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return 0;
- features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
if (!features) {
rc = 0;
goto out_free;
@@ -772,20 +794,16 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
rc |= (u64)le32_to_cpu(features->features) << 32;
out_free:
- kfree(features);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return rc;
}
static void ccw_transport_features(struct virtio_device *vdev)
{
/*
- * Packed ring isn't enabled on virtio_ccw for now,
- * because virtio_ccw uses some legacy accessors,
- * e.g. virtqueue_get_avail() and virtqueue_get_used()
- * which aren't available in packed ring currently.
+ * Currently nothing to do here.
*/
- __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}
static int virtio_ccw_finalize_features(struct virtio_device *vdev)
@@ -802,11 +820,11 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
- features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
if (!features) {
ret = -ENOMEM;
goto out_free;
@@ -841,8 +859,8 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
out_free:
- kfree(features);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return ret;
}
@@ -856,11 +874,12 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
- config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+ config_area = ccw_device_dma_zalloc(vcdev->cdev,
+ VIRTIO_CCW_CONFIG_SIZE);
if (!config_area)
goto out_free;
@@ -882,8 +901,8 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
memcpy(buf, config_area + offset, len);
out_free:
- kfree(config_area);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static void virtio_ccw_set_config(struct virtio_device *vdev,
@@ -895,11 +914,12 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
- config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+ config_area = ccw_device_dma_zalloc(vcdev->cdev,
+ VIRTIO_CCW_CONFIG_SIZE);
if (!config_area)
goto out_free;
@@ -918,61 +938,61 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
out_free:
- kfree(config_area);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- u8 old_status = *vcdev->status;
+ u8 old_status = vcdev->dma_area->status;
struct ccw1 *ccw;
if (vcdev->revision < 1)
- return *vcdev->status;
+ return vcdev->dma_area->status;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return old_status;
ccw->cmd_code = CCW_CMD_READ_STATUS;
ccw->flags = 0;
- ccw->count = sizeof(*vcdev->status);
- ccw->cda = (__u32)(unsigned long)vcdev->status;
+ ccw->count = sizeof(vcdev->dma_area->status);
+ ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
/*
* If the channel program failed (should only happen if the device
* was hotunplugged, and then we clean up via the machine check
- * handler anyway), vcdev->status was not overwritten and we just
+ * handler anyway), vcdev->dma_area->status was not overwritten and we just
* return the old status, which is fine.
*/
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
- return *vcdev->status;
+ return vcdev->dma_area->status;
}
static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- u8 old_status = *vcdev->status;
+ u8 old_status = vcdev->dma_area->status;
struct ccw1 *ccw;
int ret;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
/* Write the status to the host. */
- *vcdev->status = status;
+ vcdev->dma_area->status = status;
ccw->cmd_code = CCW_CMD_WRITE_STATUS;
ccw->flags = 0;
ccw->count = sizeof(status);
- ccw->cda = (__u32)(unsigned long)vcdev->status;
+ ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
/* Write failed? We assume status is unchanged. */
if (ret)
- *vcdev->status = old_status;
- kfree(ccw);
+ vcdev->dma_area->status = old_status;
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
@@ -1005,8 +1025,8 @@ static void virtio_ccw_release_dev(struct device *_d)
struct virtio_device *dev = dev_to_virtio(_d);
struct virtio_ccw_device *vcdev = to_vc_device(dev);
- kfree(vcdev->status);
- kfree(vcdev->config_block);
+ ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
+ sizeof(*vcdev->dma_area));
kfree(vcdev);
}
@@ -1104,17 +1124,17 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
vcdev->err = -EIO;
}
virtio_ccw_check_activity(vcdev, activity);
- for_each_set_bit(i, &vcdev->indicators,
- sizeof(vcdev->indicators) * BITS_PER_BYTE) {
+ for_each_set_bit(i, indicators(vcdev),
+ sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
/* The bit clear must happen before the vring kick. */
- clear_bit(i, &vcdev->indicators);
+ clear_bit(i, indicators(vcdev));
barrier();
vq = virtio_ccw_vq_by_ind(vcdev, i);
vring_interrupt(0, vq);
}
- if (test_bit(0, &vcdev->indicators2)) {
+ if (test_bit(0, indicators2(vcdev))) {
virtio_config_changed(&vcdev->vdev);
- clear_bit(0, &vcdev->indicators2);
+ clear_bit(0, indicators2(vcdev));
}
}
@@ -1214,12 +1234,12 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
struct ccw1 *ccw;
int ret;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
- rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
+ rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
if (!rev) {
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return -ENOMEM;
}
@@ -1249,8 +1269,8 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
}
} while (ret == -EOPNOTSUPP);
- kfree(ccw);
- kfree(rev);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+ ccw_device_dma_free(vcdev->cdev, rev, sizeof(*rev));
return ret;
}
@@ -1266,24 +1286,19 @@ static int virtio_ccw_online(struct ccw_device *cdev)
ret = -ENOMEM;
goto out_free;
}
- vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
- GFP_DMA | GFP_KERNEL);
- if (!vcdev->config_block) {
- ret = -ENOMEM;
- goto out_free;
- }
- vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
- if (!vcdev->status) {
+ vcdev->vdev.dev.parent = &cdev->dev;
+ vcdev->cdev = cdev;
+ vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*vcdev->dma_area));
+ if (!vcdev->dma_area) {
ret = -ENOMEM;
goto out_free;
}
vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
- vcdev->vdev.dev.parent = &cdev->dev;
vcdev->vdev.dev.release = virtio_ccw_release_dev;
vcdev->vdev.config = &virtio_ccw_config_ops;
- vcdev->cdev = cdev;
init_waitqueue_head(&vcdev->wait_q);
INIT_LIST_HEAD(&vcdev->virtqueues);
spin_lock_init(&vcdev->lock);
@@ -1314,8 +1329,8 @@ out_put:
return ret;
out_free:
if (vcdev) {
- kfree(vcdev->status);
- kfree(vcdev->config_block);
+ ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
+ sizeof(*vcdev->dma_area));
}
kfree(vcdev);
return ret;
@@ -1485,8 +1500,17 @@ static void __init no_auto_parse(void)
static int __init virtio_ccw_init(void)
{
+ int rc;
+
/* parse no_auto string before we do anything further */
no_auto_parse();
- return ccw_driver_register(&virtio_ccw_driver);
+
+ summary_indicators = cio_dma_zalloc(MAX_AIRQ_AREAS);
+ if (!summary_indicators)
+ return -ENOMEM;
+ rc = ccw_driver_register(&virtio_ccw_driver);
+ if (rc)
+ cio_dma_free(summary_indicators, MAX_AIRQ_AREAS);
+ return rc;
}
device_initcall(virtio_ccw_init);
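virtio_ccw_init() above ties the lifetime of the shared summary_indicators allocation to successful driver registration. The same unwind shape, restated as a generic and entirely hypothetical sketch:

#include <linux/init.h>
#include <linux/slab.h>

static void *shared_state;

static int register_my_driver(void)	/* stand-in for ccw_driver_register() */
{
	return 0;
}

static int __init my_init(void)
{
	int rc;

	shared_state = kzalloc(64, GFP_KERNEL);
	if (!shared_state)
		return -ENOMEM;

	rc = register_my_driver();
	if (rc)
		kfree(shared_state);	/* undo the allocation on failure */
	return rc;
}
device_initcall(my_init);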