Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c | 12
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 44
-rw-r--r--  drivers/s390/block/dasd_diag.c | 6
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 94
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 7
-rw-r--r--  drivers/s390/block/dasd_eer.c | 2
-rw-r--r--  drivers/s390/block/dasd_fba.c | 4
-rw-r--r--  drivers/s390/block/dasd_int.h | 8
-rw-r--r--  drivers/s390/block/dcssblk.c | 5
-rw-r--r--  drivers/s390/char/monreader.c | 2
-rw-r--r--  drivers/s390/char/monwriter.c | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 8
-rw-r--r--  drivers/s390/cio/ccwreq.c | 16
-rw-r--r--  drivers/s390/cio/chsc.c | 48
-rw-r--r--  drivers/s390/cio/chsc.h | 2
-rw-r--r--  drivers/s390/cio/device.c | 47
-rw-r--r--  drivers/s390/cio/device_pgid.c | 3
-rw-r--r--  drivers/s390/cio/io_sch.h | 10
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 2
-rw-r--r--  drivers/s390/net/claw.c | 120
-rw-r--r--  drivers/s390/net/claw.h | 4
-rw-r--r--  drivers/s390/net/ctcm_fsms.c | 60
-rw-r--r--  drivers/s390/net/ctcm_main.c | 80
-rw-r--r--  drivers/s390/net/ctcm_main.h | 4
-rw-r--r--  drivers/s390/net/ctcm_mpc.c | 64
-rw-r--r--  drivers/s390/net/ctcm_sysfs.c | 20
-rw-r--r--  drivers/s390/net/qeth_core.h | 27
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 423
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 5
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 5
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 108
-rw-r--r--  drivers/s390/net/qeth_l3.h | 1
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 262
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 14
-rw-r--r--  drivers/s390/net/smsgiucv.c | 11
-rw-r--r--  drivers/s390/net/smsgiucv_app.c | 7
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 10
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c | 12
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 5
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 1
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 5
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 24
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 11
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 54
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h | 27
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 169
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 34
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 206
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h | 95
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 103
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 12
51 files changed, 1422 insertions, 883 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 33975e922d65..8373ca0de8e0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -21,6 +21,7 @@
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
+#include <linux/smp_lock.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
@@ -1324,14 +1325,14 @@ static void __dasd_device_check_expire(struct dasd_device *device)
if (device->discipline->term_IO(cqr) != 0) {
/* Hmpf, try again in 5 sec */
dev_err(&device->cdev->dev,
- "cqr %p timed out (%is) but cannot be "
+ "cqr %p timed out (%lus) but cannot be "
"ended, retrying in 5 s\n",
cqr, (cqr->expires/HZ));
cqr->expires += 5*HZ;
dasd_device_set_timer(device, 5*HZ);
} else {
dev_err(&device->cdev->dev,
- "cqr %p timed out (%is), %i retries "
+ "cqr %p timed out (%lus), %i retries "
"remaining\n", cqr, (cqr->expires/HZ),
cqr->retries);
}
@@ -2196,7 +2197,7 @@ static void dasd_setup_queue(struct dasd_block *block)
*/
blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
- blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
+ blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN);
}
/*
@@ -2235,6 +2236,7 @@ static int dasd_open(struct block_device *bdev, fmode_t mode)
if (!block)
return -ENODEV;
+ lock_kernel();
base = block->base;
atomic_inc(&block->open_count);
if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
@@ -2269,12 +2271,14 @@ static int dasd_open(struct block_device *bdev, fmode_t mode)
goto out;
}
+ unlock_kernel();
return 0;
out:
module_put(base->discipline->owner);
unlock:
atomic_dec(&block->open_count);
+ unlock_kernel();
return rc;
}
@@ -2282,8 +2286,10 @@ static int dasd_release(struct gendisk *disk, fmode_t mode)
{
struct dasd_block *block = disk->private_data;
+ lock_kernel();
atomic_dec(&block->open_count);
module_put(block->base->discipline->owner);
+ unlock_kernel();
return 0;
}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index bed7b4634ccd..8d41f3ed38d7 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1083,6 +1083,49 @@ dasd_eer_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
+/*
+ * expiration time for default requests
+ */
+static ssize_t
+dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ int len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_expires);
+ dasd_put_device(device);
+ return len;
+}
+
+static ssize_t
+dasd_expires_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned long val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if ((strict_strtoul(buf, 10, &val) != 0) ||
+ (val > DASD_EXPIRES_MAX) || val == 0) {
+ dasd_put_device(device);
+ return -EINVAL;
+ }
+
+ if (val)
+ device->default_expires = val;
+
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store);
+
static struct attribute * dasd_attrs[] = {
&dev_attr_readonly.attr,
&dev_attr_discipline.attr,
@@ -1094,6 +1137,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_eer_enabled.attr,
&dev_attr_erplog.attr,
&dev_attr_failfast.attr,
+ &dev_attr_expires.attr,
NULL,
};
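
The dasd_devmap.c hunk above adds a per-device "expires" sysfs attribute holding the default request expiry in seconds; writes are parsed with strict_strtoul and rejected when zero or above DASD_EXPIRES_MAX (defined in the dasd_int.h hunk further down). A minimal user-space sketch of the same parse-and-validate step, assuming the 40000000 limit; parse_expires() and the sample strings are illustrative only, not kernel API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define DASD_EXPIRES_MAX 40000000UL	/* upper bound from dasd_int.h */

/* Parse a sysfs-style decimal string the way dasd_expires_store() does:
 * reject non-numeric input, zero, and values above DASD_EXPIRES_MAX. */
static int parse_expires(const char *buf, unsigned long *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(buf, &end, 10);
	if (errno || end == buf || (*end != '\0' && *end != '\n'))
		return -EINVAL;
	if (val == 0 || val > DASD_EXPIRES_MAX)
		return -EINVAL;
	*out = val;
	return 0;
}

int main(void)
{
	const char *samples[] = { "300", "0", "99999999999", "5x" };
	unsigned long expires;
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("\"%s\" -> %s\n", samples[i],
		       parse_expires(samples[i], &expires) ? "rejected" : "accepted");
	return 0;
}
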
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 687f323cdc38..2b3bc3ec0541 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
sizeof(struct dasd_diag_req)) / \
sizeof(struct dasd_diag_bio)) / 2)
#define DIAG_MAX_RETRIES 32
-#define DIAG_TIMEOUT 50 * HZ
+#define DIAG_TIMEOUT 50
static struct dasd_discipline dasd_diag_discipline;
@@ -360,6 +360,8 @@ dasd_diag_check_device(struct dasd_device *device)
goto out;
}
+ device->default_expires = DIAG_TIMEOUT;
+
/* Figure out position of label block */
switch (private->rdc_data.vdev_class) {
case DEV_CLASS_FBA:
@@ -563,7 +565,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
cqr->startdev = memdev;
cqr->memdev = memdev;
cqr->block = block;
- cqr->expires = DIAG_TIMEOUT;
+ cqr->expires = memdev->default_expires * HZ;
cqr->status = DASD_CQR_FILLED;
return cqr;
}
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index ab84da5592e8..66360c24bd48 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -82,6 +82,14 @@ static struct ccw_driver dasd_eckd_driver; /* see below */
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2
+/* emergency request for reserve/release */
+static struct {
+ struct dasd_ccw_req cqr;
+ struct ccw1 ccw;
+ char data[32];
+} *dasd_reserve_req;
+static DEFINE_MUTEX(dasd_reserve_mutex);
+
/* initial attempt at a probe function. this can be simplified once
* the other detection code is gone */
@@ -1107,8 +1115,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
struct dasd_eckd_private *private;
struct dasd_block *block;
struct dasd_uid temp_uid;
- int is_known, rc;
+ int is_known, rc, i;
int readonly;
+ unsigned long value;
if (!ccw_device_is_pathgroup(device->cdev)) {
dev_warn(&device->cdev->dev,
@@ -1143,6 +1152,18 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (rc)
goto out_err1;
+ /* set default timeout */
+ device->default_expires = DASD_EXPIRES;
+ if (private->gneq) {
+ value = 1;
+ for (i = 0; i < private->gneq->timeout.value; i++)
+ value = 10 * value;
+ value = value * private->gneq->timeout.number;
+ /* do not accept useless values */
+ if (value != 0 && value <= DASD_EXPIRES_MAX)
+ device->default_expires = value;
+ }
+
/* Generate device unique id */
rc = dasd_eckd_generate_uid(device);
if (rc)
@@ -1973,7 +1994,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
- cqr->expires = 5 * 60 * HZ; /* 5 minutes */
+ cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = private->path_data.ppm;
cqr->retries = 256;
cqr->buildclk = get_clock();
@@ -2150,7 +2171,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
- cqr->expires = 5 * 60 * HZ; /* 5 minutes */
+ cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = private->path_data.ppm;
cqr->retries = 256;
cqr->buildclk = get_clock();
@@ -2398,7 +2419,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
- cqr->expires = 5 * 60 * HZ; /* 5 minutes */
+ cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = private->path_data.ppm;
cqr->retries = 256;
cqr->buildclk = get_clock();
@@ -2645,15 +2666,23 @@ dasd_eckd_release(struct dasd_device *device)
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
+ int useglobal;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
- DBF_DEV_EVENT(DBF_WARNING, device, "%s",
- "Could not allocate initialization request");
- return PTR_ERR(cqr);
+ mutex_lock(&dasd_reserve_mutex);
+ useglobal = 1;
+ cqr = &dasd_reserve_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(&dasd_reserve_req->ccw, 0,
+ sizeof(dasd_reserve_req->ccw));
+ cqr->cpaddr = &dasd_reserve_req->ccw;
+ cqr->data = &dasd_reserve_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
@@ -2671,7 +2700,10 @@ dasd_eckd_release(struct dasd_device *device)
rc = dasd_sleep_on_immediatly(cqr);
- dasd_sfree_request(cqr, cqr->memdev);
+ if (useglobal)
+ mutex_unlock(&dasd_reserve_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
@@ -2687,15 +2719,23 @@ dasd_eckd_reserve(struct dasd_device *device)
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
+ int useglobal;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
- DBF_DEV_EVENT(DBF_WARNING, device, "%s",
- "Could not allocate initialization request");
- return PTR_ERR(cqr);
+ mutex_lock(&dasd_reserve_mutex);
+ useglobal = 1;
+ cqr = &dasd_reserve_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(&dasd_reserve_req->ccw, 0,
+ sizeof(dasd_reserve_req->ccw));
+ cqr->cpaddr = &dasd_reserve_req->ccw;
+ cqr->data = &dasd_reserve_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
@@ -2713,7 +2753,10 @@ dasd_eckd_reserve(struct dasd_device *device)
rc = dasd_sleep_on_immediatly(cqr);
- dasd_sfree_request(cqr, cqr->memdev);
+ if (useglobal)
+ mutex_unlock(&dasd_reserve_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
@@ -2728,15 +2771,23 @@ dasd_eckd_steal_lock(struct dasd_device *device)
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
+ int useglobal;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
- DBF_DEV_EVENT(DBF_WARNING, device, "%s",
- "Could not allocate initialization request");
- return PTR_ERR(cqr);
+ mutex_lock(&dasd_reserve_mutex);
+ useglobal = 1;
+ cqr = &dasd_reserve_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(&dasd_reserve_req->ccw, 0,
+ sizeof(dasd_reserve_req->ccw));
+ cqr->cpaddr = &dasd_reserve_req->ccw;
+ cqr->data = &dasd_reserve_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_SLCK;
@@ -2754,7 +2805,10 @@ dasd_eckd_steal_lock(struct dasd_device *device)
rc = dasd_sleep_on_immediatly(cqr);
- dasd_sfree_request(cqr, cqr->memdev);
+ if (useglobal)
+ mutex_unlock(&dasd_reserve_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
@@ -3488,10 +3542,15 @@ dasd_eckd_init(void)
int ret;
ASCEBC(dasd_eckd_discipline.ebcname, 4);
+ dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
+ GFP_KERNEL | GFP_DMA);
+ if (!dasd_reserve_req)
+ return -ENOMEM;
ret = ccw_driver_register(&dasd_eckd_driver);
if (!ret)
wait_for_device_probe();
-
+ else
+ kfree(dasd_reserve_req);
return ret;
}
@@ -3499,6 +3558,7 @@ static void __exit
dasd_eckd_cleanup(void)
{
ccw_driver_unregister(&dasd_eckd_driver);
+ kfree(dasd_reserve_req);
}
module_init(dasd_eckd_init);
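
The reserve/release/steal-lock paths in dasd_eckd.c now fall back to a statically allocated request, serialized by dasd_reserve_mutex, when dasd_smalloc_request() fails, so a reserve can still be issued under memory pressure. A user-space sketch of the same "dynamic allocation with a mutex-guarded emergency object" pattern, using POSIX threads; all names here are illustrative, not the driver's:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct request { char data[32]; };

/* Preallocated emergency request, analogous to dasd_reserve_req. */
static struct request emergency_req;
static pthread_mutex_t emergency_mutex = PTHREAD_MUTEX_INITIALIZER;

static struct request *get_request(int *useglobal)
{
	struct request *req = malloc(sizeof(*req));

	*useglobal = 0;
	if (!req) {
		/* Allocation failed: fall back to the static object and hold
		 * the mutex until the caller is done with it. */
		pthread_mutex_lock(&emergency_mutex);
		*useglobal = 1;
		req = &emergency_req;
		memset(req, 0, sizeof(*req));
	}
	return req;
}

static void put_request(struct request *req, int useglobal)
{
	if (useglobal)
		pthread_mutex_unlock(&emergency_mutex);
	else
		free(req);
}

int main(void)
{
	int useglobal;
	struct request *req = get_request(&useglobal);

	snprintf(req->data, sizeof(req->data), "reserve");
	printf("using %s request: %s\n",
	       useglobal ? "emergency" : "dynamic", req->data);
	put_request(req, useglobal);
	return 0;
}
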
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index dd6385a5af14..0eb49655a6cd 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -320,7 +320,12 @@ struct dasd_gneq {
__u8 identifier:2;
__u8 reserved:6;
} __attribute__ ((packed)) flags;
- __u8 reserved[7];
+ __u8 reserved[5];
+ struct {
+ __u8 value:2;
+ __u8 number:6;
+ } __attribute__ ((packed)) timeout;
+ __u8 reserved3;
__u16 subsystemID;
__u8 reserved2[22];
} __attribute__ ((packed));
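
The new timeout field in struct dasd_gneq is consumed by dasd_eckd_check_characteristics() above: the default expiry becomes number * 10^value seconds and is only accepted when non-zero and at most DASD_EXPIRES_MAX. A stand-alone sketch of that decoding with made-up sample values; struct gneq_timeout is an illustrative stand-in for the packed bit-fields:

#include <stdio.h>

#define DASD_EXPIRES	 300		/* fallback default, from dasd_int.h */
#define DASD_EXPIRES_MAX 40000000UL

/* Mirror of the two-bit/six-bit timeout encoding added to struct dasd_gneq. */
struct gneq_timeout {
	unsigned int value;	/* power-of-ten exponent */
	unsigned int number;	/* mantissa */
};

static unsigned long decode_expires(struct gneq_timeout t)
{
	unsigned long val = 1;
	unsigned int i;

	for (i = 0; i < t.value; i++)
		val *= 10;
	val *= t.number;
	/* do not accept useless values, keep the built-in default instead */
	if (val == 0 || val > DASD_EXPIRES_MAX)
		return DASD_EXPIRES;
	return val;
}

int main(void)
{
	struct gneq_timeout samples[] = { {2, 6}, {0, 0}, {3, 50} };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("value=%u number=%u -> %lu s\n",
		       samples[i].value, samples[i].number,
		       decode_expires(samples[i]));
	return 0;
}
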
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index dd88803e4899..7158f9528ecc 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -701,7 +701,7 @@ int __init dasd_eer_init(void)
void dasd_eer_exit(void)
{
if (dasd_eer_dev) {
- WARN_ON(misc_deregister(dasd_eer_dev) != 0);
+ misc_deregister(dasd_eer_dev);
kfree(dasd_eer_dev);
dasd_eer_dev = NULL;
}
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 37282b90eecc..bec5486e0e6d 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -163,6 +163,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
return rc;
}
+ device->default_expires = DASD_EXPIRES;
+
readonly = dasd_device_is_ro(device);
if (readonly)
set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
@@ -370,7 +372,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
cqr->startdev = memdev;
cqr->memdev = memdev;
cqr->block = block;
- cqr->expires = 5 * 60 * HZ; /* 5 minutes */
+ cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */
cqr->retries = 32;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 49b431d135e0..500678d7116c 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -186,7 +186,7 @@ struct dasd_ccw_req {
/* ... and how */
unsigned long starttime; /* jiffies time of request start */
- int expires; /* expiration period in jiffies */
+ unsigned long expires; /* expiration period in jiffies */
char lpm; /* logical path mask */
void *data; /* pointer to data area */
@@ -224,6 +224,9 @@ struct dasd_ccw_req {
#define DASD_CQR_CLEARED 0x84 /* request was cleared */
#define DASD_CQR_SUCCESS 0x85 /* request was successful */
+/* default expiration time*/
+#define DASD_EXPIRES 300
+#define DASD_EXPIRES_MAX 40000000
/* per dasd_ccw_req flags */
#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
@@ -404,6 +407,9 @@ struct dasd_device {
/* hook for alias management */
struct list_head alias_list;
+
+ /* default expiration time in s */
+ unsigned long default_expires;
};
struct dasd_block {
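
cqr->expires is widened to unsigned long and now carries jiffies derived from the per-device default_expires in seconds (the build_cp routines multiply by HZ), which is also why the dasd.c hunk switches the printk format from %i to %lu. A trivial sketch of the conversion, assuming HZ = 100 purely for illustration:

#include <stdio.h>

#define HZ 100			/* assumed tick rate; the real value is config-dependent */
#define DASD_EXPIRES 300	/* default expiry in seconds */

int main(void)
{
	unsigned long default_expires = DASD_EXPIRES;	/* per-device value, seconds */
	unsigned long expires = default_expires * HZ;	/* cqr->expires, in jiffies */

	/* With an unsigned long expiry the matching format is %lu. */
	printf("cqr timed out (%lus)\n", expires / HZ);
	return 0;
}
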
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 9b43ae94beba..2bd72aa34c59 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
+#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -775,6 +776,7 @@ dcssblk_open(struct block_device *bdev, fmode_t mode)
struct dcssblk_dev_info *dev_info;
int rc;
+ lock_kernel();
dev_info = bdev->bd_disk->private_data;
if (NULL == dev_info) {
rc = -ENODEV;
@@ -784,6 +786,7 @@ dcssblk_open(struct block_device *bdev, fmode_t mode)
bdev->bd_block_size = 4096;
rc = 0;
out:
+ unlock_kernel();
return rc;
}
@@ -794,6 +797,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
struct segment_info *entry;
int rc;
+ lock_kernel();
if (!dev_info) {
rc = -ENODEV;
goto out;
@@ -811,6 +815,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
up_write(&dcssblk_devices_sem);
rc = 0;
out:
+ unlock_kernel();
return rc;
}
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 2ed3f82e5c30..e021ec663ef9 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -627,7 +627,7 @@ out_iucv:
static void __exit mon_exit(void)
{
segment_unload(mon_dcss_name);
- WARN_ON(misc_deregister(&mon_dev) != 0);
+ misc_deregister(&mon_dev);
device_unregister(monreader_device);
driver_unregister(&monreader_driver);
iucv_unregister(&monreader_iucv_handler, 1);
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 98a49dfda1de..572a1e7fd099 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -380,7 +380,7 @@ out_driver:
static void __exit mon_exit(void)
{
- WARN_ON(misc_deregister(&mon_dev) != 0);
+ misc_deregister(&mon_dev);
platform_device_unregister(monwriter_pdev);
platform_driver_unregister(&monwriter_pdrv);
}
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 097da8ce6be6..b7de02525ec9 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/blkdev.h>
+#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>
@@ -361,6 +362,7 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
struct tape_device * device;
int rc;
+ lock_kernel();
device = tape_get_device(disk->private_data);
if (device->required_tapemarks) {
@@ -384,12 +386,14 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
* is called.
*/
tape_state_set(device, TS_BLKUSE);
+ unlock_kernel();
return 0;
release:
tape_release(device);
put_device:
tape_put_device(device);
+ unlock_kernel();
return rc;
}
@@ -403,10 +407,12 @@ static int
tapeblock_release(struct gendisk *disk, fmode_t mode)
{
struct tape_device *device = disk->private_data;
-
+
+ lock_kernel();
tape_state_set(device, TS_IN_USE);
tape_release(device);
tape_put_device(device);
+ unlock_kernel();
return 0;
}
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 7f206ed44fdf..d15f8b4d78bd 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -38,9 +38,13 @@ static u16 ccwreq_next_path(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
+ if (!req->singlepath) {
+ req->mask = 0;
+ goto out;
+ }
req->retries = req->maxretries;
req->mask = lpm_adjust(req->mask >>= 1, req->lpm);
-
+out:
return req->mask;
}
@@ -113,8 +117,12 @@ void ccw_request_start(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
- /* Try all paths twice to counter link flapping. */
- req->mask = 0x8080;
+ if (req->singlepath) {
+ /* Try all paths twice to counter link flapping. */
+ req->mask = 0x8080;
+ } else
+ req->mask = req->lpm;
+
req->retries = req->maxretries;
req->mask = lpm_adjust(req->mask, req->lpm);
req->drc = 0;
@@ -182,6 +190,8 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
/* Ask the driver what to do */
if (cdev->drv && cdev->drv->uc_handler) {
todo = cdev->drv->uc_handler(cdev, lcirb);
+ CIO_TRACE_EVENT(2, "uc_response");
+ CIO_HEX_EVENT(2, &todo, sizeof(todo));
switch (todo) {
case UC_TODO_RETRY:
return IO_STATUS_ERROR;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 407d0e9adfaf..4cbb1a6ca33c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -29,6 +29,7 @@
#include "chsc.h"
static void *sei_page;
+static DEFINE_SPINLOCK(siosl_lock);
static DEFINE_SPINLOCK(sda_lock);
/**
@@ -48,6 +49,7 @@ int chsc_error_from_response(int response)
case 0x0007:
case 0x0008:
case 0x000a:
+ case 0x0104:
return -EINVAL;
case 0x0004:
return -EOPNOTSUPP;
@@ -974,3 +976,49 @@ int chsc_sstpi(void *page, void *result, size_t size)
return (rr->response.code == 0x0001) ? 0 : -EIO;
}
+static struct {
+ struct chsc_header request;
+ u32 word1;
+ struct subchannel_id sid;
+ u32 word3;
+ struct chsc_header response;
+ u32 word[11];
+} __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE)));
+
+int chsc_siosl(struct subchannel_id schid)
+{
+ unsigned long flags;
+ int ccode;
+ int rc;
+
+ spin_lock_irqsave(&siosl_lock, flags);
+ memset(&siosl_area, 0, sizeof(siosl_area));
+ siosl_area.request.length = 0x0010;
+ siosl_area.request.code = 0x0046;
+ siosl_area.word1 = 0x80000000;
+ siosl_area.sid = schid;
+
+ ccode = chsc(&siosl_area);
+ if (ccode > 0) {
+ if (ccode == 3)
+ rc = -ENODEV;
+ else
+ rc = -EBUSY;
+ CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
+ schid.ssid, schid.sch_no, ccode);
+ goto out;
+ }
+ rc = chsc_error_from_response(siosl_area.response.code);
+ if (rc)
+ CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
+ schid.ssid, schid.sch_no,
+ siosl_area.response.code);
+ else
+ CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
+ schid.ssid, schid.sch_no);
+out:
+ spin_unlock_irqrestore(&siosl_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(chsc_siosl);
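
chsc_siosl() above issues the CHSC command that initiates model-dependent logging for a subchannel and maps its response code through chsc_error_from_response(), which this patch additionally teaches to treat 0x0104 as -EINVAL. A user-space sketch of that mapping, reproducing only the cases visible in the hunks; everything else falls back to -EIO here purely for illustration:

#include <errno.h>
#include <stdio.h>

static int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:	/* newly mapped to -EINVAL by this patch */
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}

int main(void)
{
	int codes[] = { 0x0001, 0x0104, 0x0004, 0x00ff };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("response 0x%04x -> %d\n", codes[i],
		       chsc_error_from_response(codes[i]));
	return 0;
}
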
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 37aa611d4ac5..5453013f094b 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -80,4 +80,6 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp);
int chsc_error_from_response(int response);
+int chsc_siosl(struct subchannel_id schid);
+
#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6d229f3523a0..51bd3687d163 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -36,6 +36,7 @@
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
+#include "chsc.h"
static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
@@ -486,9 +487,11 @@ static int online_store_handle_offline(struct ccw_device *cdev)
spin_lock_irq(cdev->ccwlock);
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
spin_unlock_irq(cdev->ccwlock);
- } else if (cdev->online && cdev->drv && cdev->drv->set_offline)
+ return 0;
+ }
+ if (cdev->drv && cdev->drv->set_offline)
return ccw_device_set_offline(cdev);
- return 0;
+ return -EINVAL;
}
static int online_store_recog_and_online(struct ccw_device *cdev)
@@ -505,8 +508,8 @@ static int online_store_recog_and_online(struct ccw_device *cdev)
return -EAGAIN;
}
if (cdev->drv && cdev->drv->set_online)
- ccw_device_set_online(cdev);
- return 0;
+ return ccw_device_set_online(cdev);
+ return -EINVAL;
}
static int online_store_handle_online(struct ccw_device *cdev, int force)
@@ -598,6 +601,25 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf)
}
}
+static ssize_t
+initiate_logging(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ int rc;
+
+ rc = chsc_siosl(sch->schid);
+ if (rc < 0) {
+ pr_warning("Logging for subchannel 0.%x.%04x failed with "
+ "errno=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, rc);
+ return rc;
+ }
+ pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ return count;
+}
+
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
@@ -605,10 +627,12 @@ static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
+static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static struct attribute *io_subchannel_attrs[] = {
&dev_attr_chpids.attr,
&dev_attr_pimpampom.attr,
+ &dev_attr_logging.attr,
NULL,
};
@@ -2036,6 +2060,21 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
}
}
+/**
+ * ccw_device_siosl() - initiate logging
+ * @cdev: ccw device
+ *
+ * This function is used to invoke model-dependent logging within the channel
+ * subsystem.
+ */
+int ccw_device_siosl(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ return chsc_siosl(sch->schid);
+}
+EXPORT_SYMBOL_GPL(ccw_device_siosl);
+
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 6facb5499a65..82a5ad0d63f6 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -208,6 +208,7 @@ static void spid_start(struct ccw_device *cdev)
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = 0x80;
+ req->singlepath = 1;
req->callback = spid_callback;
spid_do(cdev);
}
@@ -420,6 +421,7 @@ static void verify_start(struct ccw_device *cdev)
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = 0x80;
+ req->singlepath = 1;
if (cdev->private->flags.pgroup) {
CIO_TRACE_EVENT(4, "snid");
CIO_HEX_EVENT(4, devid, sizeof(*devid));
@@ -507,6 +509,7 @@ void ccw_device_disband_start(struct ccw_device *cdev)
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->singlepath = 1;
req->callback = disband_callback;
fn = SPID_FUNC_DISBAND;
if (cdev->private->flags.mpath)
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index b9ce712a7f25..469ef93f2302 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -92,11 +92,12 @@ enum io_status {
* @filter: optional callback to adjust request status based on IRB data
* @callback: final callback
* @data: user-defined pointer passed to all callbacks
+ * @singlepath: if set, use only one path from @lpm per start I/O
+ * @cancel: non-zero if request was cancelled
+ * @done: non-zero if request was finished
* @mask: current path mask
* @retries: current number of retries
* @drc: delayed return code
- * @cancel: non-zero if request was cancelled
- * @done: non-zero if request was finished
*/
struct ccw_request {
struct ccw1 *cp;
@@ -108,12 +109,13 @@ struct ccw_request {
enum io_status);
void (*callback)(struct ccw_device *, void *, int);
void *data;
+ unsigned int singlepath:1;
/* These fields are used internally. */
+ unsigned int cancel:1;
+ unsigned int done:1;
u16 mask;
u16 retries;
int drc;
- int cancel:1;
- int done:1;
} __attribute__((packed));
/*
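
The io_sch.h hunk turns the cancel and done flags into unsigned one-bit fields (alongside the new singlepath flag). With a plain int bit-field, which GCC treats as signed by default, a one-bit field can only hold 0 or -1, so a test like done == 1 never succeeds; a small demonstration independent of the driver (bit-field signedness is implementation-defined):

#include <stdio.h>

struct flags_signed   { int cancel:1, done:1; };
struct flags_unsigned { unsigned int cancel:1, done:1; };

int main(void)
{
	struct flags_signed   s = { .done = 1 };
	struct flags_unsigned u = { .done = 1 };

	/* A signed 1-bit field stores the 1 as -1, so the comparison fails. */
	printf("signed:   done=%d, (done == 1) is %d\n", s.done, s.done == 1);
	printf("unsigned: done=%d, (done == 1) is %d\n", u.done, u.done == 1);
	return 0;
}
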
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 6326b67c45d2..34c7e4046df4 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -368,6 +368,8 @@ static void setup_qib(struct qdio_irq *irq_ptr,
if (qebsm_possible())
irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+ irq_ptr->qib.rflags |= init_data->qib_rflags;
+
irq_ptr->qib.qfmt = init_data->q_format;
if (init_data->no_input_qs)
irq_ptr->qib.isliba =
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 147bb1a69aba..8e4153d740f3 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -295,7 +295,7 @@ claw_driver_group_store(struct device_driver *ddrv, const char *buf,
int err;
err = ccwgroup_create_from_string(claw_root_dev,
claw_group_driver.driver_id,
- &claw_ccw_driver, 3, buf);
+ &claw_ccw_driver, 2, buf);
return err ? err : count;
}
@@ -386,7 +386,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
struct chbk *p_ch;
CLAW_DBF_TEXT(4, trace, "claw_tx");
- p_ch=&privptr->channel[WRITE];
+ p_ch = &privptr->channel[WRITE_CHANNEL];
spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
rc=claw_hw_tx( skb, dev, 1 );
spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
@@ -407,7 +407,7 @@ static struct sk_buff *
claw_pack_skb(struct claw_privbk *privptr)
{
struct sk_buff *new_skb,*held_skb;
- struct chbk *p_ch = &privptr->channel[WRITE];
+ struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
struct claw_env *p_env = privptr->p_env;
int pkt_cnt,pk_ind,so_far;
@@ -515,15 +515,15 @@ claw_open(struct net_device *dev)
privptr->p_env->write_size=CLAW_FRAME_SIZE;
}
claw_set_busy(dev);
- tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet,
- (unsigned long) &privptr->channel[READ]);
+ tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
+ (unsigned long) &privptr->channel[READ_CHANNEL]);
for ( i = 0; i < 2; i++) {
CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
init_waitqueue_head(&privptr->channel[i].wait);
/* skb_queue_head_init(&p_ch->io_queue); */
- if (i == WRITE)
+ if (i == WRITE_CHANNEL)
skb_queue_head_init(
- &privptr->channel[WRITE].collect_queue);
+ &privptr->channel[WRITE_CHANNEL].collect_queue);
privptr->channel[i].flag_a = 0;
privptr->channel[i].IO_active = 0;
privptr->channel[i].flag &= ~CLAW_TIMER;
@@ -551,12 +551,12 @@ claw_open(struct net_device *dev)
if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
del_timer(&timer);
}
- if ((((privptr->channel[READ].last_dstat |
- privptr->channel[WRITE].last_dstat) &
+ if ((((privptr->channel[READ_CHANNEL].last_dstat |
+ privptr->channel[WRITE_CHANNEL].last_dstat) &
~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
- (((privptr->channel[READ].flag |
- privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
- dev_info(&privptr->channel[READ].cdev->dev,
+ (((privptr->channel[READ_CHANNEL].flag |
+ privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
+ dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
"%s: remote side is not ready\n", dev->name);
CLAW_DBF_TEXT(2, trace, "notrdy");
@@ -608,8 +608,8 @@ claw_open(struct net_device *dev)
}
}
privptr->buffs_alloc = 0;
- privptr->channel[READ].flag= 0x00;
- privptr->channel[WRITE].flag = 0x00;
+ privptr->channel[READ_CHANNEL].flag = 0x00;
+ privptr->channel[WRITE_CHANNEL].flag = 0x00;
privptr->p_buff_ccw=NULL;
privptr->p_buff_read=NULL;
privptr->p_buff_write=NULL;
@@ -652,10 +652,10 @@ claw_irq_handler(struct ccw_device *cdev,
}
/* Try to extract channel from driver data. */
- if (privptr->channel[READ].cdev == cdev)
- p_ch = &privptr->channel[READ];
- else if (privptr->channel[WRITE].cdev == cdev)
- p_ch = &privptr->channel[WRITE];
+ if (privptr->channel[READ_CHANNEL].cdev == cdev)
+ p_ch = &privptr->channel[READ_CHANNEL];
+ else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
+ p_ch = &privptr->channel[WRITE_CHANNEL];
else {
dev_warn(&cdev->dev, "The device is not a CLAW device\n");
CLAW_DBF_TEXT(2, trace, "badchan");
@@ -813,7 +813,7 @@ claw_irq_handler(struct ccw_device *cdev,
claw_clearbit_busy(TB_TX, dev);
claw_clear_busy(dev);
}
- p_ch_r = (struct chbk *)&privptr->channel[READ];
+ p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
if (test_and_set_bit(CLAW_BH_ACTIVE,
(void *)&p_ch_r->flag_a) == 0)
tasklet_schedule(&p_ch_r->tasklet);
@@ -878,13 +878,13 @@ claw_release(struct net_device *dev)
for ( i = 1; i >=0 ; i--) {
spin_lock_irqsave(
get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
- /* del_timer(&privptr->channel[READ].timer); */
+ /* del_timer(&privptr->channel[READ_CHANNEL].timer); */
privptr->channel[i].claw_state = CLAW_STOP;
privptr->channel[i].IO_active = 0;
parm = (unsigned long) &privptr->channel[i];
- if (i == WRITE)
+ if (i == WRITE_CHANNEL)
claw_purge_skb_queue(
- &privptr->channel[WRITE].collect_queue);
+ &privptr->channel[WRITE_CHANNEL].collect_queue);
rc = ccw_device_halt (privptr->channel[i].cdev, parm);
if (privptr->system_validate_comp==0x00) /* never opened? */
init_waitqueue_head(&privptr->channel[i].wait);
@@ -971,16 +971,16 @@ claw_release(struct net_device *dev)
privptr->mtc_skipping = 1;
privptr->mtc_offset=0;
- if (((privptr->channel[READ].last_dstat |
- privptr->channel[WRITE].last_dstat) &
+ if (((privptr->channel[READ_CHANNEL].last_dstat |
+ privptr->channel[WRITE_CHANNEL].last_dstat) &
~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
- dev_warn(&privptr->channel[READ].cdev->dev,
+ dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
"Deactivating %s completed with incorrect"
" subchannel status "
"(read %02x, write %02x)\n",
dev->name,
- privptr->channel[READ].last_dstat,
- privptr->channel[WRITE].last_dstat);
+ privptr->channel[READ_CHANNEL].last_dstat,
+ privptr->channel[WRITE_CHANNEL].last_dstat);
CLAW_DBF_TEXT(2, trace, "badclose");
}
CLAW_DBF_TEXT(4, trace, "rlsexit");
@@ -1324,7 +1324,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
CLAW_DBF_TEXT(4, trace, "hw_tx");
privptr = (struct claw_privbk *)(dev->ml_priv);
- p_ch=(struct chbk *)&privptr->channel[WRITE];
+ p_ch = (struct chbk *)&privptr->channel[WRITE_CHANNEL];
p_env =privptr->p_env;
claw_free_wrt_buf(dev); /* Clean up free chain if posible */
/* scan the write queue to free any completed write packets */
@@ -1357,7 +1357,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
claw_strt_out_IO(dev );
claw_free_wrt_buf( dev );
if (privptr->write_free_count==0) {
- ch = &privptr->channel[WRITE];
+ ch = &privptr->channel[WRITE_CHANNEL];
atomic_inc(&skb->users);
skb_queue_tail(&ch->collect_queue, skb);
goto Done;
@@ -1369,7 +1369,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
}
/* tx lock */
if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
- ch = &privptr->channel[WRITE];
+ ch = &privptr->channel[WRITE_CHANNEL];
atomic_inc(&skb->users);
skb_queue_tail(&ch->collect_queue, skb);
claw_strt_out_IO(dev );
@@ -1385,7 +1385,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
privptr->p_write_free_chain == NULL ) {
claw_setbit_busy(TB_NOBUFFER,dev);
- ch = &privptr->channel[WRITE];
+ ch = &privptr->channel[WRITE_CHANNEL];
atomic_inc(&skb->users);
skb_queue_tail(&ch->collect_queue, skb);
CLAW_DBF_TEXT(2, trace, "clawbusy");
@@ -1397,7 +1397,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
while (len_of_data > 0) {
p_this_ccw=privptr->p_write_free_chain; /* get a block */
if (p_this_ccw == NULL) { /* lost the race */
- ch = &privptr->channel[WRITE];
+ ch = &privptr->channel[WRITE_CHANNEL];
atomic_inc(&skb->users);
skb_queue_tail(&ch->collect_queue, skb);
goto Done2;
@@ -2067,7 +2067,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
*catch up to each other */
privptr = dev->ml_priv;
p_env=privptr->p_env;
- tdev = &privptr->channel[READ].cdev->dev;
+ tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
memcpy( &temp_host_name, p_env->host_name, 8);
memcpy( &temp_ws_name, p_env->adapter_name , 8);
dev_info(tdev, "%s: CLAW device %.8s: "
@@ -2245,7 +2245,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
dev->name, temp_ws_name,
p_ctlbk->linkid);
privptr->active_link_ID = p_ctlbk->linkid;
- p_ch = &privptr->channel[WRITE];
+ p_ch = &privptr->channel[WRITE_CHANNEL];
wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
break;
case CONNECTION_RESPONSE:
@@ -2296,7 +2296,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
"%s: Confirmed Now packing\n", dev->name);
p_env->packing = DO_PACKED;
}
- p_ch = &privptr->channel[WRITE];
+ p_ch = &privptr->channel[WRITE_CHANNEL];
wake_up(&p_ch->wait);
} else {
dev_warn(tdev, "Activating %s failed because of"
@@ -2556,7 +2556,7 @@ unpack_read(struct net_device *dev )
p_packd=NULL;
privptr = dev->ml_priv;
- p_dev = &privptr->channel[READ].cdev->dev;
+ p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
p_env = privptr->p_env;
p_this_ccw=privptr->p_read_active_first;
while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
@@ -2728,7 +2728,7 @@ claw_strt_read (struct net_device *dev, int lock )
struct ccwbk*p_ccwbk;
struct chbk *p_ch;
struct clawh *p_clawh;
- p_ch=&privptr->channel[READ];
+ p_ch = &privptr->channel[READ_CHANNEL];
CLAW_DBF_TEXT(4, trace, "StRdNter");
p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
@@ -2782,7 +2782,7 @@ claw_strt_out_IO( struct net_device *dev )
return;
}
privptr = (struct claw_privbk *)dev->ml_priv;
- p_ch=&privptr->channel[WRITE];
+ p_ch = &privptr->channel[WRITE_CHANNEL];
CLAW_DBF_TEXT(4, trace, "strt_io");
p_first_ccw=privptr->p_write_active_first;
@@ -2875,7 +2875,7 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
if (dev->flags & IFF_RUNNING)
claw_release(dev);
if (privptr) {
- privptr->channel[READ].ndev = NULL; /* say it's free */
+ privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
}
dev->ml_priv = NULL;
#ifdef MODULE
@@ -2960,18 +2960,18 @@ claw_new_device(struct ccwgroup_device *cgdev)
struct ccw_dev_id dev_id;
dev_info(&cgdev->dev, "add for %s\n",
- dev_name(&cgdev->cdev[READ]->dev));
+ dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
CLAW_DBF_TEXT(2, setup, "new_dev");
privptr = dev_get_drvdata(&cgdev->dev);
- dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
- dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
if (!privptr)
return -ENODEV;
p_env = privptr->p_env;
- ccw_device_get_id(cgdev->cdev[READ], &dev_id);
- p_env->devno[READ] = dev_id.devno;
- ccw_device_get_id(cgdev->cdev[WRITE], &dev_id);
- p_env->devno[WRITE] = dev_id.devno;
+ ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
+ p_env->devno[READ_CHANNEL] = dev_id.devno;
+ ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
+ p_env->devno[WRITE_CHANNEL] = dev_id.devno;
ret = add_channel(cgdev->cdev[0],0,privptr);
if (ret == 0)
ret = add_channel(cgdev->cdev[1],1,privptr);
@@ -2980,14 +2980,14 @@ claw_new_device(struct ccwgroup_device *cgdev)
" failed with error code %d\n", ret);
goto out;
}
- ret = ccw_device_set_online(cgdev->cdev[READ]);
+ ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
if (ret != 0) {
dev_warn(&cgdev->dev,
"Setting the read subchannel online"
" failed with error code %d\n", ret);
goto out;
}
- ret = ccw_device_set_online(cgdev->cdev[WRITE]);
+ ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
if (ret != 0) {
dev_warn(&cgdev->dev,
"Setting the write subchannel online "
@@ -3002,8 +3002,8 @@ claw_new_device(struct ccwgroup_device *cgdev)
}
dev->ml_priv = privptr;
dev_set_drvdata(&cgdev->dev, privptr);
- dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
- dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
/* sysfs magic */
SET_NETDEV_DEV(dev, &cgdev->dev);
if (register_netdev(dev) != 0) {
@@ -3021,16 +3021,16 @@ claw_new_device(struct ccwgroup_device *cgdev)
goto out;
}
}
- privptr->channel[READ].ndev = dev;
- privptr->channel[WRITE].ndev = dev;
+ privptr->channel[READ_CHANNEL].ndev = dev;
+ privptr->channel[WRITE_CHANNEL].ndev = dev;
privptr->p_env->ndev = dev;
dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
"readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
dev->name, p_env->read_size,
p_env->write_size, p_env->read_buffers,
- p_env->write_buffers, p_env->devno[READ],
- p_env->devno[WRITE]);
+ p_env->write_buffers, p_env->devno[READ_CHANNEL],
+ p_env->devno[WRITE_CHANNEL]);
dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
":%.8s api_type: %.8s\n",
dev->name, p_env->host_name,
@@ -3072,10 +3072,10 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
priv = dev_get_drvdata(&cgdev->dev);
if (!priv)
return -ENODEV;
- ndev = priv->channel[READ].ndev;
+ ndev = priv->channel[READ_CHANNEL].ndev;
if (ndev) {
/* Close the device */
- dev_info(&cgdev->dev, "%s: shutting down \n",
+ dev_info(&cgdev->dev, "%s: shutting down\n",
ndev->name);
if (ndev->flags & IFF_RUNNING)
ret = claw_release(ndev);
@@ -3083,8 +3083,8 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
unregister_netdev(ndev);
ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
claw_free_netdevice(ndev, 1);
- priv->channel[READ].ndev = NULL;
- priv->channel[WRITE].ndev = NULL;
+ priv->channel[READ_CHANNEL].ndev = NULL;
+ priv->channel[WRITE_CHANNEL].ndev = NULL;
priv->p_env->ndev = NULL;
}
ccw_device_set_offline(cgdev->cdev[1]);
@@ -3115,8 +3115,8 @@ claw_remove_device(struct ccwgroup_device *cgdev)
priv->channel[1].irb=NULL;
kfree(priv);
dev_set_drvdata(&cgdev->dev, NULL);
- dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL);
- dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL);
+ dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
+ dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
put_device(&cgdev->dev);
return;
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 46d59a13db12..1bc5904df19f 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -74,8 +74,8 @@
#define MAX_ENVELOPE_SIZE 65536
#define CLAW_DEFAULT_MTU_SIZE 4096
#define DEF_PACK_BUFSIZE 32768
-#define READ 0
-#define WRITE 1
+#define READ_CHANNEL 0
+#define WRITE_CHANNEL 1
#define TB_TX 0 /* sk buffer handling in process */
#define TB_STOP 1 /* network device stop in process */
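
claw.h drops its private READ/WRITE macros in favour of READ_CHANNEL/WRITE_CHANNEL (ctcm_main.h further down does the same with CTCM_READ/CTCM_WRITE), presumably to avoid clashing with the generic READ/WRITE definitions in <linux/fs.h>; the remaining claw and ctcm hunks are a mechanical rename of every channel-array index. A trivial sketch of the indexing pattern with the new names; the struct and strings are illustrative only:

#include <stdio.h>

#define READ_CHANNEL  0
#define WRITE_CHANNEL 1

struct chbk { const char *role; };

int main(void)
{
	/* Per-device channel pair, indexed by the renamed constants. */
	struct chbk channel[2] = {
		[READ_CHANNEL]  = { "read subchannel"  },
		[WRITE_CHANNEL] = { "write subchannel" },
	};

	printf("%s / %s\n", channel[READ_CHANNEL].role,
	       channel[WRITE_CHANNEL].role);
	return 0;
}
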
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 70eb7f138414..8c921fc3511a 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -454,7 +454,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
if ((fsmstate == CTC_STATE_SETUPWAIT) &&
(ch->protocol == CTCM_PROTO_OS390)) {
/* OS/390 resp. z/OS */
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
CTC_EVENT_TIMER, ch);
@@ -472,14 +472,14 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
* if in compatibility mode, since VM TCP delays the initial
* frame until it has some data to send.
*/
- if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
+ if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
(ch->protocol != CTCM_PROTO_S390))
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
ch->ccw[1].count = 2; /* Transfer only length */
- fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
+ fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
rc = ccw_device_start(ch->cdev, &ch->ccw[0],
(unsigned long)ch, 0xff, 0);
@@ -495,7 +495,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
* reply from VM TCP which brings up the RX channel to it's
* final state.
*/
- if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
+ if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
(ch->protocol == CTCM_PROTO_S390)) {
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
@@ -600,15 +600,15 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
int rc;
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
- CTCM_FUNTAIL, ch->id,
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+ CTCM_FUNTAIL, ch->id,
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
if (ch->trans_skb != NULL) {
clear_normalized_cda(&ch->ccw[1]);
dev_kfree_skb(ch->trans_skb);
ch->trans_skb = NULL;
}
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
ch->ccw[1].cmd_code = CCW_CMD_READ;
ch->ccw[1].flags = CCW_FLAG_SLI;
ch->ccw[1].count = 0;
@@ -622,7 +622,8 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
"%s(%s): %s trans_skb alloc delayed "
"until first transfer",
CTCM_FUNTAIL, ch->id,
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+ "RX" : "TX");
}
ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
@@ -720,7 +721,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state,
ch->th_seg = 0x00;
ch->th_seq_num = 0x00;
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
skb_queue_purge(&ch->io_queue);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else {
@@ -799,7 +800,8 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, CTC_STATE_STARTRETRY);
fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
- if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) {
+ if (!IS_MPC(ch) &&
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
if (rc != 0)
ctcm_ccw_check_rc(ch, rc,
@@ -811,10 +813,10 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
"%s(%s) : %s error during %s channel setup state=%s\n",
CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
fsm_getstate_str(fi));
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else {
@@ -945,7 +947,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
fsm_newstate(fi, CTC_STATE_DTERM);
- ch2 = priv->channel[WRITE];
+ ch2 = priv->channel[CTCM_WRITE];
fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
ccw_device_halt(ch->cdev, (unsigned long)ch);
@@ -1074,13 +1076,13 @@ static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
fsm_deltimer(&ch->timer);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s: %s: %s unrecoverable channel error",
- CTCM_FUNTAIL, ch->id, rd == READ ? "RX" : "TX");
+ CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
if (IS_MPC(ch)) {
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
}
- if (rd == READ) {
+ if (rd == CTCM_READ) {
fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else {
@@ -1503,7 +1505,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
switch (fsm_getstate(fi)) {
case CTC_STATE_STARTRETRY:
case CTC_STATE_SETUPWAIT:
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
ctcmpc_chx_rxidle(fi, event, arg);
} else {
fsm_newstate(fi, CTC_STATE_TXIDLE);
@@ -1514,7 +1516,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
break;
};
- fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
+ fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
done:
@@ -1753,8 +1755,8 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
struct net_device *dev = ach->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
- struct channel *wch = priv->channel[WRITE];
- struct channel *rch = priv->channel[READ];
+ struct channel *wch = priv->channel[CTCM_WRITE];
+ struct channel *rch = priv->channel[CTCM_READ];
struct sk_buff *skb;
struct th_sweep *header;
int rc = 0;
@@ -2070,7 +2072,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
if (IS_MPC(priv))
priv->mpcg->channels_terminating = 0;
- for (direction = READ; direction <= WRITE; direction++) {
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction];
fsm_event(ch->fsm, CTC_EVENT_START, ch);
}
@@ -2092,7 +2094,7 @@ static void dev_action_stop(fsm_instance *fi, int event, void *arg)
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
- for (direction = READ; direction <= WRITE; direction++) {
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction];
fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
ch->th_seq_num = 0x00;
@@ -2183,11 +2185,11 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg)
if (IS_MPC(priv)) {
if (event == DEV_EVENT_RXUP)
- mpc_channel_action(priv->channel[READ],
- READ, MPC_CHANNEL_ADD);
+ mpc_channel_action(priv->channel[CTCM_READ],
+ CTCM_READ, MPC_CHANNEL_ADD);
else
- mpc_channel_action(priv->channel[WRITE],
- WRITE, MPC_CHANNEL_ADD);
+ mpc_channel_action(priv->channel[CTCM_WRITE],
+ CTCM_WRITE, MPC_CHANNEL_ADD);
}
}
@@ -2239,11 +2241,11 @@ static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
}
if (IS_MPC(priv)) {
if (event == DEV_EVENT_RXDOWN)
- mpc_channel_action(priv->channel[READ],
- READ, MPC_CHANNEL_REMOVE);
+ mpc_channel_action(priv->channel[CTCM_READ],
+ CTCM_READ, MPC_CHANNEL_REMOVE);
else
- mpc_channel_action(priv->channel[WRITE],
- WRITE, MPC_CHANNEL_REMOVE);
+ mpc_channel_action(priv->channel[CTCM_WRITE],
+ CTCM_WRITE, MPC_CHANNEL_REMOVE);
}
}
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 4ecafbf91211..6edf20b62de5 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -267,7 +267,7 @@ static struct channel *channel_get(enum ctcm_channel_types type,
else {
ch->flags |= CHANNEL_FLAGS_INUSE;
ch->flags &= ~CHANNEL_FLAGS_RWMASK;
- ch->flags |= (direction == WRITE)
+ ch->flags |= (direction == CTCM_WRITE)
? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
}
@@ -388,7 +388,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s trans_skb allocation error",
CTCM_FUNTAIL, ch->id,
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+ "RX" : "TX");
return -ENOMEM;
}
@@ -399,7 +400,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s set norm_cda failed",
CTCM_FUNTAIL, ch->id,
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+ "RX" : "TX");
return -ENOMEM;
}
@@ -603,14 +605,14 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
priv = dev->ml_priv;
grp = priv->mpcg;
- ch = priv->channel[WRITE];
+ ch = priv->channel[CTCM_WRITE];
/* sweep processing is not complete until response and request */
/* has completed for all read channels in group */
if (grp->in_sweep == 0) {
grp->in_sweep = 1;
- grp->sweep_rsp_pend_num = grp->active_channels[READ];
- grp->sweep_req_pend_num = grp->active_channels[READ];
+ grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
+ grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
}
sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
@@ -911,7 +913,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
dev->trans_start = jiffies;
- if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0)
+ if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
return NETDEV_TX_BUSY;
return NETDEV_TX_OK;
}
@@ -994,7 +996,7 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
}
dev->trans_start = jiffies;
- if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) {
+ if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): device error - dropped",
CTCM_FUNTAIL, dev->name);
@@ -1035,7 +1037,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
priv = dev->ml_priv;
- max_bufsize = priv->channel[READ]->max_bufsize;
+ max_bufsize = priv->channel[CTCM_READ]->max_bufsize;
if (IS_MPC(priv)) {
if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
@@ -1226,10 +1228,10 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
priv = dev_get_drvdata(&cgdev->dev);
/* Try to extract channel from driver data. */
- if (priv->channel[READ]->cdev == cdev)
- ch = priv->channel[READ];
- else if (priv->channel[WRITE]->cdev == cdev)
- ch = priv->channel[WRITE];
+ if (priv->channel[CTCM_READ]->cdev == cdev)
+ ch = priv->channel[CTCM_READ];
+ else if (priv->channel[CTCM_WRITE]->cdev == cdev)
+ ch = priv->channel[CTCM_WRITE];
else {
dev_err(&cdev->dev,
"%s: Internal error: Can't determine channel for "
@@ -1587,13 +1589,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
goto out_ccw2;
}
- for (direction = READ; direction <= WRITE; direction++) {
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
priv->channel[direction] =
- channel_get(type, direction == READ ? read_id : write_id,
- direction);
+ channel_get(type, direction == CTCM_READ ?
+ read_id : write_id, direction);
if (priv->channel[direction] == NULL) {
- if (direction == WRITE)
- channel_free(priv->channel[READ]);
+ if (direction == CTCM_WRITE)
+ channel_free(priv->channel[CTCM_READ]);
goto out_dev;
}
priv->channel[direction]->netdev = dev;
@@ -1617,13 +1619,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
dev_info(&dev->dev,
"setup OK : r/w = %s/%s, protocol : %d\n",
- priv->channel[READ]->id,
- priv->channel[WRITE]->id, priv->protocol);
+ priv->channel[CTCM_READ]->id,
+ priv->channel[CTCM_WRITE]->id, priv->protocol);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
- priv->channel[READ]->id,
- priv->channel[WRITE]->id, priv->protocol);
+ priv->channel[CTCM_READ]->id,
+ priv->channel[CTCM_WRITE]->id, priv->protocol);
return 0;
out_unregister:
@@ -1635,10 +1637,10 @@ out_ccw2:
out_ccw1:
ccw_device_set_offline(cgdev->cdev[0]);
out_remove_channel2:
- readc = channel_get(type, read_id, READ);
+ readc = channel_get(type, read_id, CTCM_READ);
channel_remove(readc);
out_remove_channel1:
- writec = channel_get(type, write_id, WRITE);
+ writec = channel_get(type, write_id, CTCM_WRITE);
channel_remove(writec);
out_err_result:
return result;
@@ -1660,19 +1662,19 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
if (!priv)
return -ENODEV;
- if (priv->channel[READ]) {
- dev = priv->channel[READ]->netdev;
+ if (priv->channel[CTCM_READ]) {
+ dev = priv->channel[CTCM_READ]->netdev;
CTCM_DBF_DEV(SETUP, dev, "");
/* Close the device */
ctcm_close(dev);
dev->flags &= ~IFF_RUNNING;
ctcm_remove_attributes(&cgdev->dev);
- channel_free(priv->channel[READ]);
+ channel_free(priv->channel[CTCM_READ]);
} else
dev = NULL;
- if (priv->channel[WRITE])
- channel_free(priv->channel[WRITE]);
+ if (priv->channel[CTCM_WRITE])
+ channel_free(priv->channel[CTCM_WRITE]);
if (dev) {
unregister_netdev(dev);
@@ -1685,11 +1687,11 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
ccw_device_set_offline(cgdev->cdev[1]);
ccw_device_set_offline(cgdev->cdev[0]);
- if (priv->channel[READ])
- channel_remove(priv->channel[READ]);
- if (priv->channel[WRITE])
- channel_remove(priv->channel[WRITE]);
- priv->channel[READ] = priv->channel[WRITE] = NULL;
+ if (priv->channel[CTCM_READ])
+ channel_remove(priv->channel[CTCM_READ]);
+ if (priv->channel[CTCM_WRITE])
+ channel_remove(priv->channel[CTCM_WRITE]);
+ priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;
return 0;
@@ -1720,11 +1722,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
if (gdev->state == CCWGROUP_OFFLINE)
return 0;
- netif_device_detach(priv->channel[READ]->netdev);
- ctcm_close(priv->channel[READ]->netdev);
+ netif_device_detach(priv->channel[CTCM_READ]->netdev);
+ ctcm_close(priv->channel[CTCM_READ]->netdev);
if (!wait_event_timeout(priv->fsm->wait_q,
fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
- netif_device_attach(priv->channel[READ]->netdev);
+ netif_device_attach(priv->channel[CTCM_READ]->netdev);
return -EBUSY;
}
ccw_device_set_offline(gdev->cdev[1]);
@@ -1745,9 +1747,9 @@ static int ctcm_pm_resume(struct ccwgroup_device *gdev)
rc = ccw_device_set_online(gdev->cdev[0]);
if (rc)
goto err_out;
- ctcm_open(priv->channel[READ]->netdev);
+ ctcm_open(priv->channel[CTCM_READ]->netdev);
err_out:
- netif_device_attach(priv->channel[READ]->netdev);
+ netif_device_attach(priv->channel[CTCM_READ]->netdev);
return rc;
}
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index d34fa14f44e7..24d5215eb0c4 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -111,8 +111,8 @@ enum ctcm_channel_types {
#define CTCM_INITIAL_BLOCKLEN 2
-#define READ 0
-#define WRITE 1
+#define CTCM_READ 0
+#define CTCM_WRITE 1
#define CTCM_ID_SIZE 20+3
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 87c24d2936d6..2861e78773cb 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -419,8 +419,8 @@ void ctc_mpc_establish_connectivity(int port_num,
return;
priv = dev->ml_priv;
grp = priv->mpcg;
- rch = priv->channel[READ];
- wch = priv->channel[WRITE];
+ rch = priv->channel[CTCM_READ];
+ wch = priv->channel[CTCM_WRITE];
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
"%s(%s): state=%s",
@@ -578,7 +578,7 @@ void ctc_mpc_flow_control(int port_num, int flowc)
"%s: %s: flowc = %d",
CTCM_FUNTAIL, dev->name, flowc);
- rch = priv->channel[READ];
+ rch = priv->channel[CTCM_READ];
mpcg_state = fsm_getstate(grp->fsm);
switch (flowc) {
@@ -622,7 +622,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
- struct channel *ch = priv->channel[WRITE];
+ struct channel *ch = priv->channel[CTCM_WRITE];
CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id);
CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@@ -656,7 +656,7 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
int rc = 0;
struct th_sweep *header;
struct sk_buff *sweep_skb;
- struct channel *ch = priv->channel[WRITE];
+ struct channel *ch = priv->channel[CTCM_WRITE];
CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id);
@@ -712,7 +712,7 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
- struct channel *ch = priv->channel[WRITE];
+ struct channel *ch = priv->channel[CTCM_WRITE];
if (do_debug)
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
@@ -721,8 +721,8 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
if (grp->in_sweep == 0) {
grp->in_sweep = 1;
ctcm_test_and_set_busy(dev);
- grp->sweep_req_pend_num = grp->active_channels[READ];
- grp->sweep_rsp_pend_num = grp->active_channels[READ];
+ grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
+ grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
}
CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@@ -906,14 +906,14 @@ void mpc_group_ready(unsigned long adev)
fsm_newstate(grp->fsm, MPCG_STATE_READY);
/* Put up a read on the channel */
- ch = priv->channel[READ];
+ ch = priv->channel[CTCM_READ];
ch->pdu_seq = 0;
CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
__func__, ch->pdu_seq);
ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
/* Put the write channel in idle state */
- ch = priv->channel[WRITE];
+ ch = priv->channel[CTCM_WRITE];
if (ch->collect_len > 0) {
spin_lock(&ch->collect_lock);
ctcm_purge_skb_queue(&ch->collect_queue);
@@ -960,7 +960,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
"%s: %i / Grp:%s total_channels=%i, active_channels: "
"read=%i, write=%i\n", __func__, action,
fsm_getstate_str(grp->fsm), grp->num_channel_paths,
- grp->active_channels[READ], grp->active_channels[WRITE]);
+ grp->active_channels[CTCM_READ],
+ grp->active_channels[CTCM_WRITE]);
if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
grp->num_channel_paths++;
@@ -994,10 +995,11 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
grp->xid_skb->data,
grp->xid_skb->len);
- ch->xid->xid2_dlc_type = ((CHANNEL_DIRECTION(ch->flags) == READ)
+ ch->xid->xid2_dlc_type =
+ ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? XID2_READ_SIDE : XID2_WRITE_SIDE);
- if (CHANNEL_DIRECTION(ch->flags) == WRITE)
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE)
ch->xid->xid2_buf_len = 0x00;
ch->xid_skb->data = ch->xid_skb_data;
@@ -1006,8 +1008,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
fsm_newstate(ch->fsm, CH_XID0_PENDING);
- if ((grp->active_channels[READ] > 0) &&
- (grp->active_channels[WRITE] > 0) &&
+ if ((grp->active_channels[CTCM_READ] > 0) &&
+ (grp->active_channels[CTCM_WRITE] > 0) &&
(fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
@@ -1027,10 +1029,10 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
if (grp->channels_terminating)
goto done;
- if (((grp->active_channels[READ] == 0) &&
- (grp->active_channels[WRITE] > 0))
- || ((grp->active_channels[WRITE] == 0) &&
- (grp->active_channels[READ] > 0)))
+ if (((grp->active_channels[CTCM_READ] == 0) &&
+ (grp->active_channels[CTCM_WRITE] > 0))
+ || ((grp->active_channels[CTCM_WRITE] == 0) &&
+ (grp->active_channels[CTCM_READ] > 0)))
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
done:
@@ -1038,7 +1040,8 @@ done:
"exit %s: %i / Grp:%s total_channels=%i, active_channels: "
"read=%i, write=%i\n", __func__, action,
fsm_getstate_str(grp->fsm), grp->num_channel_paths,
- grp->active_channels[READ], grp->active_channels[WRITE]);
+ grp->active_channels[CTCM_READ],
+ grp->active_channels[CTCM_WRITE]);
CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
}
@@ -1392,8 +1395,8 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
(grp->port_persist == 0))
fsm_deltimer(&priv->restart_timer);
- wch = priv->channel[WRITE];
- rch = priv->channel[READ];
+ wch = priv->channel[CTCM_WRITE];
+ rch = priv->channel[CTCM_READ];
switch (grp->saved_state) {
case MPCG_STATE_RESET:
@@ -1480,8 +1483,8 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
priv = dev->ml_priv;
grp = priv->mpcg;
- wch = priv->channel[WRITE];
- rch = priv->channel[READ];
+ wch = priv->channel[CTCM_WRITE];
+ rch = priv->channel[CTCM_READ];
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID2INITW:
@@ -1586,7 +1589,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
/*the received direction should be the opposite of ours */
- if (((CHANNEL_DIRECTION(ch->flags) == READ) ? XID2_WRITE_SIDE :
+ if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE :
XID2_READ_SIDE) != xid->xid2_dlc_type) {
rc = 2;
/* XID REJECTED: r/w channel pairing mismatch */
@@ -1912,7 +1915,7 @@ static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
if (grp == NULL)
return;
- for (direction = READ; direction <= WRITE; direction++) {
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction];
struct xid2 *thisxid = ch->xid;
ch->xid_skb->data = ch->xid_skb_data;
@@ -2152,14 +2155,15 @@ static int mpc_send_qllc_discontact(struct net_device *dev)
return -ENOMEM;
}
- *((__u32 *)skb_push(skb, 4)) = priv->channel[READ]->pdu_seq;
- priv->channel[READ]->pdu_seq++;
+ *((__u32 *)skb_push(skb, 4)) =
+ priv->channel[CTCM_READ]->pdu_seq;
+ priv->channel[CTCM_READ]->pdu_seq++;
CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
- __func__, priv->channel[READ]->pdu_seq);
+ __func__, priv->channel[CTCM_READ]->pdu_seq);
/* receipt of CC03 resets anticipated sequence number on
receiving side */
- priv->channel[READ]->pdu_seq = 0x00;
+ priv->channel[CTCM_READ]->pdu_seq = 0x00;
skb_reset_mac_header(skb);
skb->dev = dev;
skb->protocol = htons(ETH_P_SNAP);
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 2b24550e865e..8305319b2a84 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -38,8 +38,8 @@ static ssize_t ctcm_buffer_write(struct device *dev,
int bs1;
struct ctcm_priv *priv = dev_get_drvdata(dev);
- if (!(priv && priv->channel[READ] &&
- (ndev = priv->channel[READ]->netdev))) {
+ ndev = priv->channel[CTCM_READ]->netdev;
+ if (!(priv && priv->channel[CTCM_READ] && ndev)) {
CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
return -ENODEV;
}
@@ -55,12 +55,12 @@ static ssize_t ctcm_buffer_write(struct device *dev,
(bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
goto einval;
- priv->channel[READ]->max_bufsize = bs1;
- priv->channel[WRITE]->max_bufsize = bs1;
+ priv->channel[CTCM_READ]->max_bufsize = bs1;
+ priv->channel[CTCM_WRITE]->max_bufsize = bs1;
if (!(ndev->flags & IFF_RUNNING))
ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
- priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
- priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+ priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+ priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
CTCM_DBF_DEV(SETUP, ndev, buf);
return count;
@@ -85,9 +85,9 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
p += sprintf(p, " Device FSM state: %s\n",
fsm_getstate_str(priv->fsm));
p += sprintf(p, " RX channel FSM state: %s\n",
- fsm_getstate_str(priv->channel[READ]->fsm));
+ fsm_getstate_str(priv->channel[CTCM_READ]->fsm));
p += sprintf(p, " TX channel FSM state: %s\n",
- fsm_getstate_str(priv->channel[WRITE]->fsm));
+ fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm));
p += sprintf(p, " Max. TX buffer used: %ld\n",
priv->channel[WRITE]->prof.maxmulti);
p += sprintf(p, " Max. chained SKBs: %ld\n",
@@ -102,7 +102,7 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
priv->channel[WRITE]->prof.tx_time);
printk(KERN_INFO "Statistics for %s:\n%s",
- priv->channel[WRITE]->netdev->name, sbuf);
+ priv->channel[CTCM_WRITE]->netdev->name, sbuf);
kfree(sbuf);
return;
}
@@ -125,7 +125,7 @@ static ssize_t stats_write(struct device *dev, struct device_attribute *attr,
return -ENODEV;
/* Reset statistics */
memset(&priv->channel[WRITE]->prof, 0,
- sizeof(priv->channel[WRITE]->prof));
+ sizeof(priv->channel[CTCM_WRITE]->prof));
return count;
}
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 7a44c38aaf65..d1257768be90 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -40,11 +40,7 @@
*/
enum qeth_dbf_names {
QETH_DBF_SETUP,
- QETH_DBF_QERR,
- QETH_DBF_TRACE,
QETH_DBF_MSG,
- QETH_DBF_SENSE,
- QETH_DBF_MISC,
QETH_DBF_CTRL,
QETH_DBF_INFOS /* must be last element */
};
@@ -71,7 +67,19 @@ struct qeth_dbf_info {
debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
#define QETH_DBF_TEXT_(name, level, text...) \
- qeth_dbf_longtext(QETH_DBF_##name, level, text)
+ qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text)
+
+#define QETH_CARD_TEXT(card, level, text) \
+ debug_text_event(card->debug, level, text)
+
+#define QETH_CARD_HEX(card, level, addr, len) \
+ debug_event(card->debug, level, (void *)(addr), len)
+
+#define QETH_CARD_MESSAGE(card, text...) \
+ debug_sprintf_event(card->debug, level, text)
+
+#define QETH_CARD_TEXT_(card, level, text...) \
+ qeth_dbf_longtext(card->debug, level, text)
#define SENSE_COMMAND_REJECT_BYTE 0
#define SENSE_COMMAND_REJECT_FLAG 0x80
@@ -180,8 +188,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
#define QETH_IDX_FUNC_LEVEL_OSD 0x0101
-#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108
-#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
+#define QETH_IDX_FUNC_LEVEL_IQD 0x4108
#define QETH_MODELLIST_ARRAY \
{{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \
@@ -733,12 +740,15 @@ struct qeth_card {
struct qeth_qdio_info qdio;
struct qeth_perf_stats perf_stats;
int use_hard_stop;
+ int read_or_write_problem;
struct qeth_osn_info osn_info;
struct qeth_discipline discipline;
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
struct qdio_ssqd_desc ssqd;
+ debug_info_t *debug;
struct mutex conf_mutex;
+ struct mutex discipline_mutex;
};
struct qeth_card_list_struct {
@@ -857,9 +867,10 @@ void qeth_core_get_ethtool_stats(struct net_device *,
struct ethtool_stats *, u64 *);
void qeth_core_get_strings(struct net_device *, u32, u8 *);
void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
-void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
+void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
int qeth_set_access_ctrl_online(struct qeth_card *card);
+int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
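
The qeth_core.h hunks above drop the global qeth_qerr/qeth_trace/qeth_sense/qeth_misc debug areas and introduce QETH_CARD_TEXT, QETH_CARD_HEX, QETH_CARD_MESSAGE and QETH_CARD_TEXT_, which log into a per-card debug area (card->debug, registered per device in the qeth_core_main.c hunks below). A minimal sketch of how a call site migrates; qeth_sample_caller() is a made-up function, and struct qeth_card plus the macros come from qeth_core.h:

/* Sketch only: old global-area logging vs. the new per-card macros. */
static void qeth_sample_caller(struct qeth_card *card, int rc)
{
	/* before: QETH_DBF_TEXT(TRACE, 4, "sample");  -- global qeth_trace area */
	QETH_CARD_TEXT(card, 4, "sample");		/* per-card area */
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%d", rc);	/* formatted variant */
}
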
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 13ef46b9d388..3a5a18a0fc28 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -32,16 +32,8 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
/* N P A M L V H */
[QETH_DBF_SETUP] = {"qeth_setup",
8, 1, 8, 5, &debug_hex_ascii_view, NULL},
- [QETH_DBF_QERR] = {"qeth_qerr",
- 2, 1, 8, 2, &debug_hex_ascii_view, NULL},
- [QETH_DBF_TRACE] = {"qeth_trace",
- 4, 1, 8, 3, &debug_hex_ascii_view, NULL},
[QETH_DBF_MSG] = {"qeth_msg",
8, 1, 128, 3, &debug_sprintf_view, NULL},
- [QETH_DBF_SENSE] = {"qeth_sense",
- 2, 1, 64, 2, &debug_hex_ascii_view, NULL},
- [QETH_DBF_MISC] = {"qeth_misc",
- 2, 1, 256, 2, &debug_hex_ascii_view, NULL},
[QETH_DBF_CTRL] = {"qeth_control",
8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
@@ -65,48 +57,6 @@ static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
-static inline void __qeth_fill_buffer_frag(struct sk_buff *skb,
- struct qdio_buffer *buffer, int is_tso,
- int *next_element_to_fill)
-{
- struct skb_frag_struct *frag;
- int fragno;
- unsigned long addr;
- int element, cnt, dlen;
-
- fragno = skb_shinfo(skb)->nr_frags;
- element = *next_element_to_fill;
- dlen = 0;
-
- if (is_tso)
- buffer->element[element].flags =
- SBAL_FLAGS_MIDDLE_FRAG;
- else
- buffer->element[element].flags =
- SBAL_FLAGS_FIRST_FRAG;
- dlen = skb->len - skb->data_len;
- if (dlen) {
- buffer->element[element].addr = skb->data;
- buffer->element[element].length = dlen;
- element++;
- }
- for (cnt = 0; cnt < fragno; cnt++) {
- frag = &skb_shinfo(skb)->frags[cnt];
- addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
- frag->page_offset;
- buffer->element[element].addr = (char *)addr;
- buffer->element[element].length = frag->size;
- if (cnt < (fragno - 1))
- buffer->element[element].flags =
- SBAL_FLAGS_MIDDLE_FRAG;
- else
- buffer->element[element].flags =
- SBAL_FLAGS_LAST_FRAG;
- element++;
- }
- *next_element_to_fill = element;
-}
-
static inline const char *qeth_get_cardname(struct qeth_card *card)
{
if (card->info.guestlan) {
@@ -232,7 +182,7 @@ void qeth_clear_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
- QETH_DBF_TEXT(TRACE, 5, "clwrklst");
+ QETH_CARD_TEXT(card, 5, "clwrklst");
list_for_each_entry_safe(pool_entry, tmp,
&card->qdio.in_buf_pool.entry_list, list){
list_del(&pool_entry->list);
@@ -246,7 +196,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card)
void *ptr;
int i, j;
- QETH_DBF_TEXT(TRACE, 5, "alocpool");
+ QETH_CARD_TEXT(card, 5, "alocpool");
for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
if (!pool_entry) {
@@ -273,7 +223,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card)
int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
- QETH_DBF_TEXT(TRACE, 2, "realcbp");
+ QETH_CARD_TEXT(card, 2, "realcbp");
if ((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER))
@@ -293,7 +243,7 @@ static int qeth_issue_next_read(struct qeth_card *card)
int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 5, "issnxrd");
+ QETH_CARD_TEXT(card, 5, "issnxrd");
if (card->read.state != CH_STATE_UP)
return -EIO;
iob = qeth_get_buffer(&card->read);
@@ -305,13 +255,14 @@ static int qeth_issue_next_read(struct qeth_card *card)
return -ENOMEM;
}
qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
- QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
+ QETH_CARD_TEXT(card, 6, "noirqpnd");
rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
(addr_t) iob, 0, 0);
if (rc) {
QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
"rc=%i\n", dev_name(&card->gdev->dev), rc);
atomic_set(&card->read.irq_pending, 0);
+ card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
wake_up(&card->wait_q);
}
@@ -364,7 +315,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd = NULL;
- QETH_DBF_TEXT(TRACE, 5, "chkipad");
+ QETH_CARD_TEXT(card, 5, "chkipad");
if (IS_IPA(iob->data)) {
cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
if (IS_IPA_REPLY(cmd)) {
@@ -400,10 +351,10 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
case IPA_CMD_MODCCID:
return cmd;
case IPA_CMD_REGISTER_LOCAL_ADDR:
- QETH_DBF_TEXT(TRACE, 3, "irla");
+ QETH_CARD_TEXT(card, 3, "irla");
break;
case IPA_CMD_UNREGISTER_LOCAL_ADDR:
- QETH_DBF_TEXT(TRACE, 3, "urla");
+ QETH_CARD_TEXT(card, 3, "urla");
break;
default:
QETH_DBF_MESSAGE(2, "Received data is IPA "
@@ -420,7 +371,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
struct qeth_reply *reply, *r;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 4, "clipalst");
+ QETH_CARD_TEXT(card, 4, "clipalst");
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
@@ -432,6 +383,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
qeth_put_reply(reply);
}
spin_unlock_irqrestore(&card->lock, flags);
+ atomic_set(&card->write.irq_pending, 0);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
@@ -448,9 +400,9 @@ static int qeth_check_idx_response(struct qeth_card *card,
buffer[4],
((buffer[4] == 0x22) ?
" -- try another portname" : ""));
- QETH_DBF_TEXT(TRACE, 2, "ckidxres");
- QETH_DBF_TEXT(TRACE, 2, " idxterm");
- QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
+ QETH_CARD_TEXT(card, 2, "ckidxres");
+ QETH_CARD_TEXT(card, 2, " idxterm");
+ QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
if (buffer[4] == 0xf6) {
dev_err(&card->gdev->dev,
"The qeth device is not configured "
@@ -467,8 +419,8 @@ static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
{
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 4, "setupccw");
card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 4, "setupccw");
if (channel == &card->read)
memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
else
@@ -481,7 +433,7 @@ static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
{
__u8 index;
- QETH_DBF_TEXT(TRACE, 6, "getbuff");
+ QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff");
index = channel->io_buf_no;
do {
if (channel->iob[index].state == BUF_STATE_FREE) {
@@ -502,7 +454,7 @@ void qeth_release_buffer(struct qeth_channel *channel,
{
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 6, "relbuff");
+ QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
spin_lock_irqsave(&channel->iob_lock, flags);
memset(iob->data, 0, QETH_BUFSIZE);
iob->state = BUF_STATE_FREE;
@@ -553,9 +505,8 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel,
int keep_reply;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 4, "sndctlcb");
-
card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 4, "sndctlcb");
rc = qeth_check_idx_response(card, iob->data);
switch (rc) {
case 0:
@@ -563,6 +514,7 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel,
case -EIO:
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
+ /* fall through */
default:
goto out;
}
@@ -722,7 +674,7 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread);
void qeth_schedule_recovery(struct qeth_card *card)
{
- QETH_DBF_TEXT(TRACE, 2, "startrec");
+ QETH_CARD_TEXT(card, 2, "startrec");
if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
schedule_work(&card->kernel_thread_starter);
}
@@ -732,15 +684,17 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
{
int dstat, cstat;
char *sense;
+ struct qeth_card *card;
sense = (char *) irb->ecw;
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
+ card = CARD_FROM_CDEV(cdev);
if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
- QETH_DBF_TEXT(TRACE, 2, "CGENCHK");
+ QETH_CARD_TEXT(card, 2, "CGENCHK");
dev_warn(&cdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
@@ -753,23 +707,23 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
if (dstat & DEV_STAT_UNIT_CHECK) {
if (sense[SENSE_RESETTING_EVENT_BYTE] &
SENSE_RESETTING_EVENT_FLAG) {
- QETH_DBF_TEXT(TRACE, 2, "REVIND");
+ QETH_CARD_TEXT(card, 2, "REVIND");
return 1;
}
if (sense[SENSE_COMMAND_REJECT_BYTE] &
SENSE_COMMAND_REJECT_FLAG) {
- QETH_DBF_TEXT(TRACE, 2, "CMDREJi");
+ QETH_CARD_TEXT(card, 2, "CMDREJi");
return 1;
}
if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
- QETH_DBF_TEXT(TRACE, 2, "AFFE");
+ QETH_CARD_TEXT(card, 2, "AFFE");
return 1;
}
if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
- QETH_DBF_TEXT(TRACE, 2, "ZEROSEN");
+ QETH_CARD_TEXT(card, 2, "ZEROSEN");
return 0;
}
- QETH_DBF_TEXT(TRACE, 2, "DGENCHK");
+ QETH_CARD_TEXT(card, 2, "DGENCHK");
return 1;
}
return 0;
@@ -778,6 +732,10 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
static long __qeth_check_irb_error(struct ccw_device *cdev,
unsigned long intparm, struct irb *irb)
{
+ struct qeth_card *card;
+
+ card = CARD_FROM_CDEV(cdev);
+
if (!IS_ERR(irb))
return 0;
@@ -785,17 +743,15 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
case -EIO:
QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
dev_name(&cdev->dev));
- QETH_DBF_TEXT(TRACE, 2, "ckirberr");
- QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
+ QETH_CARD_TEXT(card, 2, "ckirberr");
+ QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
break;
case -ETIMEDOUT:
dev_warn(&cdev->dev, "A hardware operation timed out"
" on the device\n");
- QETH_DBF_TEXT(TRACE, 2, "ckirberr");
- QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT);
+ QETH_CARD_TEXT(card, 2, "ckirberr");
+ QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
if (intparm == QETH_RCD_PARM) {
- struct qeth_card *card = CARD_FROM_CDEV(cdev);
-
if (card && (card->data.ccwdev == cdev)) {
card->data.state = CH_STATE_DOWN;
wake_up(&card->wait_q);
@@ -805,8 +761,8 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
default:
QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
dev_name(&cdev->dev), PTR_ERR(irb));
- QETH_DBF_TEXT(TRACE, 2, "ckirberr");
- QETH_DBF_TEXT(TRACE, 2, " rc???");
+ QETH_CARD_TEXT(card, 2, "ckirberr");
+ QETH_CARD_TEXT(card, 2, " rc???");
}
return PTR_ERR(irb);
}
@@ -822,8 +778,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
struct qeth_cmd_buffer *iob;
__u8 index;
- QETH_DBF_TEXT(TRACE, 5, "irq");
-
if (__qeth_check_irb_error(cdev, intparm, irb))
return;
cstat = irb->scsw.cmd.cstat;
@@ -833,15 +787,17 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
if (!card)
return;
+ QETH_CARD_TEXT(card, 5, "irq");
+
if (card->read.ccwdev == cdev) {
channel = &card->read;
- QETH_DBF_TEXT(TRACE, 5, "read");
+ QETH_CARD_TEXT(card, 5, "read");
} else if (card->write.ccwdev == cdev) {
channel = &card->write;
- QETH_DBF_TEXT(TRACE, 5, "write");
+ QETH_CARD_TEXT(card, 5, "write");
} else {
channel = &card->data;
- QETH_DBF_TEXT(TRACE, 5, "data");
+ QETH_CARD_TEXT(card, 5, "data");
}
atomic_set(&channel->irq_pending, 0);
@@ -857,12 +813,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
goto out;
if (intparm == QETH_CLEAR_CHANNEL_PARM) {
- QETH_DBF_TEXT(TRACE, 6, "clrchpar");
+ QETH_CARD_TEXT(card, 6, "clrchpar");
/* we don't have to handle this further */
intparm = 0;
}
if (intparm == QETH_HALT_CHANNEL_PARM) {
- QETH_DBF_TEXT(TRACE, 6, "hltchpar");
+ QETH_CARD_TEXT(card, 6, "hltchpar");
/* we don't have to handle this further */
intparm = 0;
}
@@ -963,7 +919,7 @@ void qeth_clear_qdio_buffers(struct qeth_card *card)
{
int i, j;
- QETH_DBF_TEXT(TRACE, 2, "clearqdbf");
+ QETH_CARD_TEXT(card, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
for (i = 0; i < card->qdio.no_out_queues; ++i)
if (card->qdio.out_qs[i]) {
@@ -978,7 +934,6 @@ static void qeth_free_buffer_pool(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
int i = 0;
- QETH_DBF_TEXT(TRACE, 5, "freepool");
list_for_each_entry_safe(pool_entry, tmp,
&card->qdio.init_pool.entry_list, init_list){
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
@@ -992,7 +947,6 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
{
int i, j;
- QETH_DBF_TEXT(TRACE, 2, "freeqdbf");
if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
QETH_QDIO_UNINITIALIZED)
return;
@@ -1089,7 +1043,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
int rc = 0;
spin_lock_irqsave(&card->thread_mask_lock, flags);
- QETH_DBF_TEXT_(TRACE, 4, " %02x%02x%02x",
+ QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
(u8) card->thread_start_mask,
(u8) card->thread_allowed_mask,
(u8) card->thread_running_mask);
@@ -1102,7 +1056,7 @@ static void qeth_start_kernel_thread(struct work_struct *work)
{
struct qeth_card *card = container_of(work, struct qeth_card,
kernel_thread_starter);
- QETH_DBF_TEXT(TRACE , 2, "strthrd");
+ QETH_CARD_TEXT(card , 2, "strthrd");
if (card->read.state != CH_STATE_UP &&
card->write.state != CH_STATE_UP)
@@ -1124,6 +1078,7 @@ static int qeth_setup_card(struct qeth_card *card)
card->state = CARD_STATE_DOWN;
card->lan_online = 0;
card->use_hard_stop = 0;
+ card->read_or_write_problem = 0;
card->dev = NULL;
spin_lock_init(&card->vlanlock);
spin_lock_init(&card->mclock);
@@ -1132,6 +1087,7 @@ static int qeth_setup_card(struct qeth_card *card)
spin_lock_init(&card->ip_lock);
spin_lock_init(&card->thread_mask_lock);
mutex_init(&card->conf_mutex);
+ mutex_init(&card->discipline_mutex);
card->thread_start_mask = 0;
card->thread_allowed_mask = 0;
card->thread_running_mask = 0;
@@ -1229,8 +1185,8 @@ static int qeth_clear_channel(struct qeth_channel *channel)
struct qeth_card *card;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "clearch");
card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 3, "clearch");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1253,8 +1209,8 @@ static int qeth_halt_channel(struct qeth_channel *channel)
struct qeth_card *card;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "haltch");
card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 3, "haltch");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1274,7 +1230,7 @@ static int qeth_halt_channels(struct qeth_card *card)
{
int rc1 = 0, rc2 = 0, rc3 = 0;
- QETH_DBF_TEXT(TRACE, 3, "haltchs");
+ QETH_CARD_TEXT(card, 3, "haltchs");
rc1 = qeth_halt_channel(&card->read);
rc2 = qeth_halt_channel(&card->write);
rc3 = qeth_halt_channel(&card->data);
@@ -1289,7 +1245,7 @@ static int qeth_clear_channels(struct qeth_card *card)
{
int rc1 = 0, rc2 = 0, rc3 = 0;
- QETH_DBF_TEXT(TRACE, 3, "clearchs");
+ QETH_CARD_TEXT(card, 3, "clearchs");
rc1 = qeth_clear_channel(&card->read);
rc2 = qeth_clear_channel(&card->write);
rc3 = qeth_clear_channel(&card->data);
@@ -1304,8 +1260,7 @@ static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "clhacrd");
- QETH_DBF_HEX(TRACE, 3, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 3, "clhacrd");
if (halt)
rc = qeth_halt_channels(card);
@@ -1318,7 +1273,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "qdioclr");
+ QETH_CARD_TEXT(card, 3, "qdioclr");
switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
@@ -1329,7 +1284,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_CLEAR);
if (rc)
- QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 3, "1err%d", rc);
qdio_free(CARD_DDEV(card));
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
break;
@@ -1340,7 +1295,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
}
rc = qeth_clear_halt_card(card, use_halt);
if (rc)
- QETH_DBF_TEXT_(TRACE, 3, "2err%d", rc);
+ QETH_CARD_TEXT_(card, 3, "2err%d", rc);
card->state = CARD_STATE_DOWN;
return rc;
}
@@ -1432,14 +1387,10 @@ static void qeth_init_func_level(struct qeth_card *card)
{
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
- if (card->ipato.enabled)
- card->info.func_level =
- QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
- else
- card->info.func_level =
- QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
+ card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
break;
case QETH_CARD_TYPE_OSD:
+ case QETH_CARD_TYPE_OSN:
card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
break;
default:
@@ -1637,15 +1588,18 @@ static void qeth_idx_read_cb(struct qeth_channel *channel,
"host\n");
break;
case QETH_IDX_ACT_ERR_AUTH:
+ case QETH_IDX_ACT_ERR_AUTH_USER:
dev_err(&card->read.ccwdev->dev,
"Setting the device online failed because of "
- "insufficient LPAR authorization\n");
+ "insufficient authorization\n");
break;
default:
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
" negative reply\n",
dev_name(&card->read.ccwdev->dev));
}
+ QETH_CARD_TEXT_(card, 2, "idxread%c",
+ QETH_IDX_ACT_CAUSE_CODE(iob->data));
goto out;
}
@@ -1705,8 +1659,12 @@ int qeth_send_control_data(struct qeth_card *card, int len,
unsigned long timeout, event_timeout;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "sendctl");
+ QETH_CARD_TEXT(card, 2, "sendctl");
+ if (card->read_or_write_problem) {
+ qeth_release_buffer(iob->channel, iob);
+ return -EIO;
+ }
reply = qeth_alloc_reply(card);
if (!reply) {
return -ENOMEM;
@@ -1732,7 +1690,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
event_timeout = QETH_TIMEOUT;
timeout = jiffies + event_timeout;
- QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
+ QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
(addr_t) iob, 0, 0);
@@ -1741,7 +1699,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
"ccw_device_start rc = %i\n",
dev_name(&card->write.ccwdev->dev), rc);
- QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
+ QETH_CARD_TEXT_(card, 2, " err%d", rc);
spin_lock_irqsave(&card->lock, flags);
list_del_init(&reply->list);
qeth_put_reply(reply);
@@ -1778,6 +1736,9 @@ time_err:
spin_unlock_irqrestore(&reply->card->lock, flags);
reply->rc = -ETIME;
atomic_inc(&reply->received);
+ atomic_set(&card->write.irq_pending, 0);
+ qeth_release_buffer(iob->channel, iob);
+ card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
wake_up(&reply->wait_q);
rc = reply->rc;
qeth_put_reply(reply);
@@ -1978,7 +1939,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
card->info.link_type = link_type;
} else
card->info.link_type = 0;
- QETH_DBF_TEXT_(SETUP, 2, "link%d", link_type);
+ QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return 0;
}
@@ -2035,7 +1996,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
QETH_DBF_TEXT(SETUP, 2, "olmlimit");
dev_err(&card->gdev->dev, "A connection could not be "
"established because of an OLM limit\n");
- rc = -EMLINK;
+ iob->rc = -EMLINK;
}
QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return rc;
@@ -2335,7 +2296,7 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *entry;
- QETH_DBF_TEXT(TRACE, 5, "inwrklst");
+ QETH_CARD_TEXT(card, 5, "inwrklst");
list_for_each_entry(entry,
&card->qdio.init_pool.entry_list, init_list) {
@@ -2522,7 +2483,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
int rc;
char prot_type;
- QETH_DBF_TEXT(TRACE, 4, "sendipa");
+ QETH_CARD_TEXT(card, 4, "sendipa");
if (card->options.layer2)
if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -2534,6 +2495,10 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
qeth_prepare_ipa_cmd(card, iob, prot_type);
rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
iob, reply_cb, reply_param);
+ if (rc == -ETIME) {
+ qeth_clear_ipacmd_list(card);
+ qeth_schedule_recovery(card);
+ }
return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
@@ -2582,7 +2547,7 @@ int qeth_default_setadapterparms_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "defadpcb");
+ QETH_CARD_TEXT(card, 4, "defadpcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == 0)
@@ -2597,7 +2562,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 3, "quyadpcb");
+ QETH_CARD_TEXT(card, 3, "quyadpcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
@@ -2633,7 +2598,7 @@ int qeth_query_setadapterparms(struct qeth_card *card)
int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 3, "queryadp");
+ QETH_CARD_TEXT(card, 3, "queryadp");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
sizeof(struct qeth_ipacmd_setadpparms));
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
@@ -2645,13 +2610,12 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
unsigned int qdio_error, const char *dbftext)
{
if (qdio_error) {
- QETH_DBF_TEXT(TRACE, 2, dbftext);
- QETH_DBF_TEXT(QERR, 2, dbftext);
- QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
+ QETH_CARD_TEXT(card, 2, dbftext);
+ QETH_CARD_TEXT_(card, 2, " F15=%02X",
buf->element[15].flags & 0xff);
- QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
+ QETH_CARD_TEXT_(card, 2, " F14=%02X",
buf->element[14].flags & 0xff);
- QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
+ QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
if ((buf->element[15].flags & 0xff) == 0x12) {
card->stats.rx_dropped++;
return 0;
@@ -2717,8 +2681,7 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
if (rc) {
dev_warn(&card->gdev->dev,
"QDIO reported an error, rc=%i\n", rc);
- QETH_DBF_TEXT(TRACE, 2, "qinberr");
- QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
+ QETH_CARD_TEXT(card, 2, "qinberr");
}
queue->next_buf_to_init = (queue->next_buf_to_init + count) %
QDIO_MAX_BUFFERS_PER_Q;
@@ -2731,7 +2694,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
{
int sbalf15 = buffer->buffer->element[15].flags & 0xff;
- QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
+ QETH_CARD_TEXT(card, 6, "hdsnderr");
if (card->info.type == QETH_CARD_TYPE_IQD) {
if (sbalf15 == 0) {
qdio_err = 0;
@@ -2747,9 +2710,8 @@ static int qeth_handle_send_error(struct qeth_card *card,
if ((sbalf15 >= 15) && (sbalf15 <= 31))
return QETH_SEND_ERROR_RETRY;
- QETH_DBF_TEXT(TRACE, 1, "lnkfail");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 1, "%04x %02x",
+ QETH_CARD_TEXT(card, 1, "lnkfail");
+ QETH_CARD_TEXT_(card, 1, "%04x %02x",
(u16)qdio_err, (u8)sbalf15);
return QETH_SEND_ERROR_LINK_FAILURE;
}
@@ -2764,7 +2726,7 @@ static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
if (atomic_read(&queue->used_buffers)
>= QETH_HIGH_WATERMARK_PACK){
/* switch non-PACKING -> PACKING */
- QETH_DBF_TEXT(TRACE, 6, "np->pack");
+ QETH_CARD_TEXT(queue->card, 6, "np->pack");
if (queue->card->options.performance_stats)
queue->card->perf_stats.sc_dp_p++;
queue->do_pack = 1;
@@ -2787,7 +2749,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
if (atomic_read(&queue->used_buffers)
<= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
- QETH_DBF_TEXT(TRACE, 6, "pack->np");
+ QETH_CARD_TEXT(queue->card, 6, "pack->np");
if (queue->card->options.performance_stats)
queue->card->perf_stats.sc_p_dp++;
queue->do_pack = 0;
@@ -2896,9 +2858,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
/* ignore temporary SIGA errors without busy condition */
if (rc == QDIO_ERROR_SIGA_TARGET)
return;
- QETH_DBF_TEXT(TRACE, 2, "flushbuf");
- QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
- QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card));
+ QETH_CARD_TEXT(queue->card, 2, "flushbuf");
+ QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
/* this must not happen under normal circumstances. if it
* happens something is really wrong -> recover */
@@ -2960,10 +2921,9 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
int i;
unsigned qeth_send_err;
- QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
+ QETH_CARD_TEXT(card, 6, "qdouhdl");
if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
- QETH_DBF_TEXT(TRACE, 2, "achkcond");
- QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
+ QETH_CARD_TEXT(card, 2, "achkcond");
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
return;
@@ -3033,13 +2993,11 @@ EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
int qeth_get_elements_no(struct qeth_card *card, void *hdr,
struct sk_buff *skb, int elems)
{
- int elements_needed = 0;
+ int dlen = skb->len - skb->data_len;
+ int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
+ PFN_DOWN((unsigned long)skb->data);
- if (skb_shinfo(skb)->nr_frags > 0)
- elements_needed = (skb_shinfo(skb)->nr_frags + 1);
- if (elements_needed == 0)
- elements_needed = 1 + (((((unsigned long) skb->data) %
- PAGE_SIZE) + skb->len) >> PAGE_SHIFT);
+ elements_needed += skb_shinfo(skb)->nr_frags;
if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
"(Number=%d / Length=%d). Discarded.\n",
@@ -3050,15 +3008,35 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
}
EXPORT_SYMBOL_GPL(qeth_get_elements_no);
+int qeth_hdr_chk_and_bounce(struct sk_buff *skb, int len)
+{
+ int hroom, inpage, rest;
+
+ if (((unsigned long)skb->data & PAGE_MASK) !=
+ (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
+ hroom = skb_headroom(skb);
+ inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
+ rest = len - inpage;
+ if (rest > hroom)
+ return 1;
+ memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
+ skb->data -= rest;
+ QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
+
static inline void __qeth_fill_buffer(struct sk_buff *skb,
struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
int offset)
{
- int length = skb->len;
+ int length = skb->len - skb->data_len;
int length_here;
int element;
char *data;
- int first_lap ;
+ int first_lap, cnt;
+ struct skb_frag_struct *frag;
element = *next_element_to_fill;
data = skb->data;
@@ -3081,10 +3059,14 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
length -= length_here;
if (!length) {
if (first_lap)
- buffer->element[element].flags = 0;
+ if (skb_shinfo(skb)->nr_frags)
+ buffer->element[element].flags =
+ SBAL_FLAGS_FIRST_FRAG;
+ else
+ buffer->element[element].flags = 0;
else
buffer->element[element].flags =
- SBAL_FLAGS_LAST_FRAG;
+ SBAL_FLAGS_MIDDLE_FRAG;
} else {
if (first_lap)
buffer->element[element].flags =
@@ -3097,6 +3079,18 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
element++;
first_lap = 0;
}
+
+ for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+ frag = &skb_shinfo(skb)->frags[cnt];
+ buffer->element[element].addr = (char *)page_to_phys(frag->page)
+ + frag->page_offset;
+ buffer->element[element].length = frag->size;
+ buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG;
+ element++;
+ }
+
+ if (buffer->element[element - 1].flags)
+ buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG;
*next_element_to_fill = element;
}
@@ -3137,20 +3131,16 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
buf->next_element_to_fill++;
}
- if (skb_shinfo(skb)->nr_frags == 0)
- __qeth_fill_buffer(skb, buffer, large_send,
- (int *)&buf->next_element_to_fill, offset);
- else
- __qeth_fill_buffer_frag(skb, buffer, large_send,
- (int *)&buf->next_element_to_fill);
+ __qeth_fill_buffer(skb, buffer, large_send,
+ (int *)&buf->next_element_to_fill, offset);
if (!queue->do_pack) {
- QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
+ QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
/* set state to PRIMED -> will be flushed */
atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
flush_cnt = 1;
} else {
- QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
+ QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
if (queue->card->options.performance_stats)
queue->card->perf_stats.skbs_sent_pack++;
if (buf->next_element_to_fill >=
@@ -3210,7 +3200,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
rc = dev_queue_xmit(skb);
} else {
dev_kfree_skb_any(skb);
- QETH_DBF_TEXT(QERR, 2, "qrdrop");
+ QETH_CARD_TEXT(card, 2, "qrdrop");
}
}
return 0;
@@ -3312,14 +3302,14 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
struct qeth_ipacmd_setadpparms *setparms;
- QETH_DBF_TEXT(TRACE, 4, "prmadpcb");
+ QETH_CARD_TEXT(card, 4, "prmadpcb");
cmd = (struct qeth_ipa_cmd *) data;
setparms = &(cmd->data.setadapterparms);
qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 4, "prmrc%2.2x", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code);
setparms->data.mode = SET_PROMISC_MODE_OFF;
}
card->info.promisc_mode = setparms->data.mode;
@@ -3333,7 +3323,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "setprom");
+ QETH_CARD_TEXT(card, 4, "setprom");
if (((dev->flags & IFF_PROMISC) &&
(card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
@@ -3343,7 +3333,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
mode = SET_PROMISC_MODE_OFF;
if (dev->flags & IFF_PROMISC)
mode = SET_PROMISC_MODE_ON;
- QETH_DBF_TEXT_(TRACE, 4, "mode:%x", mode);
+ QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
sizeof(struct qeth_ipacmd_setadpparms));
@@ -3360,9 +3350,9 @@ int qeth_change_mtu(struct net_device *dev, int new_mtu)
card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "chgmtu");
+ QETH_CARD_TEXT(card, 4, "chgmtu");
sprintf(dbf_text, "%8x", new_mtu);
- QETH_DBF_TEXT(TRACE, 4, dbf_text);
+ QETH_CARD_TEXT(card, 4, dbf_text);
if (new_mtu < 64)
return -EINVAL;
@@ -3382,7 +3372,7 @@ struct net_device_stats *qeth_get_stats(struct net_device *dev)
card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 5, "getstat");
+ QETH_CARD_TEXT(card, 5, "getstat");
return &card->stats;
}
@@ -3393,7 +3383,7 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "chgmaccb");
+ QETH_CARD_TEXT(card, 4, "chgmaccb");
cmd = (struct qeth_ipa_cmd *) data;
if (!card->options.layer2 ||
@@ -3413,7 +3403,7 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "chgmac");
+ QETH_CARD_TEXT(card, 4, "chgmac");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
sizeof(struct qeth_ipacmd_setadpparms));
@@ -3433,9 +3423,8 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
struct qeth_set_access_ctrl *access_ctrl_req;
- int rc;
- QETH_DBF_TEXT(TRACE, 4, "setaccb");
+ QETH_CARD_TEXT(card, 4, "setaccb");
cmd = (struct qeth_ipa_cmd *) data;
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
@@ -3460,7 +3449,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
card->gdev->dev.kobj.name,
access_ctrl_req->subcmd_code,
cmd->data.setadapterparms.hdr.return_code);
- rc = 0;
break;
}
case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
@@ -3474,7 +3462,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
/* ensure isolation mode is "none" */
card->options.isolation = ISOLATION_MODE_NONE;
- rc = -EOPNOTSUPP;
break;
}
case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
@@ -3489,7 +3476,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
/* ensure isolation mode is "none" */
card->options.isolation = ISOLATION_MODE_NONE;
- rc = -EOPNOTSUPP;
break;
}
case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
@@ -3503,7 +3489,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
/* ensure isolation mode is "none" */
card->options.isolation = ISOLATION_MODE_NONE;
- rc = -EPERM;
break;
}
default:
@@ -3517,12 +3502,11 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
/* ensure isolation mode is "none" */
card->options.isolation = ISOLATION_MODE_NONE;
- rc = 0;
break;
}
}
qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
- return rc;
+ return 0;
}
static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
@@ -3533,7 +3517,7 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
struct qeth_set_access_ctrl *access_ctrl_req;
- QETH_DBF_TEXT(TRACE, 4, "setacctl");
+ QETH_CARD_TEXT(card, 4, "setacctl");
QETH_DBF_TEXT_(SETUP, 2, "setacctl");
QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
@@ -3555,7 +3539,7 @@ int qeth_set_access_ctrl_online(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 4, "setactlo");
+ QETH_CARD_TEXT(card, 4, "setactlo");
if ((card->info.type == QETH_CARD_TYPE_OSD ||
card->info.type == QETH_CARD_TYPE_OSX) &&
@@ -3583,8 +3567,8 @@ void qeth_tx_timeout(struct net_device *dev)
{
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 4, "txtimeo");
card = dev->ml_priv;
+ QETH_CARD_TEXT(card, 4, "txtimeo");
card->stats.tx_errors++;
qeth_schedule_recovery(card);
}
@@ -3663,7 +3647,7 @@ static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
{
u16 s1, s2;
- QETH_DBF_TEXT(TRACE, 4, "sendsnmp");
+ QETH_CARD_TEXT(card, 4, "sendsnmp");
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
@@ -3688,7 +3672,7 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
unsigned char *data;
__u16 data_len;
- QETH_DBF_TEXT(TRACE, 3, "snpcmdcb");
+ QETH_CARD_TEXT(card, 3, "snpcmdcb");
cmd = (struct qeth_ipa_cmd *) sdata;
data = (unsigned char *)((char *)cmd - reply->offset);
@@ -3696,13 +3680,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
snmp = &cmd->data.setadapterparms.data.snmp;
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 4, "scer1%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code);
return 0;
}
if (cmd->data.setadapterparms.hdr.return_code) {
cmd->hdr.return_code =
cmd->data.setadapterparms.hdr.return_code;
- QETH_DBF_TEXT_(TRACE, 4, "scer2%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code);
return 0;
}
data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
@@ -3713,13 +3697,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
/* check if there is enough room in userspace */
if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
- QETH_DBF_TEXT_(TRACE, 4, "scer3%i", -ENOMEM);
+ QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
cmd->hdr.return_code = -ENOMEM;
return 0;
}
- QETH_DBF_TEXT_(TRACE, 4, "snore%i",
+ QETH_CARD_TEXT_(card, 4, "snore%i",
cmd->data.setadapterparms.hdr.used_total);
- QETH_DBF_TEXT_(TRACE, 4, "sseqn%i",
+ QETH_CARD_TEXT_(card, 4, "sseqn%i",
cmd->data.setadapterparms.hdr.seq_no);
/*copy entries to user buffer*/
if (cmd->data.setadapterparms.hdr.seq_no == 1) {
@@ -3733,9 +3717,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
}
qinfo->udata_offset += data_len;
/* check if all replies received ... */
- QETH_DBF_TEXT_(TRACE, 4, "srtot%i",
+ QETH_CARD_TEXT_(card, 4, "srtot%i",
cmd->data.setadapterparms.hdr.used_total);
- QETH_DBF_TEXT_(TRACE, 4, "srseq%i",
+ QETH_CARD_TEXT_(card, 4, "srseq%i",
cmd->data.setadapterparms.hdr.seq_no);
if (cmd->data.setadapterparms.hdr.seq_no <
cmd->data.setadapterparms.hdr.used_total)
@@ -3752,7 +3736,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
struct qeth_arp_query_info qinfo = {0, };
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "snmpcmd");
+ QETH_CARD_TEXT(card, 3, "snmpcmd");
if (card->info.guestlan)
return -EOPNOTSUPP;
@@ -3764,15 +3748,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
/* skip 4 bytes (data_len struct member) to get req_len */
if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
return -EFAULT;
- ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
- if (!ureq) {
- QETH_DBF_TEXT(TRACE, 2, "snmpnome");
- return -ENOMEM;
- }
- if (copy_from_user(ureq, udata,
- req_len + sizeof(struct qeth_snmp_ureq_hdr))) {
- kfree(ureq);
- return -EFAULT;
+ ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
+ if (IS_ERR(ureq)) {
+ QETH_CARD_TEXT(card, 2, "snmpnome");
+ return PTR_ERR(ureq);
}
qinfo.udata_len = ureq->hdr.data_len;
qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
@@ -3991,6 +3970,7 @@ retriable:
else
goto retry;
}
+ card->read_or_write_problem = 0;
rc = qeth_mpc_initialize(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
@@ -4120,13 +4100,8 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
skb_len -= data_len;
if (skb_len) {
if (qeth_is_last_sbale(element)) {
- QETH_DBF_TEXT(TRACE, 4, "unexeob");
- QETH_DBF_TEXT_(TRACE, 4, "%s",
- CARD_BUS_ID(card));
- QETH_DBF_TEXT(QERR, 2, "unexeob");
- QETH_DBF_TEXT_(QERR, 2, "%s",
- CARD_BUS_ID(card));
- QETH_DBF_HEX(MISC, 4, buffer, sizeof(*buffer));
+ QETH_CARD_TEXT(card, 4, "unexeob");
+ QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
dev_kfree_skb_any(skb);
card->stats.rx_errors++;
return NULL;
@@ -4147,8 +4122,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
return skb;
no_mem:
if (net_ratelimit()) {
- QETH_DBF_TEXT(TRACE, 2, "noskbmem");
- QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
+ QETH_CARD_TEXT(card, 2, "noskbmem");
}
card->stats.rx_dropped++;
return NULL;
@@ -4164,17 +4138,17 @@ static void qeth_unregister_dbf_views(void)
}
}
-void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *fmt, ...)
+void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
char dbf_txt_buf[32];
va_list args;
- if (level > (qeth_dbf[dbf_nix].id)->level)
+ if (level > id->level)
return;
va_start(args, fmt);
vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
va_end(args);
- debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf);
+ debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
@@ -4282,6 +4256,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
struct device *dev;
int rc;
unsigned long flags;
+ char dbf_name[20];
QETH_DBF_TEXT(SETUP, 2, "probedev");
@@ -4297,6 +4272,17 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
rc = -ENOMEM;
goto err_dev;
}
+
+ snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
+ dev_name(&gdev->dev));
+ card->debug = debug_register(dbf_name, 2, 1, 8);
+ if (!card->debug) {
+ QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
+ rc = -ENOMEM;
+ goto err_card;
+ }
+ debug_register_view(card->debug, &debug_hex_ascii_view);
+
card->read.ccwdev = gdev->cdev[0];
card->write.ccwdev = gdev->cdev[1];
card->data.ccwdev = gdev->cdev[2];
@@ -4309,12 +4295,12 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
rc = qeth_determine_card_type(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- goto err_card;
+ goto err_dbf;
}
rc = qeth_setup_card(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- goto err_card;
+ goto err_dbf;
}
if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -4322,7 +4308,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
else
rc = qeth_core_create_device_attributes(dev);
if (rc)
- goto err_card;
+ goto err_dbf;
switch (card->info.type) {
case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_OSM:
@@ -4352,6 +4338,8 @@ err_attr:
qeth_core_remove_osn_attributes(dev);
else
qeth_core_remove_device_attributes(dev);
+err_dbf:
+ debug_unregister(card->debug);
err_card:
qeth_core_free_card(card);
err_dev:
@@ -4365,16 +4353,19 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
QETH_DBF_TEXT(SETUP, 2, "removedv");
- if (card->discipline.ccwgdriver) {
- card->discipline.ccwgdriver->remove(gdev);
- qeth_core_free_discipline(card);
- }
if (card->info.type == QETH_CARD_TYPE_OSN) {
qeth_core_remove_osn_attributes(&gdev->dev);
} else {
qeth_core_remove_device_attributes(&gdev->dev);
}
+
+ if (card->discipline.ccwgdriver) {
+ card->discipline.ccwgdriver->remove(gdev);
+ qeth_core_free_discipline(card);
+ }
+
+ debug_unregister(card->debug);
write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
list_del(&card->list);
write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
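
In qeth_get_elements_no() above, the SBAL element estimate for the linear skb data is now derived from the page frames that data spans, plus one element per page fragment, replacing the old nr_frags heuristic and the removed __qeth_fill_buffer_frag() helper. A standalone sketch of that arithmetic; sketch_elements_for_skb() is not a driver function:

#include <linux/pfn.h>
#include <linux/skbuff.h>

/* Sketch only: one SBAL element per page touched by the linear data
 * (a linear area crossing a page boundary needs two), plus one element
 * per page fragment. */
static int sketch_elements_for_skb(struct sk_buff *skb)
{
	unsigned long data = (unsigned long)skb->data;
	int dlen = skb->len - skb->data_len;		/* linear part only */
	int elements = PFN_UP(data + dlen - 1) - PFN_DOWN(data);

	return elements + skb_shinfo(skb)->nr_frags;
}
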
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index f9ed24de7514..e37dd8c4bf4e 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -616,8 +616,9 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
-#define QETH_IDX_ACT_ERR_EXCL 0x19
-#define QETH_IDX_ACT_ERR_AUTH 0x1E
+#define QETH_IDX_ACT_ERR_EXCL 0x19
+#define QETH_IDX_ACT_ERR_AUTH 0x1E
+#define QETH_IDX_ACT_ERR_AUTH_USER 0x20
#define PDU_ENCAPSULATION(buffer) \
(buffer + *(buffer + (*(buffer + 0x0b)) + \
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 2eb022ff2610..42fa783a70c8 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -411,7 +411,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
if (!card)
return -EINVAL;
- mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->discipline_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
@@ -433,6 +433,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
if (card->options.layer2 == newdis)
goto out;
else {
+ card->info.mac_bits = 0;
if (card->discipline.ccwgdriver) {
card->discipline.ccwgdriver->remove(card->gdev);
qeth_core_free_discipline(card);
@@ -445,7 +446,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
rc = card->discipline.ccwgdriver->probe(card->gdev);
out:
- mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
return rc ? rc : count;
}
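
The qeth_core_sys.c hunks above switch the layer2 attribute store handler from conf_mutex to the new discipline_mutex (added to struct qeth_card and initialized in qeth_setup_card), and clear info.mac_bits whenever the discipline changes. A rough sketch of the new serialization, assuming the field names from the hunks above; sketch_layer2_store() is not the actual handler:

/* Sketch only: discipline switching is serialized by discipline_mutex. */
static ssize_t sketch_layer2_store(struct qeth_card *card, int newdis)
{
	ssize_t rc = 0;

	mutex_lock(&card->discipline_mutex);	/* was conf_mutex */
	if (card->state != CARD_STATE_DOWN) {
		rc = -EPERM;
		goto out;
	}
	card->info.mac_bits = 0;
	/* ... remove the old discipline, probe the new one ... */
out:
	mutex_unlock(&card->discipline_mutex);
	return rc;
}
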
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d43f57a4ac66..830d63524d61 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -79,7 +79,7 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = -EOPNOTSUPP;
}
if (rc)
- QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
+ QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
return rc;
}
@@ -130,7 +130,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
__u8 *mac;
- QETH_DBF_TEXT(TRACE, 2, "L2Sgmacb");
+ QETH_CARD_TEXT(card, 2, "L2Sgmacb");
cmd = (struct qeth_ipa_cmd *) data;
mac = &cmd->data.setdelmac.mac[0];
/* MAC already registered, needed in couple/uncouple case */
@@ -147,7 +147,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
- QETH_DBF_TEXT(TRACE, 2, "L2Sgmac");
+ QETH_CARD_TEXT(card, 2, "L2Sgmac");
return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
qeth_l2_send_setgroupmac_cb);
}
@@ -159,7 +159,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
__u8 *mac;
- QETH_DBF_TEXT(TRACE, 2, "L2Dgmacb");
+ QETH_CARD_TEXT(card, 2, "L2Dgmacb");
cmd = (struct qeth_ipa_cmd *) data;
mac = &cmd->data.setdelmac.mac[0];
if (cmd->hdr.return_code)
@@ -170,7 +170,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
{
- QETH_DBF_TEXT(TRACE, 2, "L2Dgmac");
+ QETH_CARD_TEXT(card, 2, "L2Dgmac");
return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
qeth_l2_send_delgroupmac_cb);
}
@@ -262,15 +262,14 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "L2sdvcb");
+ QETH_CARD_TEXT(card, 2, "L2sdvcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. "
"Continuing\n", cmd->data.setdelvlan.vlan_id,
QETH_CARD_IFNAME(card), cmd->hdr.return_code);
- QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command);
- QETH_DBF_TEXT_(TRACE, 2, "L2%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
+ QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
}
return 0;
}
@@ -281,7 +280,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT_(TRACE, 4, "L2sdv%x", ipacmd);
+ QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelvlan.vlan_id = i;
@@ -292,7 +291,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
{
struct qeth_vlan_vid *id;
- QETH_DBF_TEXT(TRACE, 3, "L2prcvln");
+ QETH_CARD_TEXT(card, 3, "L2prcvln");
spin_lock_bh(&card->vlanlock);
list_for_each_entry(id, &card->vid_list, list) {
if (clear)
@@ -310,13 +309,13 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct qeth_card *card = dev->ml_priv;
struct qeth_vlan_vid *id;
- QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid);
+ QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
- QETH_DBF_TEXT(TRACE, 3, "aidOSM");
+ QETH_CARD_TEXT(card, 3, "aidOSM");
return;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_DBF_TEXT(TRACE, 3, "aidREC");
+ QETH_CARD_TEXT(card, 3, "aidREC");
return;
}
id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
@@ -334,13 +333,13 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
+ QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
- QETH_DBF_TEXT(TRACE, 3, "kidOSM");
+ QETH_CARD_TEXT(card, 3, "kidOSM");
return;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_DBF_TEXT(TRACE, 3, "kidREC");
+ QETH_CARD_TEXT(card, 3, "kidREC");
return;
}
spin_lock_bh(&card->vlanlock);
@@ -456,7 +455,7 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
/* else unknown */
default:
dev_kfree_skb_any(skb);
- QETH_DBF_TEXT(TRACE, 3, "inbunkno");
+ QETH_CARD_TEXT(card, 3, "inbunkno");
QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
continue;
}
@@ -474,7 +473,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 2, "L2sdmac");
+ QETH_CARD_TEXT(card, 2, "L2sdmac");
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
@@ -488,10 +487,10 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "L2Smaccb");
+ QETH_CARD_TEXT(card, 2, "L2Smaccb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 2, "L2er%x", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
switch (cmd->hdr.return_code) {
case IPA_RC_L2_DUP_MAC:
@@ -523,7 +522,7 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
{
- QETH_DBF_TEXT(TRACE, 2, "L2Setmac");
+ QETH_CARD_TEXT(card, 2, "L2Setmac");
return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
qeth_l2_send_setmac_cb);
}
@@ -534,10 +533,10 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "L2Dmaccb");
+ QETH_CARD_TEXT(card, 2, "L2Dmaccb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
cmd->hdr.return_code = -EIO;
return 0;
}
@@ -548,7 +547,7 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card,
static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
- QETH_DBF_TEXT(TRACE, 2, "L2Delmac");
+ QETH_CARD_TEXT(card, 2, "L2Delmac");
if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
return 0;
return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
@@ -594,23 +593,22 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
struct qeth_card *card = dev->ml_priv;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "setmac");
+ QETH_CARD_TEXT(card, 3, "setmac");
if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) {
- QETH_DBF_TEXT(TRACE, 3, "setmcINV");
+ QETH_CARD_TEXT(card, 3, "setmcINV");
return -EOPNOTSUPP;
}
if (card->info.type == QETH_CARD_TYPE_OSN ||
card->info.type == QETH_CARD_TYPE_OSM ||
card->info.type == QETH_CARD_TYPE_OSX) {
- QETH_DBF_TEXT(TRACE, 3, "setmcTYP");
+ QETH_CARD_TEXT(card, 3, "setmcTYP");
return -EOPNOTSUPP;
}
- QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card));
- QETH_DBF_HEX(TRACE, 3, addr->sa_data, OSA_ADDR_LEN);
+ QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN);
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_DBF_TEXT(TRACE, 3, "setmcREC");
+ QETH_CARD_TEXT(card, 3, "setmcREC");
return -ERESTARTSYS;
}
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
@@ -627,7 +625,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
if (card->info.type == QETH_CARD_TYPE_OSN)
return ;
- QETH_DBF_TEXT(TRACE, 3, "setmulti");
+ QETH_CARD_TEXT(card, 3, "setmulti");
if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
(card->state != CARD_STATE_UP))
return;
@@ -714,10 +712,13 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
}
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (card->info.type != QETH_CARD_TYPE_IQD) {
+ if (qeth_hdr_chk_and_bounce(new_skb,
+ sizeof(struct qeth_hdr_layer2)))
+ goto tx_drop;
rc = qeth_do_send_packet(card, queue, new_skb, hdr,
elements);
- else
+ } else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
elements, data_offset, hd_len);
if (!rc) {
@@ -771,11 +772,10 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_start_time = qeth_get_micros();
}
if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
- QETH_DBF_TEXT(TRACE, 1, "qdinchk");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
+ QETH_CARD_TEXT(card, 1, "qdinchk");
+ QETH_CARD_TEXT_(card, 1, "%04X%04X", first_element,
count);
- QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
+ QETH_CARD_TEXT_(card, 1, "%04X", queue);
qeth_schedule_recovery(card);
return;
}
@@ -799,13 +799,13 @@ static int qeth_l2_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "qethopen");
+ QETH_CARD_TEXT(card, 4, "qethopen");
if (card->state != CARD_STATE_SOFTSETUP)
return -ENODEV;
if ((card->info.type != QETH_CARD_TYPE_OSN) &&
(!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
- QETH_DBF_TEXT(TRACE, 4, "nomacadr");
+ QETH_CARD_TEXT(card, 4, "nomacadr");
return -EPERM;
}
card->data.state = CH_STATE_UP;
@@ -822,7 +822,7 @@ static int qeth_l2_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "qethstop");
+ QETH_CARD_TEXT(card, 4, "qethstop");
netif_tx_disable(dev);
if (card->state == CARD_STATE_UP)
card->state = CARD_STATE_SOFTSETUP;
@@ -860,8 +860,6 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
unregister_netdev(card->dev);
card->dev = NULL;
}
-
- qeth_l2_del_all_mc(card);
return;
}
@@ -935,6 +933,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
enum qeth_card_states recover_flag;
BUG_ON(!card);
+ mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -1012,6 +1011,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
out:
mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
return 0;
out_remove:
@@ -1025,6 +1025,7 @@ out_remove:
else
card->state = CARD_STATE_DOWN;
mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
return rc;
}
@@ -1040,6 +1041,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
int rc = 0, rc2 = 0, rc3 = 0;
enum qeth_card_states recover_flag;
+ mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
@@ -1060,6 +1062,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
/* let user_space know that device is offline */
kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
return 0;
}
@@ -1074,11 +1077,10 @@ static int qeth_l2_recover(void *ptr)
int rc = 0;
card = (struct qeth_card *) ptr;
- QETH_DBF_TEXT(TRACE, 2, "recover1");
- QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "recover1");
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
- QETH_DBF_TEXT(TRACE, 2, "recover2");
+ QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
card->use_hard_stop = 1;
@@ -1181,12 +1183,12 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
unsigned long flags;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 5, "osndctrd");
+ QETH_CARD_TEXT(card, 5, "osndctrd");
wait_event(card->wait_q,
atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
qeth_prepare_control_data(card, len, iob);
- QETH_DBF_TEXT(TRACE, 6, "osnoirqp");
+ QETH_CARD_TEXT(card, 6, "osnoirqp");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
(addr_t) iob, 0, 0);
@@ -1194,7 +1196,7 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
"ccw_device_start rc = %i\n", rc);
- QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
+ QETH_CARD_TEXT_(card, 2, " err%d", rc);
qeth_release_buffer(iob->channel, iob);
atomic_set(&card->write.irq_pending, 0);
wake_up(&card->wait_q);
@@ -1207,7 +1209,7 @@ static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
{
u16 s1, s2;
- QETH_DBF_TEXT(TRACE, 4, "osndipa");
+ QETH_CARD_TEXT(card, 4, "osndipa");
qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
@@ -1225,12 +1227,12 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
struct qeth_card *card;
int rc;
- QETH_DBF_TEXT(TRACE, 2, "osnsdmc");
if (!dev)
return -ENODEV;
card = dev->ml_priv;
if (!card)
return -ENODEV;
+ QETH_CARD_TEXT(card, 2, "osnsdmc");
if ((card->state != CARD_STATE_UP) &&
(card->state != CARD_STATE_SOFTSETUP))
return -ENODEV;
@@ -1247,13 +1249,13 @@ int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
{
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 2, "osnreg");
*dev = qeth_l2_netdev_by_devno(read_dev_no);
if (*dev == NULL)
return -ENODEV;
card = (*dev)->ml_priv;
if (!card)
return -ENODEV;
+ QETH_CARD_TEXT(card, 2, "osnreg");
if ((assist_cb == NULL) || (data_cb == NULL))
return -EINVAL;
card->osn_info.assist_cb = assist_cb;
@@ -1266,12 +1268,12 @@ void qeth_osn_deregister(struct net_device *dev)
{
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 2, "osndereg");
if (!dev)
return;
card = dev->ml_priv;
if (!card)
return;
+ QETH_CARD_TEXT(card, 2, "osndereg");
card->osn_info.assist_cb = NULL;
card->osn_info.data_cb = NULL;
return;
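
Most of the qeth_l2_main.c hunks above replace entries in the global TRACE debug area (QETH_DBF_TEXT) with per-card entries (QETH_CARD_TEXT), so each record carries the device it belongs to and the separate CARD_BUS_ID trace lines become redundant. A minimal user-space sketch of the same idea follows; the names are illustrative stand-ins, not the actual macro definitions from qeth_core.h:

#include <stdio.h>

/* Illustrative stand-in for the qeth card structure; not the real type. */
struct card {
	const char *bus_id;		/* device id, e.g. "0.0.f5f0" */
};

/*
 * Per-card trace entry: the record itself names the owning device, so
 * callers no longer need a separate trace line just for CARD_BUS_ID.
 */
static void card_text(const struct card *card, int level, const char *txt)
{
	fprintf(stderr, "[%s] <%d> %s\n", card->bus_id, level, txt);
}

int main(void)
{
	struct card c = { .bus_id = "0.0.f5f0" };
	char buf[32];

	card_text(&c, 2, "L2Setmac");		/* QETH_CARD_TEXT analogue */
	snprintf(buf, sizeof(buf), "err%d", -5);
	card_text(&c, 2, buf);			/* QETH_CARD_TEXT_ analogue */
	return 0;
}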
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 8447d233d0b3..e705b27ec7dc 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -64,5 +64,6 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
const u8 *);
int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types);
int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types);
+int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
#endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 61adae21a464..e22ae248f613 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -195,7 +195,7 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
}
}
-static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
struct qeth_ipaddr *addr)
{
struct qeth_ipato_entry *ipatoe;
@@ -287,7 +287,7 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
addr->users += add ? 1 : -1;
if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
qeth_l3_is_addr_covered_by_ipato(card, addr)) {
- QETH_DBF_TEXT(TRACE, 2, "tkovaddr");
+ QETH_CARD_TEXT(card, 2, "tkovaddr");
addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
}
list_add_tail(&addr->entry, card->ip_tbd_list);
@@ -301,13 +301,13 @@ static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
unsigned long flags;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 4, "delip");
+ QETH_CARD_TEXT(card, 4, "delip");
if (addr->proto == QETH_PROT_IPV4)
- QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
+ QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
else {
- QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
- QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
}
spin_lock_irqsave(&card->ip_lock, flags);
rc = __qeth_l3_insert_ip_todo(card, addr, 0);
@@ -320,12 +320,12 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
unsigned long flags;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 4, "addip");
+ QETH_CARD_TEXT(card, 4, "addip");
if (addr->proto == QETH_PROT_IPV4)
- QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
+ QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
else {
- QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
- QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
}
spin_lock_irqsave(&card->ip_lock, flags);
rc = __qeth_l3_insert_ip_todo(card, addr, 1);
@@ -353,10 +353,10 @@ static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
struct qeth_ipaddr *iptodo;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 4, "delmc");
+ QETH_CARD_TEXT(card, 4, "delmc");
iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
if (!iptodo) {
- QETH_DBF_TEXT(TRACE, 2, "dmcnomem");
+ QETH_CARD_TEXT(card, 2, "dmcnomem");
return;
}
iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
@@ -457,8 +457,8 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
unsigned long flags;
int rc;
- QETH_DBF_TEXT(TRACE, 2, "sdiplist");
- QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "sdiplist");
+ QETH_CARD_HEX(card, 2, &card, sizeof(void *));
if (card->options.sniffer)
return;
@@ -466,7 +466,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
tbd_list = card->ip_tbd_list;
card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
if (!card->ip_tbd_list) {
- QETH_DBF_TEXT(TRACE, 0, "silnomem");
+ QETH_CARD_TEXT(card, 0, "silnomem");
card->ip_tbd_list = tbd_list;
spin_unlock_irqrestore(&card->ip_lock, flags);
return;
@@ -517,7 +517,7 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
struct qeth_ipaddr *addr, *tmp;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 4, "clearip");
+ QETH_CARD_TEXT(card, 4, "clearip");
if (recover && card->options.sniffer)
return;
spin_lock_irqsave(&card->ip_lock, flags);
@@ -577,7 +577,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "setdelmc");
+ QETH_CARD_TEXT(card, 4, "setdelmc");
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -615,8 +615,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
__u8 netmask[16];
- QETH_DBF_TEXT(TRACE, 4, "setdelip");
- QETH_DBF_TEXT_(TRACE, 4, "flags%02X", flags);
+ QETH_CARD_TEXT(card, 4, "setdelip");
+ QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -645,7 +645,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 4, "setroutg");
+ QETH_CARD_TEXT(card, 4, "setroutg");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setrtg.type = (type);
@@ -689,7 +689,7 @@ int qeth_l3_setrouting_v4(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "setrtg4");
+ QETH_CARD_TEXT(card, 3, "setrtg4");
qeth_l3_correct_routing_type(card, &card->options.route4.type,
QETH_PROT_IPV4);
@@ -709,7 +709,7 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "setrtg6");
+ QETH_CARD_TEXT(card, 3, "setrtg6");
#ifdef CONFIG_QETH_IPV6
if (!qeth_is_supported(card, IPA_IPV6))
@@ -753,7 +753,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
unsigned long flags;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 2, "addipato");
+ QETH_CARD_TEXT(card, 2, "addipato");
spin_lock_irqsave(&card->ip_lock, flags);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != new->proto)
@@ -778,7 +778,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
struct qeth_ipato_entry *ipatoe, *tmp;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 2, "delipato");
+ QETH_CARD_TEXT(card, 2, "delipato");
spin_lock_irqsave(&card->ip_lock, flags);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
@@ -806,11 +806,11 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "addvipa4");
+ QETH_CARD_TEXT(card, 2, "addvipa4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "addvipa6");
+ QETH_CARD_TEXT(card, 2, "addvipa6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@@ -841,11 +841,11 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "delvipa4");
+ QETH_CARD_TEXT(card, 2, "delvipa4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "delvipa6");
+ QETH_CARD_TEXT(card, 2, "delvipa6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@@ -870,11 +870,11 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "addrxip4");
+ QETH_CARD_TEXT(card, 2, "addrxip4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "addrxip6");
+ QETH_CARD_TEXT(card, 2, "addrxip6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@@ -905,11 +905,11 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "addrxip4");
+ QETH_CARD_TEXT(card, 2, "addrxip4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "addrxip6");
+ QETH_CARD_TEXT(card, 2, "addrxip6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@@ -929,15 +929,15 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
int cnt = 3;
if (addr->proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "setaddr4");
- QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
+ QETH_CARD_TEXT(card, 2, "setaddr4");
+ QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
} else if (addr->proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "setaddr6");
- QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
- QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_TEXT(card, 2, "setaddr6");
+ QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
} else {
- QETH_DBF_TEXT(TRACE, 2, "setaddr?");
- QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
+ QETH_CARD_TEXT(card, 2, "setaddr?");
+ QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
}
do {
if (addr->is_multicast)
@@ -946,10 +946,10 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP,
addr->set_flags);
if (rc)
- QETH_DBF_TEXT(TRACE, 2, "failed");
+ QETH_CARD_TEXT(card, 2, "failed");
} while ((--cnt > 0) && rc);
if (rc) {
- QETH_DBF_TEXT(TRACE, 2, "FAILED");
+ QETH_CARD_TEXT(card, 2, "FAILED");
qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
dev_warn(&card->gdev->dev,
"Registering IP address %s failed\n", buf);
@@ -963,15 +963,15 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
int rc = 0;
if (addr->proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "deladdr4");
- QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
+ QETH_CARD_TEXT(card, 2, "deladdr4");
+ QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
} else if (addr->proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "deladdr6");
- QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
- QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_TEXT(card, 2, "deladdr6");
+ QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
} else {
- QETH_DBF_TEXT(TRACE, 2, "deladdr?");
- QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
+ QETH_CARD_TEXT(card, 2, "deladdr?");
+ QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
}
if (addr->is_multicast)
rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
@@ -979,7 +979,7 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP,
addr->del_flags);
if (rc)
- QETH_DBF_TEXT(TRACE, 2, "failed");
+ QETH_CARD_TEXT(card, 2, "failed");
return rc;
}
@@ -1012,7 +1012,7 @@ static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command,
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "adpmode");
+ QETH_CARD_TEXT(card, 4, "adpmode");
iob = qeth_get_adapter_cmd(card, command,
sizeof(struct qeth_ipacmd_setadpparms));
@@ -1027,7 +1027,7 @@ static int qeth_l3_setadapter_hstr(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 4, "adphstr");
+ QETH_CARD_TEXT(card, 4, "adphstr");
if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) {
rc = qeth_l3_send_setadp_mode(card,
@@ -1093,7 +1093,7 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "defadpcb");
+ QETH_CARD_TEXT(card, 4, "defadpcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == 0) {
@@ -1106,13 +1106,13 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
- QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask);
+ QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask);
}
if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
card->info.tx_csum_mask =
cmd->data.setassparms.data.flags_32bit;
- QETH_DBF_TEXT_(TRACE, 3, "tcsu:%d", card->info.tx_csum_mask);
+ QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask);
}
return 0;
@@ -1125,7 +1125,7 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "getasscm");
+ QETH_CARD_TEXT(card, 4, "getasscm");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -1147,7 +1147,7 @@ static int qeth_l3_send_setassparms(struct qeth_card *card,
int rc;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "sendassp");
+ QETH_CARD_TEXT(card, 4, "sendassp");
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
if (len <= sizeof(__u32))
@@ -1166,7 +1166,7 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 4, "simassp6");
+ QETH_CARD_TEXT(card, 4, "simassp6");
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
0, QETH_PROT_IPV6);
rc = qeth_l3_send_setassparms(card, iob, 0, 0,
@@ -1182,7 +1182,7 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
int length = 0;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 4, "simassp4");
+ QETH_CARD_TEXT(card, 4, "simassp4");
if (data)
length = sizeof(__u32);
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
@@ -1196,7 +1196,7 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "ipaarp");
+ QETH_CARD_TEXT(card, 3, "ipaarp");
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
dev_info(&card->gdev->dev,
@@ -1218,7 +1218,7 @@ static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "ipaipfrg");
+ QETH_CARD_TEXT(card, 3, "ipaipfrg");
if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
dev_info(&card->gdev->dev,
@@ -1243,7 +1243,7 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "stsrcmac");
+ QETH_CARD_TEXT(card, 3, "stsrcmac");
if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
dev_info(&card->gdev->dev,
@@ -1265,7 +1265,7 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "strtvlan");
+ QETH_CARD_TEXT(card, 3, "strtvlan");
if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
dev_info(&card->gdev->dev,
@@ -1289,7 +1289,7 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "stmcast");
+ QETH_CARD_TEXT(card, 3, "stmcast");
if (!qeth_is_supported(card, IPA_MULTICASTING)) {
dev_info(&card->gdev->dev,
@@ -1349,7 +1349,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "softipv6");
+ QETH_CARD_TEXT(card, 3, "softipv6");
if (card->info.type == QETH_CARD_TYPE_IQD)
goto out;
@@ -1395,7 +1395,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "strtipv6");
+ QETH_CARD_TEXT(card, 3, "strtipv6");
if (!qeth_is_supported(card, IPA_IPV6)) {
dev_info(&card->gdev->dev,
@@ -1412,7 +1412,7 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "stbrdcst");
+ QETH_CARD_TEXT(card, 3, "stbrdcst");
card->info.broadcast_capable = 0;
if (!qeth_is_supported(card, IPA_FILTERING)) {
dev_info(&card->gdev->dev,
@@ -1512,7 +1512,7 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "strtcsum");
+ QETH_CARD_TEXT(card, 3, "strtcsum");
if (card->options.checksum_type == NO_CHECKSUMMING) {
dev_info(&card->gdev->dev,
@@ -1569,7 +1569,7 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "sttso");
+ QETH_CARD_TEXT(card, 3, "sttso");
if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
dev_info(&card->gdev->dev,
@@ -1596,7 +1596,7 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
static int qeth_l3_start_ipassists(struct qeth_card *card)
{
- QETH_DBF_TEXT(TRACE, 3, "strtipas");
+ QETH_CARD_TEXT(card, 3, "strtipas");
qeth_set_access_ctrl_online(card); /* go on*/
qeth_l3_start_ipa_arp_processing(card); /* go on*/
@@ -1619,7 +1619,7 @@ static int qeth_l3_put_unique_id(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "puniqeid");
+ QETH_CARD_TEXT(card, 2, "puniqeid");
if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
UNIQUE_ID_NOT_BY_CARD)
@@ -1723,7 +1723,7 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
cmd = (struct qeth_ipa_cmd *)data;
rc = cmd->hdr.return_code;
if (rc)
- QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc);
+ QETH_CARD_TEXT_(card, 2, "dxter%x", rc);
switch (cmd->data.diagass.action) {
case QETH_DIAGS_CMD_TRACE_QUERY:
break;
@@ -1800,7 +1800,7 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
struct ip_mc_list *im4;
char buf[MAX_ADDR_LEN];
- QETH_DBF_TEXT(TRACE, 4, "addmc");
+ QETH_CARD_TEXT(card, 4, "addmc");
for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
@@ -1820,7 +1820,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
struct vlan_group *vg;
int i;
- QETH_DBF_TEXT(TRACE, 4, "addmcvl");
+ QETH_CARD_TEXT(card, 4, "addmcvl");
if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
return;
@@ -1844,7 +1844,7 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
{
struct in_device *in4_dev;
- QETH_DBF_TEXT(TRACE, 4, "chkmcv4");
+ QETH_CARD_TEXT(card, 4, "chkmcv4");
in4_dev = in_dev_get(card->dev);
if (in4_dev == NULL)
return;
@@ -1862,7 +1862,7 @@ static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
struct ifmcaddr6 *im6;
char buf[MAX_ADDR_LEN];
- QETH_DBF_TEXT(TRACE, 4, "addmc6");
+ QETH_CARD_TEXT(card, 4, "addmc6");
for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
@@ -1883,7 +1883,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
struct vlan_group *vg;
int i;
- QETH_DBF_TEXT(TRACE, 4, "admc6vl");
+ QETH_CARD_TEXT(card, 4, "admc6vl");
if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
return;
@@ -1907,7 +1907,7 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
{
struct inet6_dev *in6_dev;
- QETH_DBF_TEXT(TRACE, 4, "chkmcv6");
+ QETH_CARD_TEXT(card, 4, "chkmcv6");
if (!qeth_is_supported(card, IPA_IPV6))
return ;
in6_dev = in6_dev_get(card->dev);
@@ -1928,7 +1928,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
struct in_ifaddr *ifa;
struct qeth_ipaddr *addr;
- QETH_DBF_TEXT(TRACE, 4, "frvaddr4");
+ QETH_CARD_TEXT(card, 4, "frvaddr4");
in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid));
if (!in_dev)
@@ -1954,7 +1954,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
struct inet6_ifaddr *ifa;
struct qeth_ipaddr *addr;
- QETH_DBF_TEXT(TRACE, 4, "frvaddr6");
+ QETH_CARD_TEXT(card, 4, "frvaddr6");
in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
if (!in6_dev)
@@ -1989,7 +1989,7 @@ static void qeth_l3_vlan_rx_register(struct net_device *dev,
struct qeth_card *card = dev->ml_priv;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 4, "vlanreg");
+ QETH_CARD_TEXT(card, 4, "vlanreg");
spin_lock_irqsave(&card->vlanlock, flags);
card->vlangrp = grp;
spin_unlock_irqrestore(&card->vlanlock, flags);
@@ -2005,9 +2005,9 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct qeth_card *card = dev->ml_priv;
unsigned long flags;
- QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
+ QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_DBF_TEXT(TRACE, 3, "kidREC");
+ QETH_CARD_TEXT(card, 3, "kidREC");
return;
}
spin_lock_irqsave(&card->vlanlock, flags);
@@ -2162,7 +2162,7 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
break;
default:
dev_kfree_skb_any(skb);
- QETH_DBF_TEXT(TRACE, 3, "inbunkno");
+ QETH_CARD_TEXT(card, 3, "inbunkno");
QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
continue;
}
@@ -2229,7 +2229,8 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
card = vlan_dev_real_dev(dev)->ml_priv;
if (card && card->options.layer2)
card = NULL;
- QETH_DBF_TEXT_(TRACE, 4, "%d", rc);
+ if (card)
+ QETH_CARD_TEXT_(card, 4, "%d", rc);
return card ;
}
@@ -2307,10 +2308,10 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
} else if (card->options.sniffer && /* HiperSockets trace */
qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
if (dev->flags & IFF_PROMISC) {
- QETH_DBF_TEXT(TRACE, 3, "+promisc");
+ QETH_CARD_TEXT(card, 3, "+promisc");
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
} else {
- QETH_DBF_TEXT(TRACE, 3, "-promisc");
+ QETH_CARD_TEXT(card, 3, "-promisc");
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
}
}
@@ -2320,7 +2321,7 @@ static void qeth_l3_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 3, "setmulti");
+ QETH_CARD_TEXT(card, 3, "setmulti");
if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
(card->state != CARD_STATE_UP))
return;
@@ -2365,7 +2366,7 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
int tmp;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "arpstnoe");
+ QETH_CARD_TEXT(card, 3, "arpstnoe");
/*
* currently GuestLAN only supports the ARP assist function
@@ -2417,17 +2418,17 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
int uentry_size;
int i;
- QETH_DBF_TEXT(TRACE, 4, "arpquecb");
+ QETH_CARD_TEXT(card, 4, "arpquecb");
qinfo = (struct qeth_arp_query_info *) reply->param;
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 4, "qaer1%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "qaer1%i", cmd->hdr.return_code);
return 0;
}
if (cmd->data.setassparms.hdr.return_code) {
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
- QETH_DBF_TEXT_(TRACE, 4, "qaer2%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "qaer2%i", cmd->hdr.return_code);
return 0;
}
qdata = &cmd->data.setassparms.data.query_arp;
@@ -2449,14 +2450,14 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
/* check if there is enough room in userspace */
if ((qinfo->udata_len - qinfo->udata_offset) <
qdata->no_entries * uentry_size){
- QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM);
+ QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
cmd->hdr.return_code = -ENOMEM;
goto out_error;
}
- QETH_DBF_TEXT_(TRACE, 4, "anore%i",
+ QETH_CARD_TEXT_(card, 4, "anore%i",
cmd->data.setassparms.hdr.number_of_replies);
- QETH_DBF_TEXT_(TRACE, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
- QETH_DBF_TEXT_(TRACE, 4, "anoen%i", qdata->no_entries);
+ QETH_CARD_TEXT_(card, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
+ QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
/* strip off "media specific information" */
@@ -2492,7 +2493,7 @@ static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
unsigned long),
void *reply_param)
{
- QETH_DBF_TEXT(TRACE, 4, "sendarp");
+ QETH_CARD_TEXT(card, 4, "sendarp");
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
@@ -2508,7 +2509,7 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
int tmp;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "arpquery");
+ QETH_CARD_TEXT(card, 3, "arpquery");
if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
IPA_ARP_PROCESSING)) {
@@ -2551,7 +2552,7 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
int tmp;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "arpadent");
+ QETH_CARD_TEXT(card, 3, "arpadent");
/*
* currently GuestLAN only supports the ARP assist function
@@ -2590,7 +2591,7 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
int tmp;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "arprment");
+ QETH_CARD_TEXT(card, 3, "arprment");
/*
* currently GuestLAN only supports the ARP assist function
@@ -2626,7 +2627,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
int rc;
int tmp;
- QETH_DBF_TEXT(TRACE, 3, "arpflush");
+ QETH_CARD_TEXT(card, 3, "arpflush");
/*
* currently GuestLAN only supports the ARP assist function
@@ -2734,7 +2735,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = -EOPNOTSUPP;
}
if (rc)
- QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
+ QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
return rc;
}
@@ -2903,19 +2904,11 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb)
unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
tcp_hdr(skb)->doff * 4;
int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
- int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd);
+ int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd);
elements += skb_shinfo(skb)->nr_frags;
return elements;
}
-static inline int qeth_l3_tso_check(struct sk_buff *skb)
-{
- int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) -
- (unsigned long)skb->data;
- return (((unsigned long)skb->data & PAGE_MASK) !=
- (((unsigned long)skb->data + len) & PAGE_MASK));
-}
-
static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
int rc;
@@ -3015,8 +3008,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
(cast_type == RTN_UNSPEC)) {
hdr = (struct qeth_hdr *)skb_push(new_skb,
sizeof(struct qeth_hdr_tso));
- if (qeth_l3_tso_check(new_skb))
- QETH_DBF_MESSAGE(2, "tso skb misaligned\n");
memset(hdr, 0, sizeof(struct qeth_hdr_tso));
qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
qeth_tso_fill_header(card, hdr, new_skb);
@@ -3047,10 +3038,20 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
elements_needed += elems;
nr_frags = skb_shinfo(new_skb)->nr_frags;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (card->info.type != QETH_CARD_TYPE_IQD) {
+ int len;
+ if (large_send == QETH_LARGE_SEND_TSO)
+ len = ((unsigned long)tcp_hdr(new_skb) +
+ tcp_hdr(new_skb)->doff * 4) -
+ (unsigned long)new_skb->data;
+ else
+ len = sizeof(struct qeth_hdr_layer3);
+
+ if (qeth_hdr_chk_and_bounce(new_skb, len))
+ goto tx_drop;
rc = qeth_do_send_packet(card, queue, new_skb, hdr,
elements_needed);
- else
+ } else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
elements_needed, data_offset, 0);
@@ -3103,7 +3104,7 @@ static int qeth_l3_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "qethopen");
+ QETH_CARD_TEXT(card, 4, "qethopen");
if (card->state != CARD_STATE_SOFTSETUP)
return -ENODEV;
card->data.state = CH_STATE_UP;
@@ -3119,7 +3120,7 @@ static int qeth_l3_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "qethstop");
+ QETH_CARD_TEXT(card, 4, "qethstop");
netif_tx_disable(dev);
if (card->state == CARD_STATE_UP)
card->state = CARD_STATE_SOFTSETUP;
@@ -3312,11 +3313,10 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_start_time = qeth_get_micros();
}
if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
- QETH_DBF_TEXT(TRACE, 1, "qdinchk");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
+ QETH_CARD_TEXT(card, 1, "qdinchk");
+ QETH_CARD_TEXT_(card, 1, "%04X%04X",
first_element, count);
- QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
+ QETH_CARD_TEXT_(card, 1, "%04X", queue);
qeth_schedule_recovery(card);
return;
}
@@ -3354,6 +3354,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
+ qeth_l3_remove_device_attributes(&cgdev->dev);
+
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
@@ -3367,7 +3369,6 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
card->dev = NULL;
}
- qeth_l3_remove_device_attributes(&cgdev->dev);
qeth_l3_clear_ip_list(card, 0, 0);
qeth_l3_clear_ipato_list(card);
return;
@@ -3380,6 +3381,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
enum qeth_card_states recover_flag;
BUG_ON(!card);
+ mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -3461,6 +3463,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
out:
mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
return 0;
out_remove:
card->use_hard_stop = 1;
@@ -3473,6 +3476,7 @@ out_remove:
else
card->state = CARD_STATE_DOWN;
mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
return rc;
}
@@ -3488,6 +3492,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
int rc = 0, rc2 = 0, rc3 = 0;
enum qeth_card_states recover_flag;
+ mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
@@ -3508,6 +3513,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
/* let user_space know that device is offline */
kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
return 0;
}
@@ -3522,11 +3528,11 @@ static int qeth_l3_recover(void *ptr)
int rc = 0;
card = (struct qeth_card *) ptr;
- QETH_DBF_TEXT(TRACE, 2, "recover1");
- QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "recover1");
+ QETH_CARD_HEX(card, 2, &card, sizeof(void *));
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
- QETH_DBF_TEXT(TRACE, 2, "recover2");
+ QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
card->use_hard_stop = 1;
@@ -3624,8 +3630,8 @@ static int qeth_l3_ip_event(struct notifier_block *this,
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
- QETH_DBF_TEXT(TRACE, 3, "ipevent");
card = qeth_l3_get_card_from_dev(dev);
+ QETH_CARD_TEXT(card, 3, "ipevent");
if (!card)
return NOTIFY_DONE;
@@ -3671,11 +3677,11 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
struct qeth_ipaddr *addr;
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 3, "ip6event");
card = qeth_l3_get_card_from_dev(dev);
if (!card)
return NOTIFY_DONE;
+ QETH_CARD_TEXT(card, 3, "ip6event");
if (!qeth_is_supported(card, IPA_IPV6))
return NOTIFY_DONE;
@@ -3714,7 +3720,7 @@ static int qeth_l3_register_notifiers(void)
{
int rc;
- QETH_DBF_TEXT(TRACE, 5, "regnotif");
+ QETH_DBF_TEXT(SETUP, 5, "regnotif");
rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
if (rc)
return rc;
@@ -3733,7 +3739,7 @@ static int qeth_l3_register_notifiers(void)
static void qeth_l3_unregister_notifiers(void)
{
- QETH_DBF_TEXT(TRACE, 5, "unregnot");
+ QETH_DBF_TEXT(SETUP, 5, "unregnot");
BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
#ifdef CONFIG_QETH_IPV6
BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
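
In the qeth_l3_hard_start_xmit() hunk above, the inline qeth_l3_tso_check() helper is dropped in favour of qeth_hdr_chk_and_bounce(), called with the full header length (layer-3 header, or up to the end of the TCP header for TSO). The helper itself lives in qeth_core_main.c and is not shown in this section; the sketch below only illustrates the page-crossing test such a bounce helper needs, under the assumption of 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Does the header [data, data + hdr_len) straddle a page boundary? */
static int hdr_crosses_page(uintptr_t data, size_t hdr_len)
{
	return (data & PAGE_MASK) != ((data + hdr_len - 1) & PAGE_MASK);
}

int main(void)
{
	/* header starts 40 bytes before a page boundary */
	uintptr_t data = 2 * PAGE_SIZE - 40;

	printf("60-byte header crosses page: %d\n", hdr_crosses_page(data, 60));
	printf("32-byte header crosses page: %d\n", hdr_crosses_page(data, 32));
	return 0;
}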
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index fb5318b30e99..67cfa68dcf1b 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -479,6 +479,7 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ struct qeth_ipaddr *tmpipa, *t;
char *tmp;
int rc = 0;
@@ -497,8 +498,21 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
} else if (!strcmp(tmp, "1")) {
card->ipato.enabled = 1;
+ list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
+ if ((tmpipa->type == QETH_IP_TYPE_NORMAL) &&
+ qeth_l3_is_addr_covered_by_ipato(card, tmpipa))
+ tmpipa->set_flags |=
+ QETH_IPA_SETIP_TAKEOVER_FLAG;
+ }
+
} else if (!strcmp(tmp, "0")) {
card->ipato.enabled = 0;
+ list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
+ if (tmpipa->set_flags &
+ QETH_IPA_SETIP_TAKEOVER_FLAG)
+ tmpipa->set_flags &=
+ ~QETH_IPA_SETIP_TAKEOVER_FLAG;
+ }
} else
rc = -EINVAL;
out:
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 70491274da16..65e1cf104943 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -47,6 +47,7 @@ static struct device *smsg_dev;
static DEFINE_SPINLOCK(smsg_list_lock);
static LIST_HEAD(smsg_list);
+static int iucv_path_connected;
static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
@@ -142,8 +143,10 @@ static int smsg_pm_freeze(struct device *dev)
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "smsg_pm_freeze\n");
#endif
- if (smsg_path)
+ if (smsg_path && iucv_path_connected) {
iucv_path_sever(smsg_path, NULL);
+ iucv_path_connected = 0;
+ }
return 0;
}
@@ -154,7 +157,7 @@ static int smsg_pm_restore_thaw(struct device *dev)
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "smsg_pm_restore_thaw\n");
#endif
- if (smsg_path) {
+ if (smsg_path && iucv_path_connected) {
memset(smsg_path, 0, sizeof(*smsg_path));
smsg_path->msglim = 255;
smsg_path->flags = 0;
@@ -165,6 +168,8 @@ static int smsg_pm_restore_thaw(struct device *dev)
printk(KERN_ERR
"iucv_path_connect returned with rc %i\n", rc);
#endif
+ if (!rc)
+ iucv_path_connected = 1;
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
}
return 0;
@@ -214,6 +219,8 @@ static int __init smsg_init(void)
NULL, NULL, NULL);
if (rc)
goto out_free_path;
+ else
+ iucv_path_connected = 1;
smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!smsg_dev) {
rc = -ENOMEM;
diff --git a/drivers/s390/net/smsgiucv_app.c b/drivers/s390/net/smsgiucv_app.c
index 137688790207..4d2ea4000422 100644
--- a/drivers/s390/net/smsgiucv_app.c
+++ b/drivers/s390/net/smsgiucv_app.c
@@ -180,6 +180,13 @@ static int __init smsgiucv_app_init(void)
goto fail_put_driver;
}
+ /* convert sender to uppercase characters */
+ if (sender) {
+ int len = strlen(sender);
+ while (len--)
+ sender[len] = toupper(sender[len]);
+ }
+
/* register with the smsgiucv device driver */
rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback);
if (rc) {
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index e331df2122f7..96fa1f536394 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -98,13 +98,11 @@ static void __init zfcp_init_device_setup(char *devstr)
u64 wwpn, lun;
/* duplicate devstr and keep the original for sysfs presentation*/
- str_saved = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
+ str_saved = kstrdup(devstr, GFP_KERNEL);
str = str_saved;
if (!str)
return;
- strcpy(str, devstr);
-
token = strsep(&str, ",");
if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
goto err_out;
@@ -314,7 +312,7 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
}
retval = -EINVAL;
- INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
+ INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work);
spin_lock_init(&unit->latencies.lock);
unit->latencies.write.channel.min = 0xFFFFFFFF;
@@ -526,6 +524,10 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
rwlock_init(&adapter->port_list_lock);
INIT_LIST_HEAD(&adapter->port_list);
+ INIT_LIST_HEAD(&adapter->events.list);
+ INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
+ spin_lock_init(&adapter->events.list_lock);
+
init_waitqueue_head(&adapter->erp_ready_wq);
init_waitqueue_head(&adapter->erp_done_wqh);
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 1a2db0a35737..fcbd2b756da4 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -189,18 +189,12 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
if (!fsf_cfdc)
return -ENOMEM;
- data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL);
- if (!data) {
- retval = -ENOMEM;
+ data = memdup_user(data_user, sizeof(*data_user));
+ if (IS_ERR(data)) {
+ retval = PTR_ERR(data);
goto no_mem_sense;
}
- retval = copy_from_user(data, data_user, sizeof(*data));
- if (retval) {
- retval = -EFAULT;
- goto free_buffer;
- }
-
if (data->signature != 0xCFDCACDF) {
retval = -EINVAL;
goto free_buffer;
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 075852f6968c..a86117b0d6e1 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -155,6 +155,8 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
if (scsi_cmnd) {
response->u.fcp.cmnd = (unsigned long)scsi_cmnd;
response->u.fcp.serial = scsi_cmnd->serial_number;
+ response->u.fcp.data_dir =
+ qtcb->bottom.io.data_direction;
}
break;
@@ -326,6 +328,7 @@ static void zfcp_dbf_hba_view_response(char **p,
case FSF_QTCB_FCP_CMND:
if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
break;
+ zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir);
zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
*p += sprintf(*p, "\n");
@@ -1005,7 +1008,7 @@ int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
char dbf_name[DEBUG_MAX_NAME_LEN];
struct zfcp_dbf *dbf;
- dbf = kmalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
+ dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
if (!dbf)
return -ENOMEM;
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 457e046f2d28..2bcc3403126a 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -111,6 +111,7 @@ struct zfcp_dbf_hba_record_response {
struct {
u64 cmnd;
u64 serial;
+ u32 data_dir;
} fcp;
struct {
u64 wwpn;
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 9fa1b064893e..e1c6b6e05a75 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -37,6 +37,7 @@
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include "zfcp_fsf.h"
+#include "zfcp_fc.h"
#include "zfcp_qdio.h"
struct zfcp_reqlist;
@@ -72,10 +73,12 @@ struct zfcp_reqlist;
/* adapter status */
#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
+#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
+#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
/* remote port status */
#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
@@ -190,6 +193,7 @@ struct zfcp_adapter {
struct service_level service_level;
struct workqueue_struct *work_queue;
struct device_dma_parameters dma_parms;
+ struct zfcp_fc_events events;
};
struct zfcp_port {
@@ -212,6 +216,7 @@ struct zfcp_port {
struct work_struct test_link_work;
struct work_struct rport_work;
enum { RPORT_NONE, RPORT_ADD, RPORT_DEL } rport_task;
+ unsigned int starget_id;
};
struct zfcp_unit {
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index fd068bc1bd0a..160b432c907f 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -141,9 +141,13 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
/* fall through */
- case ZFCP_ERP_ACTION_REOPEN_PORT:
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
p_status = atomic_read(&port->status);
+ if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
+ need = ZFCP_ERP_ACTION_REOPEN_PORT;
+ /* fall through */
+ case ZFCP_ERP_ACTION_REOPEN_PORT:
+ p_status = atomic_read(&port->status);
if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
return 0;
a_status = atomic_read(&adapter->status);
@@ -893,8 +897,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
}
if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
port->d_id = 0;
- _zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL);
- return ZFCP_ERP_EXIT;
+ return ZFCP_ERP_FAILED;
}
/* fall through otherwise */
}
@@ -1188,19 +1191,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
switch (act->action) {
case ZFCP_ERP_ACTION_REOPEN_UNIT:
- if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
- get_device(&unit->dev);
- if (scsi_queue_work(unit->port->adapter->scsi_host,
- &unit->scsi_work) <= 0)
- put_device(&unit->dev);
- }
put_device(&unit->dev);
break;
- case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
case ZFCP_ERP_ACTION_REOPEN_PORT:
if (result == ZFCP_ERP_SUCCEEDED)
zfcp_scsi_schedule_rport_register(port);
+ /* fall through */
+ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
put_device(&port->dev);
break;
@@ -1247,6 +1245,11 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
goto unlock;
}
+ if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
+ retval = ZFCP_ERP_FAILED;
+ goto check_target;
+ }
+
zfcp_erp_action_to_running(erp_action);
/* no lock to allow for blocking operations */
@@ -1279,6 +1282,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
goto unlock;
}
+check_target:
retval = zfcp_erp_strategy_check_target(erp_action, retval);
zfcp_erp_action_dequeue(erp_action);
retval = zfcp_erp_strategy_statechange(erp_action, retval);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 48a8f93b72f5..3b93239c6f69 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -96,6 +96,9 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
extern void zfcp_erp_timeout_handler(unsigned long);
/* zfcp_fc.c */
+extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
+ enum fc_host_event_code event_code, u32);
+extern void zfcp_fc_post_event(struct work_struct *);
extern void zfcp_fc_scan_ports(struct work_struct *);
extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
extern void zfcp_fc_port_did_lookup(struct work_struct *);
@@ -146,9 +149,10 @@ extern void zfcp_qdio_destroy(struct zfcp_qdio *);
extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
- struct scatterlist *, int);
+ struct scatterlist *);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
+extern void zfcp_qdio_siosl(struct zfcp_adapter *);
/* zfcp_scsi.c */
extern struct zfcp_data zfcp_data;
@@ -159,7 +163,10 @@ extern void zfcp_scsi_rport_work(struct work_struct *);
extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
-extern void zfcp_scsi_scan(struct work_struct *);
+extern void zfcp_scsi_scan(struct zfcp_unit *);
+extern void zfcp_scsi_scan_work(struct work_struct *);
+extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
+extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
/* zfcp_sysfs.c */
extern struct attribute_group zfcp_sysfs_unit_attrs;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 6f8ab43a4856..6f3ed2b9a349 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -23,6 +23,58 @@ static u32 zfcp_fc_rscn_range_mask[] = {
[ELS_ADDR_FMT_FAB] = 0x000000,
};
+/**
+ * zfcp_fc_post_event - post event to userspace via fc_transport
+ * @work: work struct with enqueued events
+ */
+void zfcp_fc_post_event(struct work_struct *work)
+{
+ struct zfcp_fc_event *event = NULL, *tmp = NULL;
+ LIST_HEAD(tmp_lh);
+ struct zfcp_fc_events *events = container_of(work,
+ struct zfcp_fc_events, work);
+ struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
+ events);
+
+ spin_lock_bh(&events->list_lock);
+ list_splice_init(&events->list, &tmp_lh);
+ spin_unlock_bh(&events->list_lock);
+
+ list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
+ fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
+ event->code, event->data);
+ list_del(&event->list);
+ kfree(event);
+ }
+
+}
+
+/**
+ * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
+ * @adapter: The adapter where to enqueue the event
+ * @event_code: The event code (as defined in fc_host_event_code in
+ * scsi_transport_fc.h)
+ * @event_data: The event data (e.g. n_port page in case of els)
+ */
+void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
+ enum fc_host_event_code event_code, u32 event_data)
+{
+ struct zfcp_fc_event *event;
+
+ event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
+ if (!event)
+ return;
+
+ event->code = event_code;
+ event->data = event_data;
+
+ spin_lock(&adapter->events.list_lock);
+ list_add_tail(&event->list, &adapter->events.list);
+ spin_unlock(&adapter->events.list_lock);
+
+ queue_work(adapter->work_queue, &adapter->events.work);
+}
+
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
if (mutex_lock_interruptible(&wka_port->mutex))
@@ -148,6 +200,8 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
page);
+ zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
+ *(u32 *)page);
}
queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
}
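
The new zfcp_fc_enqueue_event()/zfcp_fc_post_event() pair above follows a common two-stage pattern: interrupt context allocates a small event node with GFP_ATOMIC and appends it to a list under a spinlock, while a workqueue item later splices the whole list out and posts the events without holding the lock. A minimal user-space sketch of the same pattern, with a pthread mutex standing in for the spinlock and printf for fc_host_post_event() (all names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	unsigned int code;
	unsigned int data;
	struct event *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct event *head, **tailp = &head;	/* FIFO, like list_add_tail */

/* producer side: short critical section, drop the event on alloc failure */
static void enqueue_event(unsigned int code, unsigned int data)
{
	struct event *ev = malloc(sizeof(*ev));

	if (!ev)
		return;
	ev->code = code;
	ev->data = data;
	ev->next = NULL;

	pthread_mutex_lock(&list_lock);
	*tailp = ev;
	tailp = &ev->next;
	pthread_mutex_unlock(&list_lock);
}

/* consumer side: detach the whole list, then post events unlocked */
static void post_events(void)
{
	struct event *ev, *next;

	pthread_mutex_lock(&list_lock);
	ev = head;
	head = NULL;
	tailp = &head;
	pthread_mutex_unlock(&list_lock);

	for (; ev; ev = next) {
		next = ev->next;
		printf("post event code=%u data=%u\n", ev->code, ev->data);
		free(ev);
	}
}

int main(void)
{
	enqueue_event(1, 0);	/* e.g. link down */
	enqueue_event(2, 0);	/* e.g. link up */
	post_events();
	return 0;
}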
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 0747b087390d..938d50360166 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -30,6 +30,30 @@
#define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000)
/**
+ * struct zfcp_fc_event - FC HBAAPI event for internal queueing from irq context
+ * @code: Event code
+ * @data: Event data
+ * @list: list_head for zfcp_fc_events list
+ */
+struct zfcp_fc_event {
+ enum fc_host_event_code code;
+ u32 data;
+ struct list_head list;
+};
+
+/**
+ * struct zfcp_fc_events - Infrastructure for posting FC events from irq context
+ * @list: List for queueing of events from irq context to workqueue
+ * @list_lock: Lock for event list
+ * @work: work_struct for forwarding events in workqueue
+*/
+struct zfcp_fc_events {
+ struct list_head list;
+ spinlock_t list_lock;
+ struct work_struct work;
+};
+
+/**
* struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
* @ct_hdr: FC GS common transport header
* @gid_pn: GID_PN request
@@ -196,6 +220,9 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
fcp->fc_dl = scsi_bufflen(scsi);
+
+ if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1)
+ fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8;
}
/**
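
The zfcp_fc_scsi_to_fcp() hunk above widens the FCP_DL field for DIF type 1: the adapter moves one extra 8-byte protection tuple per logical block, so the announced data length has to grow by fc_dl / sector_size * 8. A small stand-alone sketch of that arithmetic with made-up sizes:

#include <stdio.h>

int main(void)
{
        unsigned int sector_size = 512;         /* logical block size of the device (example) */
        unsigned int fc_dl = 64 * 1024;         /* 64 KiB of user data (example) */

        /* one 8-byte DIF tuple per logical block is added to FCP_DL */
        fc_dl += fc_dl / sector_size * 8;
        printf("fc_dl including protection data: %u\n", fc_dl);        /* 66560 */
        return 0;
}
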
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 71663fb77310..9d1d7d1842ce 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -21,6 +21,7 @@
static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+ zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"fsrth_1", NULL);
}
@@ -274,6 +275,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
break;
case FSF_STATUS_READ_LINK_DOWN:
zfcp_fsf_status_read_link_down(req);
+ zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
break;
case FSF_STATUS_READ_LINK_UP:
dev_info(&adapter->ccw_device->dev,
@@ -286,6 +288,8 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED,
"fssrh_2", req);
+ zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
+
break;
case FSF_STATUS_READ_NOTIFICATION_LOST:
if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
@@ -323,6 +327,7 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
dev_err(&req->adapter->ccw_device->dev,
"The FCP adapter reported a problem "
"that cannot be recovered\n");
+ zfcp_qdio_siosl(req->adapter);
zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
break;
}
@@ -413,6 +418,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
dev_err(&adapter->ccw_device->dev,
"0x%x is not a valid transfer protocol status\n",
qtcb->prefix.prot_status);
+ zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
}
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -495,7 +501,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
adapter->hydra_version = bottom->adapter_type;
- adapter->timer_ticks = bottom->timer_interval;
+ adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
(u16)FSF_STATUS_READS_RECOM);
@@ -523,6 +529,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
return -EIO;
}
+ zfcp_scsi_set_prot(adapter);
+
return 0;
}
@@ -732,7 +740,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
zfcp_reqlist_add(adapter->req_list, req);
- req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
+ req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
req->issued = get_clock();
if (zfcp_qdio_send(qdio, &req->qdio_req)) {
del_timer(&req->timer);
@@ -959,8 +967,7 @@ static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
struct scatterlist *sg_req,
- struct scatterlist *sg_resp,
- int max_sbals)
+ struct scatterlist *sg_resp)
{
struct zfcp_adapter *adapter = req->adapter;
u32 feat = adapter->adapter_features;
@@ -983,18 +990,19 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
return 0;
}
- bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
- sg_req, max_sbals);
+ bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, sg_req);
if (bytes <= 0)
return -EIO;
+ zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
req->qtcb->bottom.support.req_buf_length = bytes;
zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
- sg_resp, max_sbals);
+ sg_resp);
req->qtcb->bottom.support.resp_buf_length = bytes;
if (bytes <= 0)
return -EIO;
+ zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
return 0;
}
@@ -1002,11 +1010,11 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
struct scatterlist *sg_req,
struct scatterlist *sg_resp,
- int max_sbals, unsigned int timeout)
+ unsigned int timeout)
{
int ret;
- ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
+ ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
if (ret)
return ret;
@@ -1046,8 +1054,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
- ZFCP_FSF_MAX_SBALS_PER_REQ, timeout);
+ ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
if (ret)
goto failed_send;
@@ -1143,7 +1150,10 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout);
+
+ zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
+
+ ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
if (ret)
goto failed_send;
@@ -2025,7 +2035,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
blktrc.flags |= ZFCP_BLK_REQ_ERROR;
- blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
+ blktrc.inb_usage = 0;
blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
@@ -2035,9 +2045,13 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
blktrc.fabric_lat = lat_in->fabric_lat * ticks;
switch (req->qtcb->bottom.io.data_direction) {
+ case FSF_DATADIR_DIF_READ_STRIP:
+ case FSF_DATADIR_DIF_READ_CONVERT:
case FSF_DATADIR_READ:
lat = &unit->latencies.read;
break;
+ case FSF_DATADIR_DIF_WRITE_INSERT:
+ case FSF_DATADIR_DIF_WRITE_CONVERT:
case FSF_DATADIR_WRITE:
lat = &unit->latencies.write;
break;
@@ -2078,6 +2092,21 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
goto skip_fsfstatus;
}
+ switch (req->qtcb->header.fsf_status) {
+ case FSF_INCONSISTENT_PROT_DATA:
+ case FSF_INVALID_PROT_PARM:
+ set_host_byte(scpnt, DID_ERROR);
+ goto skip_fsfstatus;
+ case FSF_BLOCK_GUARD_CHECK_FAILURE:
+ zfcp_scsi_dif_sense_error(scpnt, 0x1);
+ goto skip_fsfstatus;
+ case FSF_APP_TAG_CHECK_FAILURE:
+ zfcp_scsi_dif_sense_error(scpnt, 0x2);
+ goto skip_fsfstatus;
+ case FSF_REF_TAG_CHECK_FAILURE:
+ zfcp_scsi_dif_sense_error(scpnt, 0x3);
+ goto skip_fsfstatus;
+ }
fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
@@ -2187,6 +2216,44 @@ skip_fsfstatus:
}
}
+static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
+{
+ switch (scsi_get_prot_op(scsi_cmnd)) {
+ case SCSI_PROT_NORMAL:
+ switch (scsi_cmnd->sc_data_direction) {
+ case DMA_NONE:
+ *data_dir = FSF_DATADIR_CMND;
+ break;
+ case DMA_FROM_DEVICE:
+ *data_dir = FSF_DATADIR_READ;
+ break;
+ case DMA_TO_DEVICE:
+ *data_dir = FSF_DATADIR_WRITE;
+ break;
+ case DMA_BIDIRECTIONAL:
+ return -EINVAL;
+ }
+ break;
+
+ case SCSI_PROT_READ_STRIP:
+ *data_dir = FSF_DATADIR_DIF_READ_STRIP;
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ *data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
+ break;
+ case SCSI_PROT_READ_PASS:
+ *data_dir = FSF_DATADIR_DIF_READ_CONVERT;
+ break;
+ case SCSI_PROT_WRITE_PASS:
+ *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
* @unit: unit where command is sent to
@@ -2198,16 +2265,17 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
struct zfcp_fsf_req *req;
struct fcp_cmnd *fcp_cmnd;
unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
- int real_bytes, retval = -EIO;
+ int real_bytes, retval = -EIO, dix_bytes = 0;
struct zfcp_adapter *adapter = unit->port->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
+ struct fsf_qtcb_bottom_io *io;
if (unlikely(!(atomic_read(&unit->status) &
ZFCP_STATUS_COMMON_UNBLOCKED)))
return -EBUSY;
spin_lock(&qdio->req_q_lock);
- if (atomic_read(&qdio->req_q.count) <= 0) {
+ if (atomic_read(&qdio->req_q_free) <= 0) {
atomic_inc(&qdio->req_q_full);
goto out;
}
@@ -2223,56 +2291,45 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
goto out;
}
+ scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
+
+ io = &req->qtcb->bottom.io;
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req->unit = unit;
req->data = scsi_cmnd;
req->handler = zfcp_fsf_send_fcp_command_handler;
req->qtcb->header.lun_handle = unit->handle;
req->qtcb->header.port_handle = unit->port->handle;
- req->qtcb->bottom.io.service_class = FSF_CLASS_3;
- req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
+ io->service_class = FSF_CLASS_3;
+ io->fcp_cmnd_length = FCP_CMND_LEN;
- scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
-
- /*
- * set depending on data direction:
- * data direction bits in SBALE (SB Type)
- * data direction bits in QTCB
- */
- switch (scsi_cmnd->sc_data_direction) {
- case DMA_NONE:
- req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
- break;
- case DMA_FROM_DEVICE:
- req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
- break;
- case DMA_TO_DEVICE:
- req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
- break;
- case DMA_BIDIRECTIONAL:
- goto failed_scsi_cmnd;
+ if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
+ io->data_block_length = scsi_cmnd->device->sector_size;
+ io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
}
+ zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
+
get_device(&unit->dev);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
+ if (scsi_prot_sg_count(scsi_cmnd)) {
+ zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+ scsi_prot_sg_count(scsi_cmnd));
+ dix_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+ scsi_prot_sglist(scsi_cmnd));
+ io->prot_data_length = dix_bytes;
+ }
+
real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
- scsi_sglist(scsi_cmnd),
- ZFCP_FSF_MAX_SBALS_PER_REQ);
- if (unlikely(real_bytes < 0)) {
- if (req->qdio_req.sbal_number >= ZFCP_FSF_MAX_SBALS_PER_REQ) {
- dev_err(&adapter->ccw_device->dev,
- "Oversize data package, unit 0x%016Lx "
- "on port 0x%016Lx closed\n",
- (unsigned long long)unit->fcp_lun,
- (unsigned long long)unit->port->wwpn);
- zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req);
- retval = -EINVAL;
- }
+ scsi_sglist(scsi_cmnd));
+
+ if (unlikely(real_bytes < 0) || unlikely(dix_bytes < 0))
goto failed_scsi_cmnd;
- }
+
+ zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
retval = zfcp_fsf_req_send(req);
if (unlikely(retval))
@@ -2391,13 +2448,13 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
bottom->option = fsf_cfdc->option;
- bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
- fsf_cfdc->sg,
- ZFCP_FSF_MAX_SBALS_PER_REQ);
+ bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
+
if (bytes != ZFCP_CFDC_MAX_SIZE) {
zfcp_fsf_req_free(req);
goto out;
}
+ zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
@@ -2419,7 +2476,7 @@ out:
void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
{
struct zfcp_adapter *adapter = qdio->adapter;
- struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
+ struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *fsf_req;
unsigned long req_id;
@@ -2431,17 +2488,17 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
req_id = (unsigned long) sbale->addr;
fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
- if (!fsf_req)
+ if (!fsf_req) {
/*
 * Unknown request means that we have potential memory
* corruption and must stop the machine immediately.
*/
+ zfcp_qdio_siosl(adapter);
panic("error: unknown req_id (%lx) on adapter %s.\n",
req_id, dev_name(&adapter->ccw_device->dev));
+ }
fsf_req->qdio_req.sbal_response = sbal_idx;
- fsf_req->qdio_req.qdio_inb_usage =
- atomic_read(&qdio->resp_q.count);
zfcp_fsf_req_complete(fsf_req);
if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
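
One detail from the zfcp_fsf.c changes above: the exchange-config handler now masks the timer interval reported by the adapter with ZFCP_FSF_TIMER_INT_MASK (0x3FFF) before storing it in adapter->timer_ticks, so only the low 14 bits are used as the tick value. The sketch below shows nothing more than that masking; the example input and the assumption that the discarded high bits carry unrelated flag information are hypothetical.

#include <stdio.h>

#define ZFCP_FSF_TIMER_INT_MASK 0x3FFF

int main(void)
{
        unsigned int timer_interval = 0x80000064;       /* hypothetical reported value */
        unsigned int timer_ticks = timer_interval & ZFCP_FSF_TIMER_INT_MASK;

        printf("timer_ticks = %#x\n", timer_ticks);     /* prints 0x64 */
        return 0;
}
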
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 519083fd6e89..db8c85382dca 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -80,11 +80,15 @@
#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061
#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062
#define FSF_SBAL_MISMATCH 0x00000063
+#define FSF_INCONSISTENT_PROT_DATA 0x00000070
+#define FSF_INVALID_PROT_PARM 0x00000071
+#define FSF_BLOCK_GUARD_CHECK_FAILURE 0x00000081
+#define FSF_APP_TAG_CHECK_FAILURE 0x00000082
+#define FSF_REF_TAG_CHECK_FAILURE 0x00000083
#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
#define FSF_UNKNOWN_COMMAND 0x000000E2
#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
#define FSF_INVALID_COMMAND_OPTION 0x000000E5
-/* #define FSF_ERROR 0x000000FF */
#define FSF_PROT_STATUS_QUAL_SIZE 16
#define FSF_STATUS_QUALIFIER_SIZE 16
@@ -147,18 +151,17 @@
#define FSF_DATADIR_WRITE 0x00000001
#define FSF_DATADIR_READ 0x00000002
#define FSF_DATADIR_CMND 0x00000004
+#define FSF_DATADIR_DIF_WRITE_INSERT 0x00000009
+#define FSF_DATADIR_DIF_READ_STRIP 0x0000000a
+#define FSF_DATADIR_DIF_WRITE_CONVERT 0x0000000b
+#define FSF_DATADIR_DIF_READ_CONVERT 0x0000000c
+
+/* data protection control flags */
+#define FSF_APP_TAG_CHECK_ENABLE 0x10
/* fc service class */
#define FSF_CLASS_3 0x00000003
-/* SBAL chaining */
-#define ZFCP_FSF_MAX_SBALS_PER_REQ 36
-
-/* max. number of (data buffer) SBALEs in largest SBAL chain
- * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
-#define ZFCP_FSF_MAX_SBALES_PER_REQ \
- (ZFCP_FSF_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
-
/* logging space behind QTCB */
#define FSF_QTCB_LOG_SIZE 1024
@@ -170,6 +173,8 @@
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
#define FSF_FEATURE_UPDATE_ALERT 0x00000100
#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
+#define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000
+#define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000
/* host connection features */
#define FSF_FEATURE_NPIV_MODE 0x00000001
@@ -324,9 +329,14 @@ struct fsf_qtcb_header {
struct fsf_qtcb_bottom_io {
u32 data_direction;
u32 service_class;
- u8 res1[8];
+ u8 res1;
+ u8 data_prot_flags;
+ u16 app_tag_value;
+ u32 ref_tag_value;
u32 fcp_cmnd_length;
- u8 res2[12];
+ u32 data_block_length;
+ u32 prot_data_length;
+ u8 res2[4];
u8 fcp_cmnd[FSF_FCP_CMND_SIZE];
u8 fcp_rsp[FSF_FCP_RSP_SIZE];
u8 res3[64];
@@ -352,6 +362,8 @@ struct fsf_qtcb_bottom_support {
u8 els[256];
} __attribute__ ((packed));
+#define ZFCP_FSF_TIMER_INT_MASK 0x3FFF
+
struct fsf_qtcb_bottom_config {
u32 lic_version;
u32 feature_selection;
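
The struct fsf_qtcb_bottom_io rework above carves the new DIF fields (data_prot_flags, app_tag_value, ref_tag_value, data_block_length, prot_data_length) out of previously reserved bytes, so the structure size and the offsets of the members that follow are unchanged. A stand-alone sketch modelling only the head of the structure; both variants come out at 32 bytes:

#include <stdint.h>
#include <stdio.h>

struct io_bottom_old {
        uint32_t data_direction;
        uint32_t service_class;
        uint8_t  res1[8];
        uint32_t fcp_cmnd_length;
        uint8_t  res2[12];
} __attribute__((packed));

struct io_bottom_new {
        uint32_t data_direction;
        uint32_t service_class;
        uint8_t  res1;
        uint8_t  data_prot_flags;
        uint16_t app_tag_value;
        uint32_t ref_tag_value;
        uint32_t fcp_cmnd_length;
        uint32_t data_block_length;
        uint32_t prot_data_length;
        uint8_t  res2[4];
} __attribute__((packed));

int main(void)
{
        printf("old: %zu bytes, new: %zu bytes\n",
               sizeof(struct io_bottom_old), sizeof(struct io_bottom_new));
        return 0;
}
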
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 6fa5e0453176..b2635759721c 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -30,12 +30,15 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
return 0;
}
-static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
+static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
+ unsigned int qdio_err)
{
struct zfcp_adapter *adapter = qdio->adapter;
dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
+ if (qdio_err & QDIO_ERROR_SLSB_STATE)
+ zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
@@ -55,72 +58,47 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
unsigned long long now, span;
- int free, used;
+ int used;
spin_lock(&qdio->stat_lock);
now = get_clock_monotonic();
span = (now - qdio->req_q_time) >> 12;
- free = atomic_read(&qdio->req_q.count);
- used = QDIO_MAX_BUFFERS_PER_Q - free;
+ used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
qdio->req_q_util += used * span;
qdio->req_q_time = now;
spin_unlock(&qdio->stat_lock);
}
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
- int queue_no, int first, int count,
+ int queue_no, int idx, int count,
unsigned long parm)
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
- struct zfcp_qdio_queue *queue = &qdio->req_q;
if (unlikely(qdio_err)) {
- zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
- count);
- zfcp_qdio_handler_error(qdio, "qdireq1");
+ zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
+ zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
return;
}
/* cleanup all SBALs being program-owned now */
- zfcp_qdio_zero_sbals(queue->sbal, first, count);
+ zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
zfcp_qdio_account(qdio);
- atomic_add(count, &queue->count);
+ atomic_add(count, &qdio->req_q_free);
wake_up(&qdio->req_q_wq);
}
-static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
-{
- struct zfcp_qdio_queue *queue = &qdio->resp_q;
- struct ccw_device *cdev = qdio->adapter->ccw_device;
- u8 count, start = queue->first;
- unsigned int retval;
-
- count = atomic_read(&queue->count) + processed;
-
- retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);
-
- if (unlikely(retval)) {
- atomic_set(&queue->count, count);
- zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
- } else {
- queue->first += count;
- queue->first %= QDIO_MAX_BUFFERS_PER_Q;
- atomic_set(&queue->count, 0);
- }
-}
-
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
- int queue_no, int first, int count,
+ int queue_no, int idx, int count,
unsigned long parm)
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
int sbal_idx, sbal_no;
if (unlikely(qdio_err)) {
- zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
- count);
- zfcp_qdio_handler_error(qdio, "qdires1");
+ zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
+ zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
return;
}
@@ -129,25 +107,16 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
* returned by QDIO layer
*/
for (sbal_no = 0; sbal_no < count; sbal_no++) {
- sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
+ sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
/* go through all SBALEs of SBAL */
zfcp_fsf_reqid_check(qdio, sbal_idx);
}
/*
- * put range of SBALs back to response queue
- * (including SBALs which have already been free before)
+ * put SBALs back to response queue
*/
- zfcp_qdio_resp_put_back(qdio, count);
-}
-
-static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
- struct zfcp_qdio_req *q_req, int max_sbals)
-{
- int count = atomic_read(&qdio->req_q.count);
- count = min(count, max_sbals);
- q_req->sbal_limit = (q_req->sbal_first + count - 1)
- % QDIO_MAX_BUFFERS_PER_Q;
+ if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2", NULL);
}
static struct qdio_buffer_element *
@@ -173,6 +142,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
/* keep this requests number of SBALs up-to-date */
q_req->sbal_number++;
+ BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);
/* start at first SBALE of new SBAL */
q_req->sbale_curr = 0;
@@ -193,17 +163,6 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
return zfcp_qdio_sbale_curr(qdio, q_req);
}
-static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
- struct zfcp_qdio_req *q_req)
-{
- struct qdio_buffer **sbal = qdio->req_q.sbal;
- int first = q_req->sbal_first;
- int last = q_req->sbal_last;
- int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
- QDIO_MAX_BUFFERS_PER_Q + 1;
- zfcp_qdio_zero_sbals(sbal, first, count);
-}
-
/**
* zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
* @qdio: pointer to struct zfcp_qdio
@@ -213,14 +172,11 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
 * Returns: number of bytes, or error (negative)
*/
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- struct scatterlist *sg, int max_sbals)
+ struct scatterlist *sg)
{
struct qdio_buffer_element *sbale;
int bytes = 0;
- /* figure out last allowed SBAL */
- zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);
-
/* set storage-block type for this request */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->flags |= q_req->sbtype;
@@ -229,7 +185,8 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
sbale = zfcp_qdio_sbale_next(qdio, q_req);
if (!sbale) {
atomic_inc(&qdio->req_q_full);
- zfcp_qdio_undo_sbals(qdio, q_req);
+ zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
+ q_req->sbal_number);
return -EINVAL;
}
@@ -239,19 +196,13 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
bytes += sg->length;
}
- /* assume that no other SBALEs are to follow in the same SBAL */
- sbale = zfcp_qdio_sbale_curr(qdio, q_req);
- sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
-
return bytes;
}
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
- struct zfcp_qdio_queue *req_q = &qdio->req_q;
-
spin_lock_bh(&qdio->req_q_lock);
- if (atomic_read(&req_q->count) ||
+ if (atomic_read(&qdio->req_q_free) ||
!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return 1;
spin_unlock_bh(&qdio->req_q_lock);
@@ -300,25 +251,25 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
*/
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
- struct zfcp_qdio_queue *req_q = &qdio->req_q;
- int first = q_req->sbal_first;
- int count = q_req->sbal_number;
int retval;
- unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
+ u8 sbal_number = q_req->sbal_number;
zfcp_qdio_account(qdio);
- retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first,
- count);
+ retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
+ q_req->sbal_first, sbal_number);
+
if (unlikely(retval)) {
- zfcp_qdio_zero_sbals(req_q->sbal, first, count);
+ zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
+ sbal_number);
return retval;
}
/* account for transferred buffers */
- atomic_sub(count, &req_q->count);
- req_q->first += count;
- req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
+ atomic_sub(sbal_number, &qdio->req_q_free);
+ qdio->req_q_idx += sbal_number;
+ qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
+
return 0;
}
@@ -331,6 +282,7 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
id->q_format = QDIO_ZFCP_QFMT;
memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
ASCEBC(id->adapter_name, 8);
+ id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
id->qib_param_field_format = 0;
id->qib_param_field = NULL;
id->input_slib_elements = NULL;
@@ -340,10 +292,10 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
id->input_handler = zfcp_qdio_int_resp;
id->output_handler = zfcp_qdio_int_req;
id->int_parm = (unsigned long) qdio;
- id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal);
- id->output_sbal_addr_array = (void **) (qdio->req_q.sbal);
-
+ id->input_sbal_addr_array = (void **) (qdio->res_q);
+ id->output_sbal_addr_array = (void **) (qdio->req_q);
}
+
/**
* zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
* @adapter: pointer to struct zfcp_adapter
@@ -354,8 +306,8 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
struct qdio_initialize init_data;
- if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) ||
- zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal))
+ if (zfcp_qdio_buffers_enqueue(qdio->req_q) ||
+ zfcp_qdio_buffers_enqueue(qdio->res_q))
return -ENOMEM;
zfcp_qdio_setup_init_data(&init_data, qdio);
@@ -369,34 +321,30 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
*/
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
- struct zfcp_qdio_queue *req_q;
- int first, count;
+ struct zfcp_adapter *adapter = qdio->adapter;
+ int idx, count;
- if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+ if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return;
/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
- req_q = &qdio->req_q;
spin_lock_bh(&qdio->req_q_lock);
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+ atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock_bh(&qdio->req_q_lock);
wake_up(&qdio->req_q_wq);
- qdio_shutdown(qdio->adapter->ccw_device,
- QDIO_FLAG_CLEANUP_USING_CLEAR);
+ qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
/* cleanup used outbound sbals */
- count = atomic_read(&req_q->count);
+ count = atomic_read(&qdio->req_q_free);
if (count < QDIO_MAX_BUFFERS_PER_Q) {
- first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
+ idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
count = QDIO_MAX_BUFFERS_PER_Q - count;
- zfcp_qdio_zero_sbals(req_q->sbal, first, count);
+ zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
}
- req_q->first = 0;
- atomic_set(&req_q->count, 0);
- qdio->resp_q.first = 0;
- atomic_set(&qdio->resp_q.count, 0);
+ qdio->req_q_idx = 0;
+ atomic_set(&qdio->req_q_free, 0);
}
/**
@@ -408,34 +356,45 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
struct qdio_buffer_element *sbale;
struct qdio_initialize init_data;
- struct ccw_device *cdev = qdio->adapter->ccw_device;
+ struct zfcp_adapter *adapter = qdio->adapter;
+ struct ccw_device *cdev = adapter->ccw_device;
+ struct qdio_ssqd_desc ssqd;
int cc;
- if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
+ if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
return -EIO;
+ atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+ &qdio->adapter->status);
+
zfcp_qdio_setup_init_data(&init_data, qdio);
if (qdio_establish(&init_data))
goto failed_establish;
+ if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
+ goto failed_qdio;
+
+ if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
+ &qdio->adapter->status);
+
if (qdio_activate(cdev))
goto failed_qdio;
for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
- sbale = &(qdio->resp_q.sbal[cc]->element[0]);
+ sbale = &(qdio->res_q[cc]->element[0]);
sbale->length = 0;
sbale->flags = SBAL_FLAGS_LAST_ENTRY;
sbale->addr = NULL;
}
- if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0,
- QDIO_MAX_BUFFERS_PER_Q))
+ if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
goto failed_qdio;
/* set index of first available SBAL / number of available SBALs */
- qdio->req_q.first = 0;
- atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
+ qdio->req_q_idx = 0;
+ atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
return 0;
@@ -449,7 +408,6 @@ failed_establish:
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
- struct qdio_buffer **sbal_req, **sbal_resp;
int p;
if (!qdio)
@@ -458,12 +416,9 @@ void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
if (qdio->adapter->ccw_device)
qdio_free(qdio->adapter->ccw_device);
- sbal_req = qdio->req_q.sbal;
- sbal_resp = qdio->resp_q.sbal;
-
for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
- free_page((unsigned long) sbal_req[p]);
- free_page((unsigned long) sbal_resp[p]);
+ free_page((unsigned long) qdio->req_q[p]);
+ free_page((unsigned long) qdio->res_q[p]);
}
kfree(qdio);
@@ -491,3 +446,26 @@ int zfcp_qdio_setup(struct zfcp_adapter *adapter)
return 0;
}
+/**
+ * zfcp_qdio_siosl - Trigger logging in FCP channel
+ * @adapter: The zfcp_adapter where to trigger logging
+ *
+ * Call the cio siosl function to trigger hardware logging. This
+ * wrapper function sets a flag to ensure hardware logging is only
+ * triggered once before going through qdio shutdown.
+ *
+ * The triggers are always run from qdio tasklet context, so no
+ * additional synchronization is necessary.
+ */
+void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
+{
+ int rc;
+
+ if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
+ return;
+
+ rc = ccw_device_siosl(adapter->ccw_device);
+ if (!rc)
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+ &adapter->status);
+}
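
With the changes above, zfcp_qdio.c keeps only a free counter (req_q_free) and a circular index (req_q_idx) for the request queue, and the response queue no longer tracks a fill level because inbound buffers are handed straight back to QDIO in the interrupt handler. A simplified sketch of the request-queue arithmetic, assuming the usual 128 buffers per QDIO queue:

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128

int main(void)
{
        unsigned int req_q_free = QDIO_MAX_BUFFERS_PER_Q;       /* all SBALs free after open */
        unsigned int req_q_idx = 0;                             /* next SBAL handed out */
        unsigned int sbal_number = 3;                           /* SBALs used by one request */

        /* what zfcp_qdio_send() does on success */
        req_q_free -= sbal_number;
        req_q_idx = (req_q_idx + sbal_number) % QDIO_MAX_BUFFERS_PER_Q;

        /* what zfcp_qdio_int_req() does when the buffers come back */
        req_q_free += sbal_number;

        printf("free=%u idx=%u\n", req_q_free, req_q_idx);
        return 0;
}
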
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 138fba577b48..2297d8d3e947 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -19,22 +19,20 @@
/* index of last SBALE (with respect to DMQ bug workaround) */
#define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)
-/**
- * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
- * @sbal: qdio buffers
- * @first: index of next free buffer in queue
- * @count: number of free buffers in queue
- */
-struct zfcp_qdio_queue {
- struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
- u8 first;
- atomic_t count;
-};
+/* Max SBALS for chaining */
+#define ZFCP_QDIO_MAX_SBALS_PER_REQ 36
+
+/* max. number of (data buffer) SBALEs in largest SBAL chain
+ * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
+#define ZFCP_QDIO_MAX_SBALES_PER_REQ \
+ (ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
/**
* struct zfcp_qdio - basic qdio data structure
- * @resp_q: response queue
+ * @res_q: response queue
* @req_q: request queue
+ * @req_q_idx: index of next free buffer
+ * @req_q_free: number of free buffers in queue
* @stat_lock: lock to protect req_q_util and req_q_time
* @req_q_lock: lock to serialize access to request queue
* @req_q_time: time of last fill level change
@@ -44,8 +42,10 @@ struct zfcp_qdio_queue {
* @adapter: adapter used in conjunction with this qdio structure
*/
struct zfcp_qdio {
- struct zfcp_qdio_queue resp_q;
- struct zfcp_qdio_queue req_q;
+ struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q];
+ struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q];
+ u8 req_q_idx;
+ atomic_t req_q_free;
spinlock_t stat_lock;
spinlock_t req_q_lock;
unsigned long long req_q_time;
@@ -65,7 +65,6 @@ struct zfcp_qdio {
* @sbale_curr: current sbale at creation of this request
* @sbal_response: sbal used in interrupt
* @qdio_outb_usage: usage of outbound queue
- * @qdio_inb_usage: usage of inbound queue
*/
struct zfcp_qdio_req {
u32 sbtype;
@@ -76,22 +75,9 @@ struct zfcp_qdio_req {
u8 sbale_curr;
u8 sbal_response;
u16 qdio_outb_usage;
- u16 qdio_inb_usage;
};
/**
- * zfcp_qdio_sbale - return pointer to sbale in qdio queue
- * @q: queue where to find sbal
- * @sbal_idx: sbal index in queue
- * @sbale_idx: sbale index in sbal
- */
-static inline struct qdio_buffer_element *
-zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
-{
- return &q->sbal[sbal_idx]->element[sbale_idx];
-}
-
-/**
* zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
* @qdio: pointer to struct zfcp_qdio
* @q_rec: pointer to struct zfcp_qdio_req
@@ -100,7 +86,7 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
- return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
+ return &qdio->req_q[q_req->sbal_last]->element[0];
}
/**
@@ -112,8 +98,7 @@ zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
- return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
- q_req->sbale_curr);
+ return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
}
/**
@@ -134,21 +119,25 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
unsigned long req_id, u32 sbtype, void *data, u32 len)
{
struct qdio_buffer_element *sbale;
+ int count = min(atomic_read(&qdio->req_q_free),
+ ZFCP_QDIO_MAX_SBALS_PER_REQ);
- q_req->sbal_first = q_req->sbal_last = qdio->req_q.first;
+ q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
q_req->sbal_number = 1;
q_req->sbtype = sbtype;
+ q_req->sbale_curr = 1;
+ q_req->sbal_limit = (q_req->sbal_first + count - 1)
+ % QDIO_MAX_BUFFERS_PER_Q;
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->addr = (void *) req_id;
- sbale->flags |= SBAL_FLAGS0_COMMAND;
- sbale->flags |= sbtype;
+ sbale->flags = SBAL_FLAGS0_COMMAND | sbtype;
- q_req->sbale_curr = 1;
+ if (unlikely(!data))
+ return;
sbale++;
sbale->addr = data;
- if (likely(data))
- sbale->length = len;
+ sbale->length = len;
}
/**
@@ -210,4 +199,36 @@ void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req)
q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL;
}
+/**
+ * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ * @max_sbals: maximum number of SBALs allowed
+ */
+static inline
+void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req, int max_sbals)
+{
+ int count = min(atomic_read(&qdio->req_q_free), max_sbals);
+
+ q_req->sbal_limit = (q_req->sbal_first + count - 1) %
+ QDIO_MAX_BUFFERS_PER_Q;
+}
+
+/**
+ * zfcp_qdio_set_data_div - set data division count
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ * @count: The data division count
+ */
+static inline
+void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req, u32 count)
+{
+ struct qdio_buffer_element *sbale;
+
+ sbale = &qdio->req_q[q_req->sbal_first]->element[0];
+ sbale->length = count;
+}
+
#endif /* ZFCP_QDIO_H */
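
ZFCP_QDIO_MAX_SBALS_PER_REQ and the derived ZFCP_QDIO_MAX_SBALES_PER_REQ moved into this header from zfcp_fsf.h. The sketch below evaluates the derived limit under the assumption that one SBAL holds 16 SBALEs with the last entry reserved for chaining, i.e. ZFCP_QDIO_MAX_SBALES_PER_SBAL would be 15; the authoritative values live in the QDIO and zfcp headers.

#include <stdio.h>

#define ZFCP_QDIO_MAX_SBALES_PER_SBAL 15        /* assumed, see note above */
#define ZFCP_QDIO_MAX_SBALS_PER_REQ   36

#define ZFCP_QDIO_MAX_SBALES_PER_REQ \
        (ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)

int main(void)
{
        /* one SBALE carries one scatter-gather segment */
        printf("max data SBALEs per request: %d\n", ZFCP_QDIO_MAX_SBALES_PER_REQ);
        /* zfcp_scsi.c sizes the SCSI host template from the same constant */
        printf("sg_tablesize=%d max_sectors=%d\n",
               ZFCP_QDIO_MAX_SBALES_PER_REQ,
               ZFCP_QDIO_MAX_SBALES_PER_REQ * 8);
        return 0;
}
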
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index be5d2c60453d..cb000c9833bb 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_fcp.h>
+#include <scsi/scsi_eh.h>
#include <asm/atomic.h>
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
@@ -22,6 +23,13 @@ static unsigned int default_depth = 32;
module_param_named(queue_depth, default_depth, uint, 0600);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
+static bool enable_dif;
+
+#ifdef CONFIG_ZFCP_DIF
+module_param_named(dif, enable_dif, bool, 0600);
+MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
+#endif
+
static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
int reason)
{
@@ -506,8 +514,10 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
 * @rport: The FC rport where to terminate I/O
*
* Abort all pending SCSI commands for a port by closing the
- * port. Using a reopen avoiding a conflict with a shutdown
- * overwriting a reopen.
+ * port. Using a reopen avoids a conflict with a shutdown
+ * overwriting a reopen. The "forced" ensures that a disappeared port
+ * is not opened again as valid due to the cached plogi data in
+ * non-NPIV mode.
*/
static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
{
@@ -519,11 +529,25 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (port) {
- zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
+ zfcp_erp_port_forced_reopen(port, 0, "sctrpi1", NULL);
put_device(&port->dev);
}
}
+static void zfcp_scsi_queue_unit_register(struct zfcp_port *port)
+{
+ struct zfcp_unit *unit;
+
+ read_lock_irq(&port->unit_list_lock);
+ list_for_each_entry(unit, &port->unit_list, list) {
+ get_device(&unit->dev);
+ if (scsi_queue_work(port->adapter->scsi_host,
+ &unit->scsi_work) <= 0)
+ put_device(&unit->dev);
+ }
+ read_unlock_irq(&port->unit_list_lock);
+}
+
static void zfcp_scsi_rport_register(struct zfcp_port *port)
{
struct fc_rport_identifiers ids;
@@ -548,6 +572,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
rport->maxframe_size = port->maxframe_size;
rport->supported_classes = port->supported_classes;
port->rport = rport;
+ port->starget_id = rport->scsi_target_id;
+
+ zfcp_scsi_queue_unit_register(port);
}
static void zfcp_scsi_rport_block(struct zfcp_port *port)
@@ -610,24 +637,74 @@ void zfcp_scsi_rport_work(struct work_struct *work)
put_device(&port->dev);
}
-
-void zfcp_scsi_scan(struct work_struct *work)
+/**
+ * zfcp_scsi_scan - Register LUN with SCSI midlayer
+ * @unit: The LUN/unit to register
+ */
+void zfcp_scsi_scan(struct zfcp_unit *unit)
{
- struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
- scsi_work);
- struct fc_rport *rport;
-
- flush_work(&unit->port->rport_work);
- rport = unit->port->rport;
+ struct fc_rport *rport = unit->port->rport;
if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
scsilun_to_int((struct scsi_lun *)
&unit->fcp_lun), 0);
+}
+void zfcp_scsi_scan_work(struct work_struct *work)
+{
+ struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
+ scsi_work);
+
+ zfcp_scsi_scan(unit);
put_device(&unit->dev);
}
+/**
+ * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
+ * @adapter: The adapter where to configure DIF/DIX for the SCSI host
+ */
+void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
+{
+ unsigned int mask = 0;
+ unsigned int data_div;
+ struct Scsi_Host *shost = adapter->scsi_host;
+
+ data_div = atomic_read(&adapter->status) &
+ ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;
+
+ if (enable_dif &&
+ adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
+ mask |= SHOST_DIF_TYPE1_PROTECTION;
+
+ if (enable_dif && data_div &&
+ adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
+ mask |= SHOST_DIX_TYPE1_PROTECTION;
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
+ shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
+ shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2;
+ }
+
+ scsi_host_set_prot(shost, mask);
+}
+
+/**
+ * zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error
+ * @scmd: The SCSI command to report the error for
+ * @ascq: The ASCQ to put in the sense buffer
+ *
+ * See the error handling in sd_done for the sense codes used here.
+ * Set DID_SOFT_ERROR to retry the request, if possible.
+ */
+void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
+{
+ scsi_build_sense_buffer(1, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x10, ascq);
+ set_driver_byte(scmd, DRIVER_SENSE);
+ scmd->result |= SAM_STAT_CHECK_CONDITION;
+ set_host_byte(scmd, DID_SOFT_ERROR);
+}
+
struct fc_function_template zfcp_transport_functions = {
.show_starget_port_id = 1,
.show_starget_port_name = 1,
@@ -677,11 +754,11 @@ struct zfcp_data zfcp_data = {
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
.can_queue = 4096,
.this_id = -1,
- .sg_tablesize = ZFCP_FSF_MAX_SBALES_PER_REQ,
+ .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
.cmd_per_lun = 1,
.use_clustering = 1,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
- .max_sectors = (ZFCP_FSF_MAX_SBALES_PER_REQ * 8),
+ .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
.shost_attrs = zfcp_sysfs_shost_attrs,
},
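
zfcp_scsi_set_prot() above announces DIF type 1 when the dif module parameter is set and the adapter offers FSF_FEATURE_DIF_PROT_TYPE1, and DIX type 1 on top of that when the adapter also offers FSF_FEATURE_DIX_PROT_TCPIP and QDIO data division is active; with DIX the SBALE budget is split between data and protection scatter-gather lists, hence the halved sg_tablesize. A reduced sketch of just the decision logic; the flag values are placeholders, not the SCSI midlayer's SHOST_* constants:

#include <stdio.h>

#define DIF_TYPE1 0x1   /* placeholder for SHOST_DIF_TYPE1_PROTECTION */
#define DIX_TYPE1 0x2   /* placeholder for SHOST_DIX_TYPE1_PROTECTION */

static unsigned int prot_mask(int enable_dif, int has_dif_feature,
                              int has_dix_feature, int data_div)
{
        unsigned int mask = 0;

        if (enable_dif && has_dif_feature)
                mask |= DIF_TYPE1;
        if (enable_dif && has_dix_feature && data_div)
                mask |= DIX_TYPE1;
        return mask;
}

int main(void)
{
        printf("mask=%#x\n", prot_mask(1, 1, 1, 0));    /* DIF only: 0x1 */
        printf("mask=%#x\n", prot_mask(1, 1, 1, 1));    /* DIF + DIX: 0x3 */
        return 0;
}
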
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index f5f60698dc4c..b4561c86e230 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -275,7 +275,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
zfcp_erp_wait(unit->port->adapter);
- flush_work(&unit->scsi_work);
+ zfcp_scsi_scan(unit);
out:
put_device(&port->dev);
return retval ? retval : (ssize_t) count;
@@ -290,6 +290,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
struct zfcp_unit *unit;
u64 fcp_lun;
int retval = -EINVAL;
+ struct scsi_device *sdev;
if (!(port && get_device(&port->dev)))
return -EBUSY;
@@ -303,8 +304,13 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
else
retval = 0;
- /* wait for possible timeout during SCSI probe */
- flush_work(&unit->scsi_work);
+ sdev = scsi_device_lookup(port->adapter->scsi_host, 0,
+ port->starget_id,
+ scsilun_to_int((struct scsi_lun *)&fcp_lun));
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
write_lock_irq(&port->unit_list_lock);
list_del(&unit->list);