author     Dmitry Torokhov <dmitry.torokhov@gmail.com>   2019-07-10 23:24:10 -0700
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>   2019-07-10 23:24:10 -0700
commit     597473720f4dc69749542bfcfed4a927a43d935e (patch)
tree       711bf773910fb93d1dd9120c633adc807685e0d8 /drivers/s390
parent     Input: atmel_mxt_ts - fix leak in mxt_update_cfg() (diff)
parent     Input: gpio_keys_polled - allow specifying name of input device (diff)
Merge branch 'next' into for-linus
Prepare input updates for 5.3 merge window.
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c             |   34
-rw-r--r--  drivers/s390/block/dasd_eckd.c        |   14
-rw-r--r--  drivers/s390/block/dasd_ioctl.c       |   22
-rw-r--r--  drivers/s390/block/dasd_proc.c        |    3
-rw-r--r--  drivers/s390/char/con3270.c           |    2
-rw-r--r--  drivers/s390/char/fs3270.c            |    3
-rw-r--r--  drivers/s390/char/raw3270.c           |    3
-rw-r--r--  drivers/s390/char/raw3270.h           |    4
-rw-r--r--  drivers/s390/char/sclp.h              |    4
-rw-r--r--  drivers/s390/char/sclp_config.c       |    2
-rw-r--r--  drivers/s390/char/sclp_early.c        |    2
-rw-r--r--  drivers/s390/char/tape_proc.c         |    7
-rw-r--r--  drivers/s390/char/tty3270.c           |    3
-rw-r--r--  drivers/s390/cio/chsc.c               |   50
-rw-r--r--  drivers/s390/cio/chsc.h               |    1
-rw-r--r--  drivers/s390/cio/qdio_debug.c         |   18
-rw-r--r--  drivers/s390/cio/qdio_setup.c         |    6
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c        |   55
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c       |    8
-rw-r--r--  drivers/s390/cio/vfio_ccw_fsm.c       |    7
-rw-r--r--  drivers/s390/cio/vfio_ccw_private.h   |    1
-rw-r--r--  drivers/s390/crypto/ap_bus.c          |  312
-rw-r--r--  drivers/s390/crypto/ap_bus.h          |    5
-rw-r--r--  drivers/s390/crypto/ap_queue.c        |   56
-rw-r--r--  drivers/s390/crypto/pkey_api.c        |    5
-rw-r--r--  drivers/s390/crypto/vfio_ap_drv.c     |   45
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c     |    4
-rw-r--r--  drivers/s390/crypto/vfio_ap_private.h |    1
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c      |   30
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h    |    2
-rw-r--r--  drivers/s390/net/Makefile             |    2
-rw-r--r--  drivers/s390/net/ctcm_main.c          |    1
-rw-r--r--  drivers/s390/net/ism_drv.c            |   27
-rw-r--r--  drivers/s390/net/qeth_core.h          |  184
-rw-r--r--  drivers/s390/net/qeth_core_main.c     | 1333
-rw-r--r--  drivers/s390/net/qeth_core_mpc.c      |   24
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h      |   43
-rw-r--r--  drivers/s390/net/qeth_core_sys.c      |   64
-rw-r--r--  drivers/s390/net/qeth_ethtool.c       |  370
-rw-r--r--  drivers/s390/net/qeth_l2_main.c       |  598
-rw-r--r--  drivers/s390/net/qeth_l3_main.c       |  502
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c        |   12
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c          |   83
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c          |   25
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h          |   14
-rw-r--r--  drivers/s390/scsi/zfcp_def.h          |  113
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c          |  362
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h          |   11
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c           |   69
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h           |   21
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c          |   51
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h          |    4
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c         |   15
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h         |    9
-rw-r--r--  drivers/s390/scsi/zfcp_reqlist.h      |    2
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c         |   21
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c      |   38
57 files changed, 2284 insertions, 2423 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5e9ebdb0594c..e03304fe25bb 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1192,20 +1192,7 @@ static int dasd_hosts_show(struct seq_file *m, void *v)
return rc;
}
-static int dasd_hosts_open(struct inode *inode, struct file *file)
-{
- struct dasd_device *device = inode->i_private;
-
- return single_open(file, dasd_hosts_show, device);
-}
-
-static const struct file_operations dasd_hosts_fops = {
- .owner = THIS_MODULE,
- .open = dasd_hosts_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dasd_hosts);
static void dasd_hosts_exit(struct dasd_device *device)
{
@@ -3978,13 +3965,11 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
- void *rdc_buffer,
int rdc_buffer_size,
int magic)
{
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
- unsigned long *idaw;
cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
NULL);
@@ -3999,16 +3984,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
ccw = cqr->cpaddr;
ccw->cmd_code = CCW_CMD_RDC;
- if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
- idaw = (unsigned long *) (cqr->data);
- ccw->cda = (__u32)(addr_t) idaw;
- ccw->flags = CCW_FLAG_IDA;
- idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
- } else {
- ccw->cda = (__u32)(addr_t) rdc_buffer;
- ccw->flags = 0;
- }
-
+ ccw->cda = (__u32)(addr_t) cqr->data;
+ ccw->flags = 0;
ccw->count = rdc_buffer_size;
cqr->startdev = device;
cqr->memdev = device;
@@ -4026,12 +4003,13 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
int ret;
struct dasd_ccw_req *cqr;
- cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
- magic);
+ cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
ret = dasd_sleep_on(cqr);
+ if (ret == 0)
+ memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
dasd_sfree_request(cqr, cqr->memdev);
return ret;
}
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 4e7b55a14b1a..f89f9d02e788 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2004,14 +2004,14 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
raw:
- block->blocks = (private->real_cyl *
+ block->blocks = ((unsigned long) private->real_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk);
dev_info(&device->cdev->dev,
- "DASD with %d KB/block, %d KB total size, %d KB/track, "
+ "DASD with %u KB/block, %lu KB total size, %u KB/track, "
"%s\n", (block->bp_block >> 10),
- ((private->real_cyl *
+ (((unsigned long) private->real_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk * (block->bp_block >> 9)) >> 1),
((blk_per_trk * block->bp_block) >> 10),
@@ -4469,6 +4469,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
usrparm.psf_data &= 0x7fffffffULL;
usrparm.rssd_result &= 0x7fffffffULL;
}
+ /* at least 2 bytes are accessed and should be allocated */
+ if (usrparm.psf_data_len < 2) {
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "Symmetrix ioctl invalid data length %d",
+ usrparm.psf_data_len);
+ rc = -EINVAL;
+ goto out;
+ }
/* alloc I/O data area */
psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 2016e0ed5865..8e26001dc11c 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -412,6 +412,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
struct ccw_dev_id dev_id;
struct dasd_device *base;
struct ccw_device *cdev;
+ struct list_head *l;
unsigned long flags;
int rc;
@@ -462,23 +463,10 @@ static int dasd_ioctl_information(struct dasd_block *block,
memcpy(dasd_info->type, base->discipline->name, 4);
- if (block->request_queue->request_fn) {
- struct list_head *l;
-#ifdef DASD_EXTENDED_PROFILING
- {
- struct list_head *l;
- spin_lock_irqsave(&block->lock, flags);
- list_for_each(l, &block->request_queue->queue_head)
- dasd_info->req_queue_len++;
- spin_unlock_irqrestore(&block->lock, flags);
- }
-#endif /* DASD_EXTENDED_PROFILING */
- spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
- list_for_each(l, &base->ccw_queue)
- dasd_info->chanq_len++;
- spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
- flags);
- }
+ spin_lock_irqsave(&block->queue_lock, flags);
+ list_for_each(l, &base->ccw_queue)
+ dasd_info->chanq_len++;
+ spin_unlock_irqrestore(&block->queue_lock, flags);
rc = 0;
if (copy_to_user(argp, dasd_info,
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 5cb80c645489..1770b99f607e 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -339,8 +339,7 @@ dasd_proc_init(void)
dasd_proc_root_entry = proc_mkdir("dasd", NULL);
if (!dasd_proc_root_entry)
goto out_nodasd;
- dasd_devices_entry = proc_create_seq("devices",
- S_IFREG | S_IRUGO | S_IWUSR,
+ dasd_devices_entry = proc_create_seq("devices", 0444,
dasd_proc_root_entry,
&dasd_devices_seq_ops);
if (!dasd_devices_entry)
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index fd2146bcc0ad..e17364e13d2f 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -629,7 +629,7 @@ con3270_init(void)
(void (*)(unsigned long)) con3270_read_tasklet,
(unsigned long) condev->read);
- raw3270_add_view(&condev->view, &con3270_fn, 1);
+ raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
INIT_LIST_HEAD(&condev->freemem);
for (i = 0; i < CON3270_STRING_PAGES; i++) {
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 8f3a2eeb28dc..8b48ba9c598e 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -463,7 +463,8 @@ fs3270_open(struct inode *inode, struct file *filp)
init_waitqueue_head(&fp->wait);
fp->fs_pid = get_pid(task_pid(current));
- rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
+ rc = raw3270_add_view(&fp->view, &fs3270_fn, minor,
+ RAW3270_VIEW_LOCK_BH);
if (rc) {
fs3270_free_view(&fp->view);
goto out;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index f8cd2935fbfd..63a41b168761 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -920,7 +920,7 @@ raw3270_deactivate_view(struct raw3270_view *view)
* Add view to device with minor "minor".
*/
int
-raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
+raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass)
{
unsigned long flags;
struct raw3270 *rp;
@@ -942,6 +942,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
view->cols = rp->cols;
view->ascebc = rp->ascebc;
spin_lock_init(&view->lock);
+ lockdep_set_subclass(&view->lock, subclass);
list_add(&view->list, &rp->view_list);
rc = 0;
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 114ca7cbf889..3afaa35f7351 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -150,6 +150,8 @@ struct raw3270_fn {
struct raw3270_view {
struct list_head list;
spinlock_t lock;
+#define RAW3270_VIEW_LOCK_IRQ 0
+#define RAW3270_VIEW_LOCK_BH 1
atomic_t ref_count;
struct raw3270 *dev;
struct raw3270_fn *fn;
@@ -158,7 +160,7 @@ struct raw3270_view {
unsigned char *ascebc; /* ascii -> ebcdic table */
};
-int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
+int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int);
int raw3270_activate_view(struct raw3270_view *);
void raw3270_del_view(struct raw3270_view *);
void raw3270_deactivate_view(struct raw3270_view *);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index b3fcc24b1182..367e9d384d85 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -195,7 +195,9 @@ struct read_info_sccb {
u16 hcpua; /* 120-121 */
u8 _pad_122[124 - 122]; /* 122-123 */
u32 hmfai; /* 124-127 */
- u8 _pad_128[4096 - 128]; /* 128-4095 */
+ u8 _pad_128[134 - 128]; /* 128-133 */
+ u8 byte_134; /* 134 */
+ u8 _pad_135[4096 - 135]; /* 135-4095 */
} __packed __aligned(PAGE_SIZE);
struct read_storage_sccb {
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 194ffd5c8580..039b2074db7e 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
static void __ref sclp_cpu_change_notify(struct work_struct *work)
{
+ lock_device_hotplug();
smp_rescan_cpus();
+ unlock_device_hotplug();
}
static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index e792cee3b51c..8332788681c4 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -44,6 +44,8 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
if (sccb->fac91 & 0x40)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
+ if (sccb->cpuoff > 134)
+ sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp.rzm <<= 20;
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index 32a14ee31c6b..2238d9df6c47 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -111,11 +111,8 @@ static const struct seq_operations tape_proc_seq = {
void
tape_proc_init(void)
{
- tape_proc_devices = proc_create_seq("tapedevices",
- S_IFREG | S_IRUGO | S_IWUSR, NULL, &tape_proc_seq);
- if (tape_proc_devices == NULL) {
- return;
- }
+ tape_proc_devices = proc_create_seq("tapedevices", 0444, NULL,
+ &tape_proc_seq);
}
/*
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 2b0c36c2c568..98d7fc152e32 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -980,7 +980,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
return PTR_ERR(tp);
rc = raw3270_add_view(&tp->view, &tty3270_fn,
- tty->index + RAW3270_FIRSTMINOR);
+ tty->index + RAW3270_FIRSTMINOR,
+ RAW3270_VIEW_LOCK_BH);
if (rc) {
tty3270_free_view(tp);
return rc;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a0baee25134c..a835b31aad99 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -24,6 +24,7 @@
#include <asm/crw.h>
#include <asm/isc.h>
#include <asm/ebcdic.h>
+#include <asm/ap.h>
#include "css.h"
#include "cio.h"
@@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
" failed (rc=%d).\n", ret);
}
+static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
+{
+ CIO_CRW_EVENT(3, "chsc: ap config changed\n");
+ if (sei_area->rs != 5)
+ return;
+
+ ap_bus_cfg_chg();
+}
+
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
switch (sei_area->cc) {
@@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
case 2: /* i/o resource accessibility */
chsc_process_sei_res_acc(sei_area);
break;
+ case 3: /* ap config changed */
+ chsc_process_sei_ap_cfg_chg(sei_area);
+ break;
case 7: /* channel-path-availability information */
chsc_process_sei_chp_avail(sei_area);
break;
@@ -1382,3 +1395,40 @@ int chsc_pnso_brinfo(struct subchannel_id schid,
return chsc_error_from_response(brinfo_area->response.code);
}
EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
+
+int chsc_sgib(u32 origin)
+{
+ struct {
+ struct chsc_header request;
+ u16 op;
+ u8 reserved01[2];
+ u8 reserved02:4;
+ u8 fmt:4;
+ u8 reserved03[7];
+ /* operation data area begin */
+ u8 reserved04[4];
+ u32 gib_origin;
+ u8 reserved05[10];
+ u8 aix;
+ u8 reserved06[4029];
+ struct chsc_header response;
+ u8 reserved07[4];
+ } *sgib_area;
+ int ret;
+
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ sgib_area = chsc_page;
+ sgib_area->request.length = 0x0fe0;
+ sgib_area->request.code = 0x0021;
+ sgib_area->op = 0x1;
+ sgib_area->gib_origin = origin;
+
+ ret = chsc(sgib_area);
+ if (ret == 0)
+ ret = chsc_error_from_response(sgib_area->response.code);
+ spin_unlock_irq(&chsc_page_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_sgib);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 78aba8d94eec..e57d68e325a3 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -164,6 +164,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp);
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
u64 summary_indicator_addr, u64 subchannel_indicator_addr);
+int chsc_sgib(u32 origin);
int chsc_error_from_response(int response);
int chsc_siosl(struct subchannel_id schid);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 68a82f3e2e92..d2f98e5829d4 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -190,19 +190,7 @@ static int qstat_show(struct seq_file *m, void *v)
return 0;
}
-static int qstat_seq_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, qstat_show,
- file_inode(filp)->i_private);
-}
-
-static const struct file_operations debugfs_fops = {
- .owner = THIS_MODULE,
- .open = qstat_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qstat);
static char *qperf_names[] = {
"Assumed adapter interrupts",
@@ -305,8 +293,8 @@ static void setup_debugfs_entry(struct qdio_q *q)
snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
q->is_input_q ? "input" : "output",
q->nr);
- q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
- q->irq_ptr->debugfs_dev, q, &debugfs_fops);
+ q->debugfs_q = debugfs_create_file(name, 0444,
+ q->irq_ptr->debugfs_dev, q, &qstat_fops);
if (IS_ERR(q->debugfs_q))
q->debugfs_q = NULL;
}
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index e324d890a4f6..a59887fad13e 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -181,7 +181,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
- void **sbals_array, int i)
+ struct qdio_buffer **sbals_array, int i)
{
struct qdio_q *prev;
int j;
@@ -212,8 +212,8 @@ static void setup_queues(struct qdio_irq *irq_ptr,
struct qdio_initialize *qdio_init)
{
struct qdio_q *q;
- void **input_sbal_array = qdio_init->input_sbal_addr_array;
- void **output_sbal_array = qdio_init->output_sbal_addr_array;
+ struct qdio_buffer **input_sbal_array = qdio_init->input_sbal_addr_array;
+ struct qdio_buffer **output_sbal_array = qdio_init->output_sbal_addr_array;
struct qdio_outbuf_state *output_sbal_state_array =
qdio_init->output_sbal_state_array;
int i;
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 70a006ba4d05..384b3987eeb4 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -283,6 +283,33 @@ static long copy_ccw_from_iova(struct channel_program *cp,
#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))
+/*
+ * is_cpa_within_range()
+ *
+ * @cpa: channel program address being questioned
+ * @head: address of the beginning of a CCW chain
+ * @len: number of CCWs within the chain
+ *
+ * Determine whether the address of a CCW (whether a new chain,
+ * or the target of a TIC) falls within a range (including the end points).
+ *
+ * Returns 1 if yes, 0 if no.
+ */
+static inline int is_cpa_within_range(u32 cpa, u32 head, int len)
+{
+ u32 tail = head + (len - 1) * sizeof(struct ccw1);
+
+ return (head <= cpa && cpa <= tail);
+}
+
+static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len)
+{
+ if (!ccw_is_tic(ccw))
+ return 0;
+
+ return is_cpa_within_range(ccw->cda, head, len);
+}
+
static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
{
struct ccwchain *chain;
@@ -392,7 +419,15 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
return -EOPNOTSUPP;
}
- if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
+ /*
+ * We want to keep counting if the current CCW has the
+ * command-chaining flag enabled, or if it is a TIC CCW
+ * that loops back into the current chain. The latter
+ * is used for device orientation, where the CCW PRIOR to
+ * the TIC can either jump to the TIC or a CCW immediately
+ * after the TIC, depending on the results of its operation.
+ */
+ if (!ccw_is_chain(ccw) && !is_tic_within_range(ccw, iova, cnt))
break;
ccw++;
@@ -408,13 +443,11 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
{
struct ccwchain *chain;
- u32 ccw_head, ccw_tail;
+ u32 ccw_head;
list_for_each_entry(chain, &cp->ccwchain_list, next) {
ccw_head = chain->ch_iova;
- ccw_tail = ccw_head + (chain->ch_len - 1) * sizeof(struct ccw1);
-
- if ((ccw_head <= tic->cda) && (tic->cda <= ccw_tail))
+ if (is_cpa_within_range(tic->cda, ccw_head, chain->ch_len))
return 1;
}
@@ -481,13 +514,11 @@ static int ccwchain_fetch_tic(struct ccwchain *chain,
{
struct ccw1 *ccw = chain->ch_ccw + idx;
struct ccwchain *iter;
- u32 ccw_head, ccw_tail;
+ u32 ccw_head;
list_for_each_entry(iter, &cp->ccwchain_list, next) {
ccw_head = iter->ch_iova;
- ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1);
-
- if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) {
+ if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
(ccw->cda - ccw_head));
return 0;
@@ -829,7 +860,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
{
struct ccwchain *chain;
u32 cpa = scsw->cmd.cpa;
- u32 ccw_head, ccw_tail;
+ u32 ccw_head;
/*
* LATER:
@@ -839,9 +870,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
*/
list_for_each_entry(chain, &cp->ccwchain_list, next) {
ccw_head = (u32)(u64)chain->ch_ccw;
- ccw_tail = (u32)(u64)(chain->ch_ccw + chain->ch_len - 1);
-
- if ((ccw_head <= cpa) && (cpa <= ccw_tail)) {
+ if (is_cpa_within_range(cpa, ccw_head, chain->ch_len)) {
/*
* (cpa - ccw_head) is the offset value of the host
* physical ccw to its chain head.
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index a10cec0e86eb..0b3b9de45c60 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
struct vfio_ccw_private *private;
struct irb *irb;
+ bool is_final;
private = container_of(work, struct vfio_ccw_private, io_work);
irb = &private->irb;
+ is_final = !(scsw_actl(&irb->scsw) &
+ (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
if (scsw_is_solicited(&irb->scsw)) {
cp_update_scsw(&private->cp, &irb->scsw);
- cp_free(&private->cp);
+ if (is_final)
+ cp_free(&private->cp);
}
memcpy(private->io_region->irb_area, irb, sizeof(*irb));
if (private->io_trigger)
eventfd_signal(private->io_trigger, 1);
- if (private->mdev)
+ if (private->mdev && is_final)
private->state = VFIO_CCW_STATE_IDLE;
}
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index f94aa01f9c36..cab17865aafe 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -130,7 +130,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
struct mdev_device *mdev = private->mdev;
char *errstr = "request";
- private->state = VFIO_CCW_STATE_BOXED;
+ private->state = VFIO_CCW_STATE_BUSY;
memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
@@ -216,11 +216,6 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
},
- [VFIO_CCW_STATE_BOXED] = {
- [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
- [VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
- [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
- },
[VFIO_CCW_STATE_BUSY] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 078e46f9623d..08e9a7dc9176 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -63,7 +63,6 @@ enum vfio_ccw_state {
VFIO_CCW_STATE_NOT_OPER,
VFIO_CCW_STATE_STANDBY,
VFIO_CCW_STATE_IDLE,
- VFIO_CCW_STATE_BOXED,
VFIO_CCW_STATE_BUSY,
/* last element! */
NR_VFIO_CCW_STATES
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9f5a201c4c87..1546389d71db 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -248,7 +248,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
static inline int ap_test_config_card_id(unsigned int id)
{
if (!ap_configuration) /* QCI not supported */
- return 1;
+ /* only ids 0...3F may be probed */
+ return id < 0x40 ? 1 : 0;
return ap_test_config(ap_configuration->apm, id);
}
@@ -299,7 +300,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
ap_max_domain_id = 15;
switch (*device_type) {
/* For CEX2 and CEX3 the available functions
- * are not refrected by the facilities bits.
+ * are not reflected by the facilities bits.
* Instead it is coded into the type. So here
* modify the function bits based on the type.
*/
@@ -809,11 +810,18 @@ static int ap_device_remove(struct device *dev)
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = ap_dev->drv;
+ /* prepare ap queue device removal */
if (is_queue_dev(dev))
- ap_queue_remove(to_ap_queue(dev));
+ ap_queue_prepare_remove(to_ap_queue(dev));
+
+ /* driver's chance to clean up gracefully */
if (ap_drv->remove)
ap_drv->remove(ap_dev);
+ /* now do the ap queue device remove */
+ if (is_queue_dev(dev))
+ ap_queue_remove(to_ap_queue(dev));
+
/* Remove queue/card from list of active queues/cards */
spin_lock_bh(&ap_list_lock);
if (is_card_dev(dev))
@@ -860,6 +868,16 @@ void ap_bus_force_rescan(void)
EXPORT_SYMBOL(ap_bus_force_rescan);
/*
+* A config change has happened, force an ap bus rescan.
+*/
+void ap_bus_cfg_chg(void)
+{
+ AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
+
+ ap_bus_force_rescan();
+}
+
+/*
* hex2bitmap() - parse hex mask string and set bitmap.
* Valid strings are "0x012345678" with at least one valid hex number.
* Rest of the bitmap to the right is padded with 0. No spaces allowed
@@ -1317,7 +1335,7 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
}
/*
- * helper function to be used with bus_find_dev
+ * Helper function to be used with bus_find_dev
* matches for the card device with the given id
*/
static int __match_card_device_with_id(struct device *dev, void *data)
@@ -1325,7 +1343,8 @@ static int __match_card_device_with_id(struct device *dev, void *data)
return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long) data;
}
-/* helper function to be used with bus_find_dev
+/*
+ * Helper function to be used with bus_find_dev
* matches for the queue device with a given qid
*/
static int __match_queue_device_with_qid(struct device *dev, void *data)
@@ -1333,143 +1352,200 @@ static int __match_queue_device_with_qid(struct device *dev, void *data)
return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data;
}
-/**
- * ap_scan_bus(): Scan the AP bus for new devices
- * Runs periodically, workqueue timer (ap_config_time)
+/*
+ * Helper function to be used with bus_find_dev
+ * matches any queue device with given queue id
*/
-static void ap_scan_bus(struct work_struct *unused)
+static int __match_queue_device_with_queue_id(struct device *dev, void *data)
{
- struct ap_queue *aq;
+ return is_queue_dev(dev)
+ && AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long) data;
+}
+
+/*
+ * Helper function for ap_scan_bus().
+ * Does the scan bus job for the given adapter id.
+ */
+static void _ap_scan_bus_adapter(int id)
+{
+ ap_qid_t qid;
+ unsigned int func;
struct ap_card *ac;
struct device *dev;
- ap_qid_t qid;
- int comp_type, depth = 0, type = 0;
- unsigned int func = 0;
- int rc, id, dom, borked, domains, defdomdevs = 0;
-
- AP_DBF(DBF_DEBUG, "%s running\n", __func__);
+ struct ap_queue *aq;
+ int rc, dom, depth, type, comp_type, borked;
+
+ /* check if there is a card device registered with this id */
+ dev = bus_find_device(&ap_bus_type, NULL,
+ (void *)(long) id,
+ __match_card_device_with_id);
+ ac = dev ? to_ap_card(dev) : NULL;
+ if (!ap_test_config_card_id(id)) {
+ if (dev) {
+ /* Card device has been removed from configuration */
+ bus_for_each_dev(&ap_bus_type, NULL,
+ (void *)(long) id,
+ __ap_queue_devices_with_id_unregister);
+ device_unregister(dev);
+ put_device(dev);
+ }
+ return;
+ }
- ap_query_configuration(ap_configuration);
- ap_select_domain();
+ /*
+ * This card id is enabled in the configuration. If we already have
+ * a card device with this id, check if type and functions are still
+ * the very same. Also verify that at least one queue is available.
+ */
+ if (ac) {
+ /* find the first valid queue */
+ for (dom = 0; dom < AP_DOMAINS; dom++) {
+ qid = AP_MKQID(id, dom);
+ if (ap_query_queue(qid, &depth, &type, &func) == 0)
+ break;
+ }
+ borked = 0;
+ if (dom >= AP_DOMAINS) {
+ /* no accessible queue on this card */
+ borked = 1;
+ } else if (ac->raw_hwtype != type) {
+ /* card type has changed */
+ AP_DBF(DBF_INFO, "card=%02x type changed.\n", id);
+ borked = 1;
+ } else if (ac->functions != func) {
+ /* card functions have changed */
+ AP_DBF(DBF_INFO, "card=%02x functions changed.\n", id);
+ borked = 1;
+ }
+ if (borked) {
+ /* unregister card device and associated queues */
+ bus_for_each_dev(&ap_bus_type, NULL,
+ (void *)(long) id,
+ __ap_queue_devices_with_id_unregister);
+ device_unregister(dev);
+ put_device(dev);
+ /* go back if there is no valid queue on this card */
+ if (dom >= AP_DOMAINS)
+ return;
+ ac = NULL;
+ }
+ }
- for (id = 0; id < AP_DEVICES; id++) {
- /* check if device is registered */
+ /*
+ * Go through all possible queue ids. Check and maybe create or release
+ * queue devices for this card. If there exists no card device yet,
+ * create a card device also.
+ */
+ for (dom = 0; dom < AP_DOMAINS; dom++) {
+ qid = AP_MKQID(id, dom);
dev = bus_find_device(&ap_bus_type, NULL,
- (void *)(long) id,
- __match_card_device_with_id);
- ac = dev ? to_ap_card(dev) : NULL;
- if (!ap_test_config_card_id(id)) {
+ (void *)(long) qid,
+ __match_queue_device_with_qid);
+ aq = dev ? to_ap_queue(dev) : NULL;
+ if (!ap_test_config_domain(dom)) {
if (dev) {
- /* Card device has been removed from
- * configuration, remove the belonging
- * queue devices.
+ /* Queue device exists but has been
+ * removed from configuration.
*/
- bus_for_each_dev(&ap_bus_type, NULL,
- (void *)(long) id,
- __ap_queue_devices_with_id_unregister);
- /* now remove the card device */
device_unregister(dev);
put_device(dev);
}
continue;
}
- /* According to the configuration there should be a card
- * device, so check if there is at least one valid queue
- * and maybe create queue devices and the card device.
- */
- domains = 0;
- for (dom = 0; dom < AP_DOMAINS; dom++) {
- qid = AP_MKQID(id, dom);
- dev = bus_find_device(&ap_bus_type, NULL,
- (void *)(long) qid,
- __match_queue_device_with_qid);
- aq = dev ? to_ap_queue(dev) : NULL;
- if (!ap_test_config_domain(dom)) {
- if (dev) {
- /* Queue device exists but has been
- * removed from configuration.
- */
- device_unregister(dev);
- put_device(dev);
- }
- continue;
- }
- rc = ap_query_queue(qid, &depth, &type, &func);
- if (dev) {
+ /* try to fetch infos about this queue */
+ rc = ap_query_queue(qid, &depth, &type, &func);
+ if (dev) {
+ if (rc == -ENODEV)
+ borked = 1;
+ else {
spin_lock_bh(&aq->lock);
- if (rc == -ENODEV ||
- /* adapter reconfiguration */
- (ac && ac->functions != func))
- aq->state = AP_STATE_BORKED;
borked = aq->state == AP_STATE_BORKED;
spin_unlock_bh(&aq->lock);
- if (borked) /* Remove broken device */
- device_unregister(dev);
- put_device(dev);
- if (!borked) {
- domains++;
- if (dom == ap_domain_index)
- defdomdevs++;
- continue;
- }
}
- if (rc)
- continue;
- /* a new queue device is needed, check out comp type */
- comp_type = ap_get_compatible_type(qid, type, func);
- if (!comp_type)
- continue;
- /* maybe a card device needs to be created first */
- if (!ac) {
- ac = ap_card_create(id, depth, type,
- comp_type, func);
- if (!ac)
- continue;
- ac->ap_dev.device.bus = &ap_bus_type;
- ac->ap_dev.device.parent = ap_root_device;
- dev_set_name(&ac->ap_dev.device,
- "card%02x", id);
- /* Register card with AP bus */
- rc = device_register(&ac->ap_dev.device);
- if (rc) {
- put_device(&ac->ap_dev.device);
- ac = NULL;
- break;
- }
- /* get it and thus adjust reference counter */
- get_device(&ac->ap_dev.device);
+ if (borked) {
+ /* Remove broken device */
+ AP_DBF(DBF_DEBUG,
+ "removing broken queue=%02x.%04x\n",
+ id, dom);
+ device_unregister(dev);
}
- /* now create the new queue device */
- aq = ap_queue_create(qid, comp_type);
- if (!aq)
+ put_device(dev);
+ continue;
+ }
+ if (rc)
+ continue;
+ /* a new queue device is needed, check out comp type */
+ comp_type = ap_get_compatible_type(qid, type, func);
+ if (!comp_type)
+ continue;
+ /* maybe a card device needs to be created first */
+ if (!ac) {
+ ac = ap_card_create(id, depth, type, comp_type, func);
+ if (!ac)
continue;
- aq->card = ac;
- aq->ap_dev.device.bus = &ap_bus_type;
- aq->ap_dev.device.parent = &ac->ap_dev.device;
- dev_set_name(&aq->ap_dev.device,
- "%02x.%04x", id, dom);
- /* Register device */
- rc = device_register(&aq->ap_dev.device);
+ ac->ap_dev.device.bus = &ap_bus_type;
+ ac->ap_dev.device.parent = ap_root_device;
+ dev_set_name(&ac->ap_dev.device, "card%02x", id);
+ /* Register card device with AP bus */
+ rc = device_register(&ac->ap_dev.device);
if (rc) {
- put_device(&aq->ap_dev.device);
- continue;
+ put_device(&ac->ap_dev.device);
+ ac = NULL;
+ break;
}
- domains++;
- if (dom == ap_domain_index)
- defdomdevs++;
- } /* end domain loop */
- if (ac) {
- /* remove card dev if there are no queue devices */
- if (!domains)
- device_unregister(&ac->ap_dev.device);
- put_device(&ac->ap_dev.device);
+ /* get it and thus adjust reference counter */
+ get_device(&ac->ap_dev.device);
}
- } /* end device loop */
+ /* now create the new queue device */
+ aq = ap_queue_create(qid, comp_type);
+ if (!aq)
+ continue;
+ aq->card = ac;
+ aq->ap_dev.device.bus = &ap_bus_type;
+ aq->ap_dev.device.parent = &ac->ap_dev.device;
+ dev_set_name(&aq->ap_dev.device, "%02x.%04x", id, dom);
+ /* Register queue device */
+ rc = device_register(&aq->ap_dev.device);
+ if (rc) {
+ put_device(&aq->ap_dev.device);
+ continue;
+ }
+ } /* end domain loop */
+
+ if (ac)
+ put_device(&ac->ap_dev.device);
+}
+
+/**
+ * ap_scan_bus(): Scan the AP bus for new devices
+ * Runs periodically, workqueue timer (ap_config_time)
+ */
+static void ap_scan_bus(struct work_struct *unused)
+{
+ int id;
- if (ap_domain_index >= 0 && defdomdevs < 1)
- AP_DBF(DBF_INFO,
- "no queue device with default domain %d available\n",
- ap_domain_index);
+ AP_DBF(DBF_DEBUG, "%s running\n", __func__);
+
+ ap_query_configuration(ap_configuration);
+ ap_select_domain();
+
+ /* loop over all possible adapters */
+ for (id = 0; id < AP_DEVICES; id++)
+ _ap_scan_bus_adapter(id);
+
+ /* check if there is at least one queue available with default domain */
+ if (ap_domain_index >= 0) {
+ struct device *dev =
+ bus_find_device(&ap_bus_type, NULL,
+ (void *)(long) ap_domain_index,
+ __match_queue_device_with_queue_id);
+ if (dev)
+ put_device(dev);
+ else
+ AP_DBF(DBF_INFO,
+ "no queue device with default domain %d available\n",
+ ap_domain_index);
+ }
mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
}
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index bfc66e4a9de1..15a98a673c5c 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -91,7 +91,9 @@ enum ap_state {
AP_STATE_WORKING,
AP_STATE_QUEUE_FULL,
AP_STATE_SUSPEND_WAIT,
- AP_STATE_BORKED,
+ AP_STATE_REMOVE, /* about to be removed from driver */
+ AP_STATE_UNBOUND, /* momentary not bound to a driver */
+ AP_STATE_BORKED, /* broken */
NR_AP_STATES
};
@@ -251,6 +253,7 @@ void ap_bus_force_rescan(void);
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 0aa4b3ccc948..5ea83dc4f1d7 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -14,6 +14,9 @@
#include <asm/facility.h>
#include "ap_bus.h"
+#include "ap_debug.h"
+
+static void __ap_flush_queue(struct ap_queue *aq);
/**
* ap_queue_enable_interruption(): Enable interruption on an AP queue.
@@ -417,6 +420,14 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_EVENT_POLL] = ap_sm_suspend_read,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
+ [AP_STATE_REMOVE] = {
+ [AP_EVENT_POLL] = ap_sm_nop,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_STATE_UNBOUND] = {
+ [AP_EVENT_POLL] = ap_sm_nop,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
[AP_STATE_BORKED] = {
[AP_EVENT_POLL] = ap_sm_nop,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -541,7 +552,25 @@ static ssize_t reset_show(struct device *dev,
return rc;
}
-static DEVICE_ATTR_RO(reset);
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+
+ spin_lock_bh(&aq->lock);
+ __ap_flush_queue(aq);
+ aq->state = AP_STATE_RESET_START;
+ ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+
+ AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(reset);
static ssize_t interrupt_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -704,6 +733,7 @@ static void __ap_flush_queue(struct ap_queue *aq)
ap_msg->rc = -EAGAIN;
ap_msg->receive(aq, ap_msg, NULL);
}
+ aq->queue_count = 0;
}
void ap_flush_queue(struct ap_queue *aq)
@@ -714,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq)
}
EXPORT_SYMBOL(ap_flush_queue);
-void ap_queue_remove(struct ap_queue *aq)
+void ap_queue_prepare_remove(struct ap_queue *aq)
{
- ap_flush_queue(aq);
+ spin_lock_bh(&aq->lock);
+ /* flush queue */
+ __ap_flush_queue(aq);
+ /* set REMOVE state to prevent new messages are queued in */
+ aq->state = AP_STATE_REMOVE;
+ spin_unlock_bh(&aq->lock);
del_timer_sync(&aq->timeout);
+}
- /* reset with zero, also clears irq registration */
+void ap_queue_remove(struct ap_queue *aq)
+{
+ /*
+ * all messages have been flushed and the state is
+ * AP_STATE_REMOVE. Now reset with zero which also
+ * clears the irq registration and move the state
+ * to AP_STATE_UNBOUND to signal that this queue
+ * is not used by any driver currently.
+ */
spin_lock_bh(&aq->lock);
ap_zapq(aq->qid);
- aq->state = AP_STATE_BORKED;
+ aq->state = AP_STATE_UNBOUND;
spin_unlock_bh(&aq->lock);
}
-EXPORT_SYMBOL(ap_queue_remove);
void ap_queue_reinit_state(struct ap_queue *aq)
{
@@ -734,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq)
ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
-EXPORT_SYMBOL(ap_queue_reinit_state);
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 2f92bbed4bf6..45eb0c14b880 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -51,7 +51,8 @@ static debug_info_t *debug_info;
static void __init pkey_debug_init(void)
{
- debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long));
+ /* 5 arguments per dbf entry (including the format string ptr) */
+ debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
debug_register_view(debug_info, &debug_sprintf_view);
debug_set_level(debug_info, 3);
}
@@ -1079,7 +1080,7 @@ int pkey_verifykey(const struct pkey_seckey *seckey,
rc = mkvp_cache_fetch(cardnr, domain, mkvp);
if (rc)
goto out;
- if (t->mkvp == mkvp[1]) {
+ if (t->mkvp == mkvp[1] && t->mkvp != mkvp[0]) {
DEBUG_DBG("%s secure key has old mkvp\n", __func__);
if (pattributes)
*pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 7667b38728f0..e9824c35c34f 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -11,10 +11,10 @@
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <asm/facility.h>
#include "vfio_ap_private.h"
#define VFIO_AP_ROOT_NAME "vfio_ap"
-#define VFIO_AP_DEV_TYPE_NAME "ap_matrix"
#define VFIO_AP_DEV_NAME "matrix"
MODULE_AUTHOR("IBM Corporation");
@@ -23,10 +23,6 @@ MODULE_LICENSE("GPL v2");
static struct ap_driver vfio_ap_drv;
-static struct device_type vfio_ap_dev_type = {
- .name = VFIO_AP_DEV_TYPE_NAME,
-};
-
struct ap_matrix_dev *matrix_dev;
/* Only type 10 adapters (CEX4 and later) are supported
@@ -61,6 +57,22 @@ static void vfio_ap_matrix_dev_release(struct device *dev)
kfree(matrix_dev);
}
+static int matrix_bus_match(struct device *dev, struct device_driver *drv)
+{
+ return 1;
+}
+
+static struct bus_type matrix_bus = {
+ .name = "matrix",
+ .match = &matrix_bus_match,
+};
+
+static struct device_driver matrix_driver = {
+ .name = "vfio_ap",
+ .bus = &matrix_bus,
+ .suppress_bind_attrs = true,
+};
+
static int vfio_ap_matrix_dev_create(void)
{
int ret;
@@ -70,6 +82,10 @@ static int vfio_ap_matrix_dev_create(void)
if (IS_ERR(root_device))
return PTR_ERR(root_device);
+ ret = bus_register(&matrix_bus);
+ if (ret)
+ goto bus_register_err;
+
matrix_dev = kzalloc(sizeof(*matrix_dev), GFP_KERNEL);
if (!matrix_dev) {
ret = -ENOMEM;
@@ -86,30 +102,41 @@ static int vfio_ap_matrix_dev_create(void)
mutex_init(&matrix_dev->lock);
INIT_LIST_HEAD(&matrix_dev->mdev_list);
- matrix_dev->device.type = &vfio_ap_dev_type;
dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
matrix_dev->device.parent = root_device;
+ matrix_dev->device.bus = &matrix_bus;
matrix_dev->device.release = vfio_ap_matrix_dev_release;
- matrix_dev->device.driver = &vfio_ap_drv.driver;
+ matrix_dev->vfio_ap_drv = &vfio_ap_drv;
ret = device_register(&matrix_dev->device);
if (ret)
goto matrix_reg_err;
+ ret = driver_register(&matrix_driver);
+ if (ret)
+ goto matrix_drv_err;
+
return 0;
+matrix_drv_err:
+ device_unregister(&matrix_dev->device);
matrix_reg_err:
put_device(&matrix_dev->device);
matrix_alloc_err:
+ bus_unregister(&matrix_bus);
+bus_register_err:
root_device_unregister(root_device);
-
return ret;
}
static void vfio_ap_matrix_dev_destroy(void)
{
+ struct device *root_device = matrix_dev->device.parent;
+
+ driver_unregister(&matrix_driver);
device_unregister(&matrix_dev->device);
- root_device_unregister(matrix_dev->device.parent);
+ bus_unregister(&matrix_bus);
+ root_device_unregister(root_device);
}
static int __init vfio_ap_init(void)
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 272ef427dcc0..900b9cf20ca5 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -198,8 +198,8 @@ static int vfio_ap_verify_queue_reserved(unsigned long *apid,
qres.apqi = apqi;
qres.reserved = false;
- ret = driver_for_each_device(matrix_dev->device.driver, NULL, &qres,
- vfio_ap_has_queue);
+ ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL,
+ &qres, vfio_ap_has_queue);
if (ret)
return ret;
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index 5675492233c7..76b7f98e47e9 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -40,6 +40,7 @@ struct ap_matrix_dev {
struct ap_config_info info;
struct list_head mdev_list;
struct mutex lock;
+ struct ap_driver *vfio_ap_drv;
};
extern struct ap_matrix_dev *matrix_dev;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index eb93c2d27d0a..689c2af7026a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
+ struct module **pmod,
unsigned int weight)
{
if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
@@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
atomic_add(weight, &zc->load);
atomic_add(weight, &zq->load);
zq->request_count++;
+ *pmod = zq->queue->ap_dev.drv->driver.owner;
return zq;
}
static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
+ struct module *mod,
unsigned int weight)
{
- struct module *mod = zq->queue->ap_dev.drv->driver.owner;
-
zq->request_count--;
atomic_sub(weight, &zc->load);
atomic_sub(weight, &zq->load);
@@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
unsigned int weight, pref_weight;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
@@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
@@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
unsigned int weight, pref_weight;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(crt, TP_ICARSACRT);
@@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
@@ -819,6 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
unsigned int func_code;
unsigned short *domain;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
@@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
@@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
unsigned int func_code;
struct ap_message ap_msg;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
@@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out_free:
@@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer)
struct ap_message ap_msg;
unsigned int domain;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
@@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer)
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer)
rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 240b27f3f5f6..f34ee41cbed8 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -51,6 +51,7 @@ struct error_hdr {
#define REP82_ERROR_FORMAT_FIELD 0x29
#define REP82_ERROR_INVALID_COMMAND 0x30
#define REP82_ERROR_MALFORMED_MSG 0x40
+#define REP82_ERROR_INVALID_SPECIAL_CMD 0x41
#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
#define REP82_ERROR_WORD_ALIGNMENT 0x60
@@ -89,6 +90,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
case REP88_ERROR_MESSAGE_MALFORMD:
case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
case REP82_ERROR_INVALID_DOMAIN_PENDING:
+ case REP82_ERROR_INVALID_SPECIAL_CMD:
// REP88_ERROR_INVALID_KEY // '82' CEX2A
// REP88_ERROR_OPERAND // '84' CEX2A
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index f2d6bbe57a6f..bc55ec316adb 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o
obj-$(CONFIG_LCS) += lcs.o
-qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
+qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_ethtool.o
obj-$(CONFIG_QETH) += qeth.o
qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
obj-$(CONFIG_QETH_L2) += qeth_l2.o
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 7617d21cb296..f63c5c871d3d 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1595,6 +1595,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
if (priv->channel[direction] == NULL) {
if (direction == CTCM_WRITE)
channel_free(priv->channel[CTCM_READ]);
+ result = -ENODEV;
goto out_dev;
}
priv->channel[direction]->netdev = dev;
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index dcbf5c857743..3e132592c1fe 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -89,8 +89,8 @@ static int register_sba(struct ism_dev *ism)
dma_addr_t dma_handle;
struct ism_sba *sba;
- sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
- &dma_handle, GFP_KERNEL);
+ sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
+ GFP_KERNEL);
if (!sba)
return -ENOMEM;
@@ -116,8 +116,8 @@ static int register_ieq(struct ism_dev *ism)
dma_addr_t dma_handle;
struct ism_eq *ieq;
- ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
- &dma_handle, GFP_KERNEL);
+ ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
+ GFP_KERNEL);
if (!ieq)
return -ENOMEM;
@@ -141,10 +141,13 @@ static int register_ieq(struct ism_dev *ism)
static int unregister_sba(struct ism_dev *ism)
{
+ int ret;
+
if (!ism->sba)
return 0;
- if (ism_cmd_simple(ism, ISM_UNREG_SBA))
+ ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
+ if (ret && ret != ISM_ERROR)
return -EIO;
dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
@@ -158,10 +161,13 @@ static int unregister_sba(struct ism_dev *ism)
static int unregister_ieq(struct ism_dev *ism)
{
+ int ret;
+
if (!ism->ieq)
return 0;
- if (ism_cmd_simple(ism, ISM_UNREG_IEQ))
+ ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
+ if (ret && ret != ISM_ERROR)
return -EIO;
dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
@@ -234,10 +240,9 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
return -EINVAL;
- dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len,
- &dmb->dma_addr, GFP_KERNEL |
- __GFP_NOWARN | __GFP_NOMEMALLOC |
- __GFP_COMP | __GFP_NORETRY);
+ dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
+ &dmb->dma_addr,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY);
if (!dmb->cpu_addr)
clear_bit(dmb->sba_idx, ism->sba_bitmap);
@@ -288,7 +293,7 @@ static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
cmd.request.dmb_tok = dmb->dmb_tok;
ret = ism_cmd(ism, &cmd);
- if (ret)
+ if (ret && ret != ISM_ERROR)
goto out;
ism_free_dmb(ism, dmb);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 04e294d1d16d..c851cf6e01c4 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -18,10 +18,10 @@
#include <linux/in6.h>
#include <linux/bitops.h>
#include <linux/seq_file.h>
-#include <linux/ethtool.h>
#include <linux/hashtable.h>
#include <linux/ip.h>
#include <linux/refcount.h>
+#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
@@ -34,6 +34,8 @@
#include <asm/ccwgroup.h>
#include <asm/sysinfo.h>
+#include <uapi/linux/if_link.h>
+
#include "qeth_core_mpc.h"
/**
@@ -112,59 +114,6 @@ static inline u32 qeth_get_device_id(struct ccw_device *cdev)
#define CCW_DEVID(cdev) (qeth_get_device_id(cdev))
#define CARD_DEVID(card) (CCW_DEVID(CARD_RDEV(card)))
-/**
- * card stuff
- */
-struct qeth_perf_stats {
- unsigned int bufs_rec;
- unsigned int bufs_sent;
- unsigned int buf_elements_sent;
-
- unsigned int skbs_sent_pack;
- unsigned int bufs_sent_pack;
-
- unsigned int sc_dp_p;
- unsigned int sc_p_dp;
- /* qdio_cq_handler: number of times called, time spent in */
- __u64 cq_start_time;
- unsigned int cq_cnt;
- unsigned int cq_time;
- /* qdio_input_handler: number of times called, time spent in */
- __u64 inbound_start_time;
- unsigned int inbound_cnt;
- unsigned int inbound_time;
- /* qeth_send_packet: number of times called, time spent in */
- __u64 outbound_start_time;
- unsigned int outbound_cnt;
- unsigned int outbound_time;
- /* qdio_output_handler: number of times called, time spent in */
- __u64 outbound_handler_start_time;
- unsigned int outbound_handler_cnt;
- unsigned int outbound_handler_time;
- /* number of calls to and time spent in do_QDIO for inbound queue */
- __u64 inbound_do_qdio_start_time;
- unsigned int inbound_do_qdio_cnt;
- unsigned int inbound_do_qdio_time;
- /* number of calls to and time spent in do_QDIO for outbound queues */
- __u64 outbound_do_qdio_start_time;
- unsigned int outbound_do_qdio_cnt;
- unsigned int outbound_do_qdio_time;
- unsigned int large_send_bytes;
- unsigned int large_send_cnt;
- unsigned int sg_skbs_sent;
- /* initial values when measuring starts */
- unsigned long initial_rx_packets;
- unsigned long initial_tx_packets;
- /* inbound scatter gather data */
- unsigned int sg_skbs_rx;
- unsigned int sg_frags_rx;
- unsigned int sg_alloc_page_rx;
- unsigned int tx_csum;
- unsigned int tx_lin;
- unsigned int tx_linfail;
- unsigned int rx_csum;
-};
-
/* Routing stuff */
struct qeth_routing_info {
enum qeth_routing_types type;
@@ -314,7 +263,7 @@ struct qeth_hdr_layer3 {
__u16 frame_offset;
union {
/* TX: */
- u8 ipv6_addr[16];
+ struct in6_addr ipv6_addr;
struct ipv4 {
u8 res[12];
u32 addr;
@@ -494,10 +443,53 @@ enum qeth_out_q_states {
QETH_OUT_Q_LOCKED_FLUSH,
};
+#define QETH_CARD_STAT_ADD(_c, _stat, _val) ((_c)->stats._stat += (_val))
+#define QETH_CARD_STAT_INC(_c, _stat) QETH_CARD_STAT_ADD(_c, _stat, 1)
+
+#define QETH_TXQ_STAT_ADD(_q, _stat, _val) ((_q)->stats._stat += (_val))
+#define QETH_TXQ_STAT_INC(_q, _stat) QETH_TXQ_STAT_ADD(_q, _stat, 1)
+
+struct qeth_card_stats {
+ u64 rx_bufs;
+ u64 rx_skb_csum;
+ u64 rx_sg_skbs;
+ u64 rx_sg_frags;
+ u64 rx_sg_alloc_page;
+
+ /* rtnl_link_stats64 */
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_errors;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 tx_errors;
+};
+
+struct qeth_out_q_stats {
+ u64 bufs;
+ u64 bufs_pack;
+ u64 buf_elements;
+ u64 skbs_pack;
+ u64 skbs_sg;
+ u64 skbs_csum;
+ u64 skbs_tso;
+ u64 skbs_linearized;
+ u64 skbs_linearized_fail;
+ u64 tso_bytes;
+ u64 packing_mode_switch;
+
+ /* rtnl_link_stats64 */
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_errors;
+ u64 tx_dropped;
+};
+
struct qeth_qdio_out_q {
struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_outbuf_state *bufstates; /* convenience pointer */
+ struct qeth_out_q_stats stats;
int queue_no;
struct qeth_card *card;
atomic_t state;
@@ -527,7 +519,7 @@ struct qeth_qdio_info {
/* output */
int no_out_queues;
- struct qeth_qdio_out_q **out_qs;
+ struct qeth_qdio_out_q *out_qs[QETH_MAX_QUEUES];
struct qdio_outbuf_state *out_bufstates;
/* priority queueing */
@@ -559,8 +551,6 @@ enum qeth_card_states {
CARD_STATE_DOWN,
CARD_STATE_HARDSETUP,
CARD_STATE_SOFTSETUP,
- CARD_STATE_UP,
- CARD_STATE_RECOVER,
};
/**
@@ -594,8 +584,8 @@ struct qeth_channel;
struct qeth_cmd_buffer {
enum qeth_cmd_buffer_state state;
struct qeth_channel *channel;
+ struct qeth_reply *reply;
unsigned char *data;
- int rc;
void (*callback)(struct qeth_card *card, struct qeth_channel *channel,
struct qeth_cmd_buffer *iob);
};
@@ -665,7 +655,6 @@ struct qeth_card_blkt {
#define QETH_BROADCAST_WITH_ECHO 0x01
#define QETH_BROADCAST_WITHOUT_ECHO 0x02
-#define QETH_LAYER2_MAC_READ 0x01
#define QETH_LAYER2_MAC_REGISTERED 0x02
struct qeth_card_info {
unsigned short unit_addr2;
@@ -673,6 +662,7 @@ struct qeth_card_info {
unsigned short chpid;
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
+ u8 open_when_online:1;
int guestlan;
int mac_bits;
enum qeth_card_types type;
@@ -702,7 +692,6 @@ struct qeth_card_options {
struct qeth_vnicc_info vnicc; /* VNICC options */
int fake_broadcast;
enum qeth_discipline_id layer;
- int performance_stats;
int rx_sg_cb;
enum qeth_ipa_isolation_modes isolation;
enum qeth_ipa_isolation_modes prev_isolation;
@@ -742,11 +731,6 @@ struct qeth_discipline {
struct qeth_ipa_cmd *cmd);
};
-struct qeth_vlan_vid {
- struct list_head list;
- unsigned short vid;
-};
-
enum qeth_addr_disposition {
QETH_DISP_ADDR_DELETE = 0,
QETH_DISP_ADDR_DO_NOTHING = 1,
@@ -775,7 +759,6 @@ struct qeth_switch_info {
#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
struct qeth_card {
- struct list_head list;
enum qeth_card_states state;
spinlock_t lock;
struct ccwgroup_device *gdev;
@@ -784,18 +767,16 @@ struct qeth_card {
struct qeth_channel data;
struct net_device *dev;
- struct net_device_stats stats;
-
+ struct qeth_card_stats stats;
struct qeth_card_info info;
struct qeth_token token;
struct qeth_seqno seqno;
struct qeth_card_options options;
+ struct workqueue_struct *event_wq;
wait_queue_head_t wait_q;
spinlock_t mclock;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct mutex vid_list_mutex; /* vid_list */
- struct list_head vid_list;
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
DECLARE_HASHTABLE(ip_mc_htable, 4);
@@ -804,13 +785,11 @@ struct qeth_card {
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
- struct task_struct *recovery_task;
spinlock_t ip_lock;
struct qeth_ipato ipato;
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
struct qeth_qdio_info qdio;
- struct qeth_perf_stats perf_stats;
int read_or_write_problem;
struct qeth_osn_info osn_info;
struct qeth_discipline *discipline;
@@ -827,10 +806,10 @@ struct qeth_card {
struct work_struct close_dev_work;
};
-struct qeth_card_list_struct {
- struct list_head list;
- rwlock_t rwlock;
-};
+static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
+{
+ return card->state == CARD_STATE_SOFTSETUP;
+}
struct qeth_trap_id {
__u16 lparnr;
@@ -872,11 +851,6 @@ static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
return PFN_UP(end) - PFN_DOWN(start);
}
-static inline int qeth_get_micros(void)
-{
- return (int) (get_tod_clock() >> 12);
-}
-
static inline int qeth_get_ip_version(struct sk_buff *skb)
{
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
@@ -901,8 +875,7 @@ static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
if ((card->dev->features & NETIF_F_RXCSUM) &&
(flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (card->options.performance_stats)
- card->perf_stats.rx_csum++;
+ QETH_CARD_STAT_INC(card, rx_skb_csum);
} else {
skb->ip_summed = CHECKSUM_NONE;
}
@@ -964,30 +937,27 @@ static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card,
extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
+extern const struct ethtool_ops qeth_ethtool_ops;
+extern const struct ethtool_ops qeth_osn_ethtool_ops;
extern const struct attribute_group *qeth_generic_attr_groups[];
extern const struct attribute_group *qeth_osn_attr_groups[];
extern const struct attribute_group qeth_device_attr_group;
extern const struct attribute_group qeth_device_blkt_group;
extern const struct device_type qeth_generic_devtype;
-extern struct workqueue_struct *qeth_wq;
-int qeth_card_hw_is_reachable(struct qeth_card *);
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_realloc_buffer_pool(struct qeth_card *, int);
int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
void qeth_core_free_discipline(struct qeth_card *);
/* exports for qeth discipline device drivers */
-extern struct qeth_card_list_struct qeth_core_card_list;
extern struct kmem_cache *qeth_core_header_cache;
extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
struct net_device *qeth_clone_netdev(struct net_device *orig);
-void qeth_set_recovery_task(struct qeth_card *);
-void qeth_clear_recovery_task(struct qeth_card *);
+struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_wait_for_threads(struct qeth_card *, unsigned long);
int qeth_do_run_thread(struct qeth_card *, unsigned long);
void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
@@ -1011,41 +981,29 @@ void qeth_clear_working_pool_list(struct qeth_card *);
void qeth_clear_cmd_buffers(struct qeth_channel *);
void qeth_clear_qdio_buffers(struct qeth_card *);
void qeth_setadp_promisc_mode(struct qeth_card *);
-struct net_device_stats *qeth_get_stats(struct net_device *);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *);
void qeth_prepare_control_data(struct qeth_card *, int,
struct qeth_cmd_buffer *);
void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob);
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+ u16 cmd_length);
struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
-int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
- int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
- void *reply_param);
+int qeth_query_card_info(struct qeth_card *card,
+ struct carrier_info *carrier_info);
unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
-int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
- struct qeth_hdr *hdr, unsigned int offset,
- unsigned int hd_len);
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr *hdr,
unsigned int offset, unsigned int hd_len,
int elements_needed);
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-int qeth_core_get_sset_count(struct net_device *, int);
-void qeth_core_get_ethtool_stats(struct net_device *,
- struct ethtool_stats *, u64 *);
-void qeth_core_get_strings(struct net_device *, u32, u8 *);
-void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
-int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd);
int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
void qeth_trace_features(struct qeth_card *);
-void qeth_close_dev(struct qeth_card *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
enum qeth_ipa_funcs,
@@ -1057,16 +1015,16 @@ netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
+void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+int qeth_open(struct net_device *dev);
+int qeth_stop(struct net_device *dev);
+
int qeth_vm_request_mac(struct qeth_card *card);
-int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_hdr **hdr, unsigned int hdr_len,
- unsigned int proto_len, unsigned int *elements);
-void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
- struct sk_buff *skb, unsigned int proto_len);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type,
- void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb, int ipv, int cast_type,
+ void (*fill_header)(struct qeth_qdio_out_q *queue,
+ struct qeth_hdr *hdr, struct sk_buff *skb,
+ int ipv, int cast_type,
unsigned int data_len));
/* exports for OSN */
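
Note: qeth_get_stats64() declared above replaces the removed qeth_get_stats() and the ethtool stat exports; the per-card RX counters and per-queue TX counters are folded into one rtnl_link_stats64-style total when the stack asks for device statistics. A minimal user-space model of how such an aggregation looks (simplified types, not the driver's actual function):

#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES 4    /* stand-in for QETH_MAX_QUEUES */

struct txq_stats  { uint64_t tx_packets, tx_bytes, tx_errors, tx_dropped; };
struct link_stats { uint64_t tx_packets, tx_bytes, tx_errors, tx_dropped; };

static void get_stats64(const struct txq_stats q[], int nq,
                        struct link_stats *out)
{
        for (int i = 0; i < nq; i++) {
                out->tx_packets += q[i].tx_packets;
                out->tx_bytes   += q[i].tx_bytes;
                out->tx_errors  += q[i].tx_errors;
                out->tx_dropped += q[i].tx_dropped;
        }
}

int main(void)
{
        struct txq_stats q[MAX_QUEUES] = {
                { .tx_packets = 10, .tx_bytes = 1000 },
                { .tx_packets =  5, .tx_bytes =  500 },
        };
        struct link_stats total = { 0 };

        get_stats64(q, MAX_QUEUES, &total);
        printf("pkts=%llu bytes=%llu\n",
               (unsigned long long)total.tx_packets,
               (unsigned long long)total.tx_bytes);
        return 0;
}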
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 254065271867..44bd6f04c145 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -54,8 +54,6 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
};
EXPORT_SYMBOL_GPL(qeth_dbf);
-struct qeth_card_list_struct qeth_core_card_list;
-EXPORT_SYMBOL_GPL(qeth_core_card_list);
struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;
@@ -76,35 +74,15 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
-struct workqueue_struct *qeth_wq;
-EXPORT_SYMBOL_GPL(qeth_wq);
-
-int qeth_card_hw_is_reachable(struct qeth_card *card)
-{
- return (card->state == CARD_STATE_SOFTSETUP) ||
- (card->state == CARD_STATE_UP);
-}
-EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable);
-
static void qeth_close_dev_handler(struct work_struct *work)
{
struct qeth_card *card;
card = container_of(work, struct qeth_card, close_dev_work);
QETH_CARD_TEXT(card, 2, "cldevhdl");
- rtnl_lock();
- dev_close(card->dev);
- rtnl_unlock();
ccwgroup_set_offline(card->gdev);
}
-void qeth_close_dev(struct qeth_card *card)
-{
- QETH_CARD_TEXT(card, 2, "cldevsubm");
- queue_work(qeth_wq, &card->close_dev_work);
-}
-EXPORT_SYMBOL_GPL(qeth_close_dev);
-
static const char *qeth_get_cardname(struct qeth_card *card)
{
if (card->info.guestlan) {
@@ -195,23 +173,6 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
return "n/a";
}
-void qeth_set_recovery_task(struct qeth_card *card)
-{
- card->recovery_task = current;
-}
-EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
-
-void qeth_clear_recovery_task(struct qeth_card *card)
-{
- card->recovery_task = NULL;
-}
-EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
-
-static bool qeth_is_recovery_task(const struct qeth_card *card)
-{
- return card->recovery_task == current;
-}
-
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask)
{
@@ -238,15 +199,6 @@ int qeth_threads_running(struct qeth_card *card, unsigned long threads)
}
EXPORT_SYMBOL_GPL(qeth_threads_running);
-int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
-{
- if (qeth_is_recovery_task(card))
- return 0;
- return wait_event_interruptible(card->wait_q,
- qeth_threads_running(card, threads) == 0);
-}
-EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
-
void qeth_clear_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
@@ -294,8 +246,7 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
QETH_CARD_TEXT(card, 2, "realcbp");
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
+ if (card->state != CARD_STATE_DOWN)
return -EPERM;
/* TODO: steel/add buffers from/to a running card's buffer pool (?) */
@@ -568,6 +519,7 @@ static int __qeth_issue_next_read(struct qeth_card *card)
QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
rc, CARD_DEVID(card));
atomic_set(&channel->irq_pending, 0);
+ qeth_release_buffer(channel, iob);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
wake_up(&card->wait_q);
@@ -594,6 +546,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
if (reply) {
refcount_set(&reply->refcnt, 1);
atomic_set(&reply->received, 0);
+ init_waitqueue_head(&reply->wait_q);
}
return reply;
}
@@ -609,6 +562,26 @@ static void qeth_put_reply(struct qeth_reply *reply)
kfree(reply);
}
+static void qeth_enqueue_reply(struct qeth_card *card, struct qeth_reply *reply)
+{
+ spin_lock_irq(&card->lock);
+ list_add_tail(&reply->list, &card->cmd_waiter_list);
+ spin_unlock_irq(&card->lock);
+}
+
+static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply)
+{
+ spin_lock_irq(&card->lock);
+ list_del(&reply->list);
+ spin_unlock_irq(&card->lock);
+}
+
+static void qeth_notify_reply(struct qeth_reply *reply)
+{
+ atomic_inc(&reply->received);
+ wake_up(&reply->wait_q);
+}
+
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
{
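
Note: the new enqueue/dequeue/notify helpers centralize what used to be open-coded list handling: a waiter parks on reply->wait_q until some completer sets reply->rc and bumps the received counter, while the existing qeth_get_reply()/qeth_put_reply() refcounting keeps the object alive for whichever side finishes last. A very reduced user-space model of that lifetime, with a mutex/condvar standing in for the kernel waitqueue (all names illustrative, none of this is the driver's code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct reply {
        int refcnt;                     /* qeth_get_reply()/qeth_put_reply() */
        int received;                   /* set once the response is in */
        int rc;                         /* result handed to the waiter */
        pthread_mutex_t lock;
        pthread_cond_t wait_q;
};

static struct reply *reply_alloc(void)
{
        struct reply *r = calloc(1, sizeof(*r));

        r->refcnt = 1;
        pthread_mutex_init(&r->lock, NULL);
        pthread_cond_init(&r->wait_q, NULL);
        return r;
}

static void reply_put(struct reply *r)
{
        int last;

        pthread_mutex_lock(&r->lock);
        last = (--r->refcnt == 0);
        pthread_mutex_unlock(&r->lock);
        if (last) {
                pthread_cond_destroy(&r->wait_q);
                pthread_mutex_destroy(&r->lock);
                free(r);
        }
}

static void reply_notify(struct reply *r, int rc)       /* ~qeth_notify_reply() */
{
        pthread_mutex_lock(&r->lock);
        r->rc = rc;
        r->received = 1;
        pthread_cond_broadcast(&r->wait_q);
        pthread_mutex_unlock(&r->lock);
}

static int reply_wait(struct reply *r)                  /* ~wait_event() */
{
        int rc;

        pthread_mutex_lock(&r->lock);
        while (!r->received)
                pthread_cond_wait(&r->wait_q, &r->lock);
        rc = r->rc;
        pthread_mutex_unlock(&r->lock);
        return rc;
}

static void *irq_side(void *arg)
{
        reply_notify(arg, 0);           /* response arrived, no error */
        reply_put(arg);                 /* drop the completer's reference */
        return NULL;
}

int main(void)
{
        struct reply *r = reply_alloc();
        pthread_t t;

        r->refcnt++;                    /* second reference for the completer */
        pthread_create(&t, NULL, irq_side, r);
        printf("rc=%d\n", reply_wait(r));
        pthread_join(t, NULL);
        reply_put(r);
        return 0;
}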
@@ -646,7 +619,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
dev_err(&card->gdev->dev,
"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
QETH_CARD_IFNAME(card));
- qeth_close_dev(card);
+ schedule_work(&card->close_dev_work);
} else {
dev_warn(&card->gdev->dev,
"The link for interface %s on CHPID 0x%X failed\n",
@@ -685,19 +658,15 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
void qeth_clear_ipacmd_list(struct qeth_card *card)
{
- struct qeth_reply *reply, *r;
+ struct qeth_reply *reply;
unsigned long flags;
QETH_CARD_TEXT(card, 4, "clipalst");
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
- qeth_get_reply(reply);
+ list_for_each_entry(reply, &card->cmd_waiter_list, list) {
reply->rc = -EIO;
- atomic_inc(&reply->received);
- list_del_init(&reply->list);
- wake_up(&reply->wait_q);
- qeth_put_reply(reply);
+ qeth_notify_reply(reply);
}
spin_unlock_irqrestore(&card->lock, flags);
}
@@ -754,7 +723,10 @@ void qeth_release_buffer(struct qeth_channel *channel,
spin_lock_irqsave(&channel->iob_lock, flags);
iob->state = BUF_STATE_FREE;
iob->callback = qeth_send_control_data_cb;
- iob->rc = 0;
+ if (iob->reply) {
+ qeth_put_reply(iob->reply);
+ iob->reply = NULL;
+ }
spin_unlock_irqrestore(&channel->iob_lock, flags);
wake_up(&channel->wait_q);
}
@@ -767,6 +739,17 @@ static void qeth_release_buffer_cb(struct qeth_card *card,
qeth_release_buffer(channel, iob);
}
+static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
+{
+ struct qeth_reply *reply = iob->reply;
+
+ if (reply) {
+ reply->rc = rc;
+ qeth_notify_reply(reply);
+ }
+ qeth_release_buffer(iob->channel, iob);
+}
+
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
{
struct qeth_cmd_buffer *buffer = NULL;
@@ -802,9 +785,9 @@ static void qeth_send_control_data_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
{
struct qeth_ipa_cmd *cmd = NULL;
- struct qeth_reply *reply, *r;
+ struct qeth_reply *reply = NULL;
+ struct qeth_reply *r;
unsigned long flags;
- int keep_reply;
int rc = 0;
QETH_CARD_TEXT(card, 4, "sndctlcb");
@@ -836,44 +819,40 @@ static void qeth_send_control_data_cb(struct qeth_card *card,
goto out;
}
+ /* match against pending cmd requests */
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
- if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
- ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
+ list_for_each_entry(r, &card->cmd_waiter_list, list) {
+ if ((r->seqno == QETH_IDX_COMMAND_SEQNO) ||
+ (cmd && (r->seqno == cmd->hdr.seqno))) {
+ reply = r;
+ /* take the object outside the lock */
qeth_get_reply(reply);
- list_del_init(&reply->list);
- spin_unlock_irqrestore(&card->lock, flags);
- keep_reply = 0;
- if (reply->callback != NULL) {
- if (cmd) {
- reply->offset = (__u16)((char *)cmd -
- (char *)iob->data);
- keep_reply = reply->callback(card,
- reply,
- (unsigned long)cmd);
- } else
- keep_reply = reply->callback(card,
- reply,
- (unsigned long)iob);
- }
- if (cmd)
- reply->rc = (u16) cmd->hdr.return_code;
- else if (iob->rc)
- reply->rc = iob->rc;
- if (keep_reply) {
- spin_lock_irqsave(&card->lock, flags);
- list_add_tail(&reply->list,
- &card->cmd_waiter_list);
- spin_unlock_irqrestore(&card->lock, flags);
- } else {
- atomic_inc(&reply->received);
- wake_up(&reply->wait_q);
- }
- qeth_put_reply(reply);
- goto out;
+ break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
+
+ if (!reply)
+ goto out;
+
+ if (!reply->callback) {
+ rc = 0;
+ } else {
+ if (cmd) {
+ reply->offset = (u16)((char *)cmd - (char *)iob->data);
+ rc = reply->callback(card, reply, (unsigned long)cmd);
+ } else {
+ rc = reply->callback(card, reply, (unsigned long)iob);
+ }
+ }
+
+ if (rc <= 0) {
+ reply->rc = rc;
+ qeth_notify_reply(reply);
+ }
+
+ qeth_put_reply(reply);
+
out:
memcpy(&card->seqno.pdu_hdr_ack,
QETH_PDU_HEADER_SEQ_NO(iob->data),
@@ -1004,9 +983,8 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
return 0;
}
-static long qeth_check_irb_error(struct qeth_card *card,
- struct ccw_device *cdev, unsigned long intparm,
- struct irb *irb)
+static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
+ unsigned long intparm, struct irb *irb)
{
if (!IS_ERR(irb))
return 0;
@@ -1017,7 +995,7 @@ static long qeth_check_irb_error(struct qeth_card *card,
CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
- break;
+ return -EIO;
case -ETIMEDOUT:
dev_warn(&cdev->dev, "A hardware operation timed out"
" on the device\n");
@@ -1029,14 +1007,14 @@ static long qeth_check_irb_error(struct qeth_card *card,
wake_up(&card->wait_q);
}
}
- break;
+ return -ETIMEDOUT;
default:
QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
PTR_ERR(irb), CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT(card, 2, " rc???");
+ return PTR_ERR(irb);
}
- return PTR_ERR(irb);
}
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
@@ -1071,10 +1049,11 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
if (qeth_intparm_is_iob(intparm))
iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
- if (qeth_check_irb_error(card, cdev, intparm, irb)) {
+ rc = qeth_check_irb_error(card, cdev, intparm, irb);
+ if (rc) {
/* IO was terminated, free its resources. */
if (iob)
- qeth_release_buffer(iob->channel, iob);
+ qeth_cancel_cmd(iob, rc);
atomic_set(&channel->irq_pending, 0);
wake_up(&card->wait_q);
return;
@@ -1129,6 +1108,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
rc = qeth_get_problem(card, cdev, irb);
if (rc) {
card->read_or_write_problem = 1;
+ if (iob)
+ qeth_cancel_cmd(iob, rc);
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
goto out;
@@ -1169,13 +1150,16 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
{
+ struct sk_buff *skb;
+
/* release may never happen from within CQ tasklet scope */
WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
- __skb_queue_purge(&buf->skb_list);
+ while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
+ consume_skb(skb);
}
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
@@ -1290,7 +1274,6 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
channel->iob[cnt].state = BUF_STATE_FREE;
channel->iob[cnt].channel = channel;
channel->iob[cnt].callback = qeth_send_control_data_cb;
- channel->iob[cnt].rc = 0;
}
if (cnt < QETH_CMD_BUFFER_NO) {
qeth_clean_channel(channel);
@@ -1432,7 +1415,6 @@ static void qeth_setup_card(struct qeth_card *card)
spin_lock_init(&card->thread_mask_lock);
mutex_init(&card->conf_mutex);
mutex_init(&card->discipline_mutex);
- mutex_init(&card->vid_list_mutex);
INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
INIT_LIST_HEAD(&card->cmd_waiter_list);
init_waitqueue_head(&card->wait_q);
@@ -1468,6 +1450,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
CARD_RDEV(card) = gdev->cdev[0];
CARD_WDEV(card) = gdev->cdev[1];
CARD_DDEV(card) = gdev->cdev[2];
+
+ card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
+ if (!card->event_wq)
+ goto out_wq;
if (qeth_setup_channel(&card->read, true))
goto out_ip;
if (qeth_setup_channel(&card->write, true))
@@ -1483,6 +1469,8 @@ out_data:
out_channel:
qeth_clean_channel(&card->read);
out_ip:
+ destroy_workqueue(card->event_wq);
+out_wq:
dev_set_drvdata(&gdev->dev, NULL);
kfree(card);
out:
@@ -1811,6 +1799,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card,
QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
atomic_set(&channel->irq_pending, 0);
+ qeth_release_buffer(channel, iob);
wake_up(&card->wait_q);
return rc;
}
@@ -1880,6 +1869,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
rc);
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
atomic_set(&channel->irq_pending, 0);
+ qeth_release_buffer(channel, iob);
wake_up(&card->wait_q);
return rc;
}
@@ -2010,7 +2000,7 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,
card->seqno.pdu_hdr++;
memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
&card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
- QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
+ QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN));
}
EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
@@ -2027,24 +2017,22 @@ EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
* for the IPA commands.
* @reply_param: private pointer passed to the callback
*
- * Returns the value of the `return_code' field of the response
- * block returned from the hardware, or other error indication.
- * Value of zero indicates successful execution of the command.
- *
* Callback function gets called one or more times, with cb_cmd
* pointing to the response returned by the hardware. Callback
- * function must return non-zero if more reply blocks are expected,
- * and zero if the last or only reply block is received. Callback
- * function can get the value of the reply_param pointer from the
+ * function must return
+ * > 0 if more reply blocks are expected,
+ * 0 if the last or only reply block is received, and
+ * < 0 on error.
+ * Callback function can get the value of the reply_param pointer from the
* field 'param' of the structure qeth_reply.
*/
-int qeth_send_control_data(struct qeth_card *card, int len,
- struct qeth_cmd_buffer *iob,
- int (*reply_cb)(struct qeth_card *cb_card,
- struct qeth_reply *cb_reply,
- unsigned long cb_cmd),
- void *reply_param)
+static int qeth_send_control_data(struct qeth_card *card, int len,
+ struct qeth_cmd_buffer *iob,
+ int (*reply_cb)(struct qeth_card *cb_card,
+ struct qeth_reply *cb_reply,
+ unsigned long cb_cmd),
+ void *reply_param)
{
struct qeth_channel *channel = iob->channel;
int rc;
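
Note: the reworked kernel-doc above describes the key behavioural change in this file: reply callbacks now signal "more blocks expected" with a positive value and report errors as negative errnos, instead of stashing a return code and a keep_reply flag. A small user-space sketch of a dispatcher honouring that contract (illustrative names, no real hardware data):

#include <errno.h>
#include <stdio.h>

struct reply {
        int rc;
        int done;
        int (*callback)(struct reply *r, const void *cmd);
        int blocks_seen;                /* example per-reply state */
};

/* callback following the documented convention:
 *   > 0  more reply blocks are expected
 *     0  last (or only) block received
 *   < 0  error
 */
static int multi_block_cb(struct reply *r, const void *cmd)
{
        const int *block = cmd;

        if (*block < 0)
                return -EIO;
        r->blocks_seen++;
        return (*block > 0) ? 1 : 0;    /* non-zero block id: more follow */
}

/* dispatcher side, modelled on qeth_send_control_data_cb(): complete the
 * waiter as soon as the callback stops asking for more blocks */
static void dispatch(struct reply *r, const void *cmd)
{
        int rc = r->callback(r, cmd);

        if (rc <= 0) {
                r->rc = rc;
                r->done = 1;
        }
}

int main(void)
{
        struct reply r = { .callback = multi_block_cb };
        int blocks[] = { 1, 1, 0 };     /* two intermediate blocks, then the last */

        for (int i = 0; i < 3 && !r.done; i++)
                dispatch(&r, &blocks[i]);

        printf("done=%d rc=%d blocks=%d\n", r.done, r.rc, r.blocks_seen);
        return 0;
}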
@@ -2060,12 +2048,15 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
reply = qeth_alloc_reply(card);
if (!reply) {
+ qeth_release_buffer(channel, iob);
return -ENOMEM;
}
reply->callback = reply_cb;
reply->param = reply_param;
- init_waitqueue_head(&reply->wait_q);
+ /* pairs with qeth_release_buffer(): */
+ qeth_get_reply(reply);
+ iob->reply = reply;
while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ;
@@ -2080,9 +2071,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
qeth_prepare_control_data(card, len, iob);
- spin_lock_irq(&card->lock);
- list_add_tail(&reply->list, &card->cmd_waiter_list);
- spin_unlock_irq(&card->lock);
+ qeth_enqueue_reply(card, reply);
timeout = jiffies + event_timeout;
@@ -2095,10 +2084,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
- spin_lock_irq(&card->lock);
- list_del_init(&reply->list);
+ qeth_dequeue_reply(card, reply);
qeth_put_reply(reply);
- spin_unlock_irq(&card->lock);
qeth_release_buffer(channel, iob);
atomic_set(&channel->irq_pending, 0);
wake_up(&card->wait_q);
@@ -2120,21 +2107,16 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
}
+ qeth_dequeue_reply(card, reply);
rc = reply->rc;
qeth_put_reply(reply);
return rc;
time_err:
- reply->rc = -ETIME;
- spin_lock_irq(&card->lock);
- list_del_init(&reply->list);
- spin_unlock_irq(&card->lock);
- atomic_inc(&reply->received);
- rc = reply->rc;
+ qeth_dequeue_reply(card, reply);
qeth_put_reply(reply);
- return rc;
+ return -ETIME;
}
-EXPORT_SYMBOL_GPL(qeth_send_control_data);
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
@@ -2339,9 +2321,8 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
QETH_DBF_TEXT(SETUP, 2, "olmlimit");
dev_err(&card->gdev->dev, "A connection could not be "
"established because of an OLM limit\n");
- iob->rc = -EMLINK;
+ return -EMLINK;
}
- QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return 0;
}
@@ -2391,11 +2372,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
return 0;
}
-static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
+static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
if (!q)
return;
+ qeth_clear_outq_buffers(q, 1);
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
kfree(q);
}
@@ -2434,12 +2416,6 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
goto out_freeinq;
/* outbound */
- card->qdio.out_qs =
- kcalloc(card->qdio.no_out_queues,
- sizeof(struct qeth_qdio_out_q *),
- GFP_KERNEL);
- if (!card->qdio.out_qs)
- goto out_freepool;
for (i = 0; i < card->qdio.no_out_queues; ++i) {
card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
if (!card->qdio.out_qs[i])
@@ -2470,12 +2446,9 @@ out_freeoutqbufs:
}
out_freeoutq:
while (i > 0) {
- qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+ qeth_free_output_queue(card->qdio.out_qs[--i]);
+ card->qdio.out_qs[i] = NULL;
}
- kfree(card->qdio.out_qs);
- card->qdio.out_qs = NULL;
-out_freepool:
qeth_free_buffer_pool(card);
out_freeinq:
qeth_free_qdio_queue(card->qdio.in_q);
@@ -2504,13 +2477,9 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
/* inbound buffer pool */
qeth_free_buffer_pool(card);
/* free outbound qdio_qs */
- if (card->qdio.out_qs) {
- for (i = 0; i < card->qdio.no_out_queues; ++i) {
- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
- qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
- }
- kfree(card->qdio.out_qs);
- card->qdio.out_qs = NULL;
+ for (i = 0; i < card->qdio.no_out_queues; i++) {
+ qeth_free_output_queue(card->qdio.out_qs[i]);
+ card->qdio.out_qs[i] = NULL;
}
}
@@ -2717,8 +2686,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
} else {
free_page((unsigned long)entry->elements[i]);
entry->elements[i] = page_address(page);
- if (card->options.performance_stats)
- card->perf_stats.sg_alloc_page_rx++;
+ QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
}
}
}
@@ -2837,6 +2805,23 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
cmd->hdr.prot_version = prot;
}
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+ u16 cmd_length)
+{
+ u16 total_length = IPA_PDU_HEADER_SIZE + cmd_length;
+ u8 prot_type = qeth_mpc_select_prot_type(card);
+
+ memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
+ memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
+ memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
+ memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
+ memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
+ memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
+ &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+ memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
+}
+EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
+
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
{
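
Note: qeth_prepare_ipa_cmd() now takes the command length explicitly, so callers with variable-sized payloads (the SNMP path further down in this patch) can reuse it instead of the dedicated qeth_send_ipa_snmp_cmd() that gets removed. The arithmetic is simply total = PDU header + command length, with the command length repeated in the three per-PDU length fields. A toy sketch with made-up offsets (the real offsets come from the QETH_IPA_PDU_LEN_* macros):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PDU_HEADER_SIZE 0x40            /* stand-in for IPA_PDU_HEADER_SIZE */
#define LEN_TOTAL_OFF   0x0e            /* offsets are illustrative only */
#define LEN_PDU1_OFF    0x26
#define LEN_PDU2_OFF    0x29
#define LEN_PDU3_OFF    0x3a

static void prepare_pdu_lengths(uint8_t *pdu, uint16_t cmd_length)
{
        uint16_t total = PDU_HEADER_SIZE + cmd_length;

        memcpy(pdu + LEN_TOTAL_OFF, &total, 2);
        memcpy(pdu + LEN_PDU1_OFF, &cmd_length, 2);
        memcpy(pdu + LEN_PDU2_OFF, &cmd_length, 2);
        memcpy(pdu + LEN_PDU3_OFF, &cmd_length, 2);
}

int main(void)
{
        uint8_t pdu[PDU_HEADER_SIZE] = { 0 };
        uint16_t total;

        prepare_pdu_lengths(pdu, 123);  /* e.g. a short variable-length request */
        memcpy(&total, pdu + LEN_TOTAL_OFF, 2);
        printf("total=%u\n", total);
        return 0;
}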
@@ -2844,6 +2829,7 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
iob = qeth_get_buffer(&card->write);
if (iob) {
+ qeth_prepare_ipa_cmd(card, iob, sizeof(struct qeth_ipa_cmd));
qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot);
} else {
dev_warn(&card->gdev->dev,
@@ -2856,16 +2842,13 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
}
EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob)
+static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
{
- u8 prot_type = qeth_mpc_select_prot_type(card);
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
- memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
- memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
- memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
- &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+ return (cmd->hdr.return_code) ? -EIO : 0;
}
-EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
/**
* qeth_send_ipa_cmd() - send an IPA command
@@ -2878,12 +2861,15 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
unsigned long),
void *reply_param)
{
+ u16 length;
int rc;
QETH_CARD_TEXT(card, 4, "sendipa");
- qeth_prepare_ipa_cmd(card, iob);
- rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
- iob, reply_cb, reply_param);
+
+ if (reply_cb == NULL)
+ reply_cb = qeth_send_ipa_cmd_cb;
+ memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2);
+ rc = qeth_send_control_data(card, length, iob, reply_cb, reply_param);
if (rc == -ETIME) {
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
@@ -2892,9 +2878,19 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
+static int qeth_send_startlan_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+
+ if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
+ return -ENETDOWN;
+
+ return (cmd->hdr.return_code) ? -EIO : 0;
+}
+
static int qeth_send_startlan(struct qeth_card *card)
{
- int rc;
struct qeth_cmd_buffer *iob;
QETH_DBF_TEXT(SETUP, 2, "strtlan");
@@ -2902,8 +2898,7 @@ static int qeth_send_startlan(struct qeth_card *card)
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
if (!iob)
return -ENOMEM;
- rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
- return rc;
+ return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
}
static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
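
Note: qeth_send_startlan_cb() above shows the pattern repeated throughout this patch: hardware return codes are translated into errnos inside the callback, so callers only deal with values like -ENETDOWN or -EIO (see the startlan handling later in qeth_core_hardsetup_card). A trivial user-space sketch of that translation (the constant value is illustrative; the real one lives in qeth_core_mpc.h):

#include <errno.h>
#include <stdio.h>

#define IPA_RC_SUCCESS      0x0000
#define IPA_RC_LAN_OFFLINE  0xe080      /* illustrative value */

static int startlan_cb(unsigned int ipa_rc)
{
        if (ipa_rc == IPA_RC_LAN_OFFLINE)
                return -ENETDOWN;
        return ipa_rc ? -EIO : 0;
}

int main(void)
{
        printf("%d %d %d\n", startlan_cb(IPA_RC_SUCCESS),
               startlan_cb(IPA_RC_LAN_OFFLINE), startlan_cb(0x1234));
        return 0;
}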
@@ -2921,7 +2916,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 3, "quyadpcb");
if (qeth_setadpparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
card->info.link_type =
@@ -2976,19 +2971,18 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
cmd = (struct qeth_ipa_cmd *) data;
switch (cmd->hdr.return_code) {
+ case IPA_RC_SUCCESS:
+ break;
case IPA_RC_NOTSUPP:
case IPA_RC_L2_UNSUPPORTED_CMD:
QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
- return 0;
+ return -EOPNOTSUPP;
default:
- if (cmd->hdr.return_code) {
- QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
- CARD_DEVID(card),
- cmd->hdr.return_code);
- return 0;
- }
+ QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
+ CARD_DEVID(card), cmd->hdr.return_code);
+ return -EIO;
}
if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
@@ -3026,7 +3020,7 @@ static int qeth_query_switch_attributes_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "qswiatcb");
if (qeth_setadpparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
sw_info = (struct qeth_switch_info *)reply->param;
attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
@@ -3058,15 +3052,15 @@ int qeth_query_switch_attributes(struct qeth_card *card,
static int qeth_query_setdiagass_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
- __u16 rc;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ u16 rc = cmd->hdr.return_code;
- cmd = (struct qeth_ipa_cmd *)data;
- rc = cmd->hdr.return_code;
- if (rc)
+ if (rc) {
QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
- else
- card->info.diagass_support = cmd->data.diagass.ext;
+ return -EIO;
+ }
+
+ card->info.diagass_support = cmd->data.diagass.ext;
return 0;
}
@@ -3113,13 +3107,13 @@ static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
static int qeth_hw_trap_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
- __u16 rc;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ u16 rc = cmd->hdr.return_code;
- cmd = (struct qeth_ipa_cmd *)data;
- rc = cmd->hdr.return_code;
- if (rc)
+ if (rc) {
QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
+ return -EIO;
+ }
return 0;
}
@@ -3168,7 +3162,7 @@ static int qeth_check_qdio_errors(struct qeth_card *card,
buf->element[14].sflags);
QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
if ((buf->element[15].sflags) == 0x12) {
- card->stats.rx_dropped++;
+ QETH_CARD_STAT_INC(card, rx_dropped);
return 0;
} else
return 1;
@@ -3232,17 +3226,8 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index)
* 'index') un-requeued -> this buffer is the first buffer that
* will be requeued the next time
*/
- if (card->options.performance_stats) {
- card->perf_stats.inbound_do_qdio_cnt++;
- card->perf_stats.inbound_do_qdio_start_time =
- qeth_get_micros();
- }
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
queue->next_buf_to_init, count);
- if (card->options.performance_stats)
- card->perf_stats.inbound_do_qdio_time +=
- qeth_get_micros() -
- card->perf_stats.inbound_do_qdio_start_time;
if (rc) {
QETH_CARD_TEXT(card, 2, "qinberr");
}
@@ -3319,8 +3304,7 @@ static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
>= QETH_HIGH_WATERMARK_PACK){
/* switch non-PACKING -> PACKING */
QETH_CARD_TEXT(queue->card, 6, "np->pack");
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.sc_dp_p++;
+ QETH_TXQ_STAT_INC(queue, packing_mode_switch);
queue->do_pack = 1;
}
}
@@ -3339,8 +3323,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
<= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
QETH_CARD_TEXT(queue->card, 6, "pack->np");
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.sc_p_dp++;
+ QETH_TXQ_STAT_INC(queue, packing_mode_switch);
queue->do_pack = 0;
return qeth_prep_flush_pack_buffer(queue);
}
@@ -3394,25 +3377,16 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
}
}
+ QETH_TXQ_STAT_ADD(queue, bufs, count);
netif_trans_update(queue->card->dev);
- if (queue->card->options.performance_stats) {
- queue->card->perf_stats.outbound_do_qdio_cnt++;
- queue->card->perf_stats.outbound_do_qdio_start_time =
- qeth_get_micros();
- }
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
atomic_add(count, &queue->used_buffers);
-
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.outbound_do_qdio_time +=
- qeth_get_micros() -
- queue->card->perf_stats.outbound_do_qdio_start_time;
if (rc) {
- queue->card->stats.tx_errors += count;
+ QETH_TXQ_STAT_ADD(queue, tx_errors, count);
/* ignore temporary SIGA errors without busy condition */
if (rc == -ENOBUFS)
return;
@@ -3427,8 +3401,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
qeth_schedule_recovery(queue->card);
return;
}
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.bufs_sent += count;
}
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
@@ -3459,10 +3431,8 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
if (!flush_cnt &&
!atomic_read(&queue->set_pci_flags_count))
flush_cnt += qeth_prep_flush_pack_buffer(queue);
- if (queue->card->options.performance_stats &&
- q_was_packing)
- queue->card->perf_stats.bufs_sent_pack +=
- flush_cnt;
+ if (q_was_packing)
+ QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
if (flush_cnt)
qeth_flush_buffers(queue, index, flush_cnt);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
@@ -3492,8 +3462,7 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
goto out;
}
- if (card->state != CARD_STATE_DOWN &&
- card->state != CARD_STATE_RECOVER) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -1;
goto out;
}
@@ -3517,7 +3486,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
int rc;
if (!qeth_is_cq(card, queue))
- goto out;
+ return;
QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
@@ -3526,12 +3495,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
if (qdio_err) {
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
- goto out;
- }
-
- if (card->options.performance_stats) {
- card->perf_stats.cq_cnt++;
- card->perf_stats.cq_start_time = qeth_get_micros();
+ return;
}
for (i = first_element; i < first_element + count; ++i) {
@@ -3559,16 +3523,6 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
}
card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
+ count) % QDIO_MAX_BUFFERS_PER_Q;
-
- netif_wake_queue(card->dev);
-
- if (card->options.performance_stats) {
- int delta_t = qeth_get_micros();
- delta_t -= card->perf_stats.cq_start_time;
- card->perf_stats.cq_time += delta_t;
- }
-out:
- return;
}
static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
@@ -3604,11 +3558,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
qeth_schedule_recovery(card);
return;
}
- if (card->options.performance_stats) {
- card->perf_stats.outbound_handler_cnt++;
- card->perf_stats.outbound_handler_start_time =
- qeth_get_micros();
- }
+
for (i = first_element; i < (first_element + count); ++i) {
int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = queue->bufs[bidx];
@@ -3654,9 +3604,6 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
qeth_check_outbound_queue(queue);
netif_wake_queue(queue->card->dev);
- if (card->options.performance_stats)
- card->perf_stats.outbound_handler_time += qeth_get_micros() -
- card->perf_stats.outbound_handler_start_time;
}
/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
@@ -3777,11 +3724,12 @@ EXPORT_SYMBOL_GPL(qeth_count_elements);
* The number of needed buffer elements is returned in @elements.
* Error to create the hdr is indicated by returning with < 0.
*/
-int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_hdr **hdr, unsigned int hdr_len,
- unsigned int proto_len, unsigned int *elements)
+static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
+ struct sk_buff *skb, struct qeth_hdr **hdr,
+ unsigned int hdr_len, unsigned int proto_len,
+ unsigned int *elements)
{
- const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+ const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(queue->card);
const unsigned int contiguous = proto_len ? proto_len : 1;
unsigned int __elements;
addr_t start, end;
@@ -3820,15 +3768,12 @@ check_layout:
}
rc = skb_linearize(skb);
- if (card->options.performance_stats) {
- if (rc)
- card->perf_stats.tx_linfail++;
- else
- card->perf_stats.tx_lin++;
- }
- if (rc)
+ if (rc) {
+ QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
return rc;
+ }
+ QETH_TXQ_STAT_INC(queue, skbs_linearized);
/* Linearization changed the layout, re-evaluate: */
goto check_layout;
}
@@ -3849,7 +3794,6 @@ check_layout:
skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
return 0;
}
-EXPORT_SYMBOL_GPL(qeth_add_hw_header);
static void __qeth_fill_buffer(struct sk_buff *skb,
struct qeth_qdio_out_buffer *buf,
@@ -3931,7 +3875,6 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
{
struct qdio_buffer *buffer = buf->buffer;
bool is_first_elem = true;
- int flush_cnt = 0;
__skb_queue_tail(&buf->skb_list, skb);
@@ -3952,29 +3895,26 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
if (!queue->do_pack) {
QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
- /* set state to PRIMED -> will be flushed */
- atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
- flush_cnt = 1;
} else {
QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.skbs_sent_pack++;
- if (buf->next_element_to_fill >=
- QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
- /*
- * packed buffer if full -> set state PRIMED
- * -> will be flushed
- */
- atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
- flush_cnt = 1;
- }
+
+ QETH_TXQ_STAT_INC(queue, skbs_pack);
+ /* If the buffer still has free elements, keep using it. */
+ if (buf->next_element_to_fill <
+ QETH_MAX_BUFFER_ELEMENTS(queue->card))
+ return 0;
}
- return flush_cnt;
+
+ /* flush out the buffer */
+ atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+ queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ return 1;
}
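
Note: qeth_fill_buffer() now owns both halves of the packing decision: in packing mode a buffer keeps accepting skbs while it has free elements, and only when it is full (or packing is off) is it marked PRIMED and the ring index advanced, which used to happen in the callers. A user-space model of that ring logic (sizes illustrative, not the driver's structures):

#include <stdio.h>

#define RING_SIZE 8             /* stand-in for QDIO_MAX_BUFFERS_PER_Q */
#define MAX_ELEMENTS 4          /* stand-in for QETH_MAX_BUFFER_ELEMENTS() */

struct out_buf { int next_element; };

struct out_q {
        struct out_buf bufs[RING_SIZE];
        int next_buf_to_fill;
        int do_pack;
};

/* returns the number of buffers that became ready to flush (0 or 1) */
static int fill_buffer(struct out_q *q, int elements)
{
        struct out_buf *buf = &q->bufs[q->next_buf_to_fill];

        buf->next_element += elements;

        /* packing mode: keep using the buffer while it has room */
        if (q->do_pack && buf->next_element < MAX_ELEMENTS)
                return 0;

        /* flush out the buffer and move on to the next slot */
        buf->next_element = 0;
        q->next_buf_to_fill = (q->next_buf_to_fill + 1) % RING_SIZE;
        return 1;
}

int main(void)
{
        struct out_q q = { .do_pack = 1 };
        int flushed = 0;

        for (int i = 0; i < 10; i++)
                flushed += fill_buffer(&q, 1);
        printf("flushed=%d next=%d\n", flushed, q.next_buf_to_fill);
        return 0;
}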
-int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
- struct qeth_hdr *hdr, unsigned int offset,
- unsigned int hd_len)
+static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ unsigned int offset, unsigned int hd_len)
{
int index = queue->next_buf_to_fill;
struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
@@ -3985,12 +3925,10 @@ int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
- queue->next_buf_to_fill = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
qeth_flush_buffers(queue, index, 1);
return 0;
}
-EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr *hdr,
@@ -4044,10 +3982,9 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
}
}
- tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
- queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
- QDIO_MAX_BUFFERS_PER_Q;
- flush_count += tmp;
+
+ flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset,
+ hd_len);
if (flush_count)
qeth_flush_buffers(queue, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
@@ -4075,15 +4012,16 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
out:
/* at this point the queue is UNLOCKED again */
- if (queue->card->options.performance_stats && do_pack)
- queue->card->perf_stats.bufs_sent_pack += flush_count;
+ if (do_pack)
+ QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);
-void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
- struct sk_buff *skb, unsigned int proto_len)
+static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
+ unsigned int payload_len, struct sk_buff *skb,
+ unsigned int proto_len)
{
struct qeth_hdr_ext_tso *ext = &hdr->ext;
@@ -4096,12 +4034,12 @@ void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
ext->mss = skb_shinfo(skb)->gso_size;
ext->dg_hdr_len = proto_len;
}
-EXPORT_SYMBOL_GPL(qeth_fill_tso_ext);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type,
- void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb, int ipv, int cast_type,
+ void (*fill_header)(struct qeth_qdio_out_q *queue,
+ struct qeth_hdr *hdr, struct sk_buff *skb,
+ int ipv, int cast_type,
unsigned int data_len))
{
unsigned int proto_len, hw_hdr_len;
@@ -4119,14 +4057,14 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
} else {
hw_hdr_len = sizeof(struct qeth_hdr);
- proto_len = IS_IQD(card) ? ETH_HLEN : 0;
+ proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
}
rc = skb_cow_head(skb, hw_hdr_len);
if (rc)
return rc;
- push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
+ push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
&elements);
if (push_len < 0)
return push_len;
@@ -4136,7 +4074,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
data_offset = push_len + proto_len;
}
memset(hdr, 0, hw_hdr_len);
- fill_header(card, hdr, skb, ipv, cast_type, frame_len);
+ fill_header(queue, hdr, skb, ipv, cast_type, frame_len);
if (is_tso)
qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
frame_len - proto_len, skb, proto_len);
@@ -4153,14 +4091,12 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
}
if (!rc) {
- if (card->options.performance_stats) {
- card->perf_stats.buf_elements_sent += elements;
- if (is_sg)
- card->perf_stats.sg_skbs_sent++;
- if (is_tso) {
- card->perf_stats.large_send_bytes += frame_len;
- card->perf_stats.large_send_cnt++;
- }
+ QETH_TXQ_STAT_ADD(queue, buf_elements, elements);
+ if (is_sg)
+ QETH_TXQ_STAT_INC(queue, skbs_sg);
+ if (is_tso) {
+ QETH_TXQ_STAT_INC(queue, skbs_tso);
+ QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len);
}
} else {
if (!push_len)
@@ -4187,7 +4123,7 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
setparms->data.mode = SET_PROMISC_MODE_OFF;
}
card->info.promisc_mode = setparms->data.mode;
- return 0;
+ return (cmd->hdr.return_code) ? -EIO : 0;
}
void qeth_setadp_promisc_mode(struct qeth_card *card)
@@ -4219,32 +4155,25 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
-struct net_device_stats *qeth_get_stats(struct net_device *dev)
-{
- struct qeth_card *card;
-
- card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 5, "getstat");
-
- return &card->stats;
-}
-EXPORT_SYMBOL_GPL(qeth_get_stats);
-
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct qeth_ipacmd_setadpparms *adp_cmd;
QETH_CARD_TEXT(card, 4, "chgmaccb");
if (qeth_setadpparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
- if (IS_LAYER3(card) || !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
- ether_addr_copy(card->dev->dev_addr,
- cmd->data.setadapterparms.data.change_addr.addr);
- card->info.mac_bits |= QETH_LAYER2_MAC_READ;
- }
+ adp_cmd = &cmd->data.setadapterparms;
+ if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
+ return -EADDRNOTAVAIL;
+
+ if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
+ !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
+ return -EADDRNOTAVAIL;
+
+ ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
return 0;
}
@@ -4281,7 +4210,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setaccb");
if (cmd->hdr.return_code)
- return 0;
+ return -EIO;
qeth_setadpparms_inspect_rc(cmd);
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
@@ -4355,7 +4284,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
card->options.isolation = card->options.prev_isolation;
break;
}
- return 0;
+ return (cmd->hdr.return_code) ? -EIO : 0;
}
static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
@@ -4419,7 +4348,7 @@ void qeth_tx_timeout(struct net_device *dev)
card = dev->ml_priv;
QETH_CARD_TEXT(card, 4, "txtimeo");
- card->stats.tx_errors++;
+ QETH_CARD_STAT_INC(card, tx_errors);
qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
@@ -4489,30 +4418,6 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
return rc;
}
-static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
- struct qeth_cmd_buffer *iob, int len,
- int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
- unsigned long),
- void *reply_param)
-{
- u16 s1, s2;
-
- QETH_CARD_TEXT(card, 4, "sendsnmp");
-
- memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
- memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
- &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
- /* adjust PDU length fields in IPA_PDU_HEADER */
- s1 = (u32) IPA_PDU_HEADER_SIZE + len;
- s2 = (u32) len;
- memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
- memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
- memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
- memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
- return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
- reply_cb, reply_param);
-}
-
static int qeth_snmp_command_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long sdata)
{
@@ -4530,13 +4435,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
if (cmd->hdr.return_code) {
QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
- return 0;
+ return -EIO;
}
if (cmd->data.setadapterparms.hdr.return_code) {
cmd->hdr.return_code =
cmd->data.setadapterparms.hdr.return_code;
QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
- return 0;
+ return -EIO;
}
data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
if (cmd->data.setadapterparms.hdr.seq_no == 1) {
@@ -4551,9 +4456,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
/* check if there is enough room in userspace */
if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
- QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
- cmd->hdr.return_code = IPA_RC_ENOMEM;
- return 0;
+ QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
+ return -ENOSPC;
}
QETH_CARD_TEXT_(card, 4, "snore%i",
cmd->data.setadapterparms.hdr.used_total);
@@ -4618,10 +4522,13 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
rc = -ENOMEM;
goto out;
}
+
+ /* for large requests, fix-up the length fields: */
+ qeth_prepare_ipa_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len);
+
cmd = __ipa_cmd(iob);
memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
- rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
- qeth_snmp_command_cb, (void *)&qinfo);
+ rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
if (rc)
QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
CARD_DEVID(card), rc);
@@ -4645,16 +4552,14 @@ static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 3, "qoatcb");
if (qeth_setadpparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
priv = (struct qeth_qoat_priv *)reply->param;
resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
resdata = (char *)data + 28;
- if (resdatalen > (priv->buffer_len - priv->response_len)) {
- cmd->hdr.return_code = IPA_RC_FFFF;
- return 0;
- }
+ if (resdatalen > (priv->buffer_len - priv->response_len))
+ return -ENOSPC;
memcpy((priv->buffer + priv->response_len), resdata,
resdatalen);
@@ -4727,9 +4632,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
if (copy_to_user(udata, &oat_data,
sizeof(struct qeth_query_oat_data)))
rc = -EFAULT;
- } else
- if (rc == IPA_RC_FFFF)
- rc = -EFAULT;
+ }
out_free:
vfree(priv.buffer);
@@ -4746,7 +4649,7 @@ static int qeth_query_card_info_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "qcrdincb");
if (qeth_setadpparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
card_info = &cmd->data.setadapterparms.data.card_info;
carrier_info->card_type = card_info->card_type;
@@ -4755,8 +4658,8 @@ static int qeth_query_card_info_cb(struct qeth_card *card,
return 0;
}
-static int qeth_query_card_info(struct qeth_card *card,
- struct carrier_info *carrier_info)
+int qeth_query_card_info(struct qeth_card *card,
+ struct carrier_info *carrier_info)
{
struct qeth_cmd_buffer *iob;
@@ -4984,8 +4887,8 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.output_handler = qeth_qdio_output_handler;
init_data.queue_start_poll_array = queue_start_poll;
init_data.int_parm = (unsigned long) card;
- init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
- init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
+ init_data.input_sbal_addr_array = in_sbal_ptrs;
+ init_data.output_sbal_addr_array = out_sbal_ptrs;
init_data.output_sbal_state_array = card->qdio.out_bufstates;
init_data.scan_threshold =
(card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32;
@@ -5033,6 +4936,7 @@ static void qeth_core_free_card(struct qeth_card *card)
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
qeth_clean_channel(&card->data);
+ destroy_workqueue(card->event_wq);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
dev_set_drvdata(&card->gdev->dev, NULL);
@@ -5147,25 +5051,16 @@ retriable:
rc = qeth_send_startlan(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
- if (rc == IPA_RC_LAN_OFFLINE) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
+ if (rc == -ENETDOWN) {
+ dev_warn(&card->gdev->dev, "The LAN is offline\n");
*carrier_ok = false;
} else {
- rc = -ENODEV;
goto out;
}
} else {
*carrier_ok = true;
}
- if (qeth_netdev_is_registered(card->dev)) {
- if (*carrier_ok)
- netif_carrier_on(card->dev);
- else
- netif_carrier_off(card->dev);
- }
-
card->options.ipa4.supported_funcs = 0;
card->options.ipa6.supported_funcs = 0;
card->options.adp.supported_funcs = 0;
@@ -5311,7 +5206,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "unexeob");
QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
dev_kfree_skb_any(skb);
- card->stats.rx_errors++;
+ QETH_CARD_STAT_INC(card, rx_errors);
return NULL;
}
element++;
@@ -5323,16 +5218,17 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
}
*__element = element;
*__offset = offset;
- if (use_rx_sg && card->options.performance_stats) {
- card->perf_stats.sg_skbs_rx++;
- card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
+ if (use_rx_sg) {
+ QETH_CARD_STAT_INC(card, rx_sg_skbs);
+ QETH_CARD_STAT_ADD(card, rx_sg_frags,
+ skb_shinfo(skb)->nr_frags);
}
return skb;
no_mem:
if (net_ratelimit()) {
QETH_CARD_TEXT(card, 2, "noskbmem");
}
- card->stats.rx_dropped++;
+ QETH_CARD_STAT_INC(card, rx_dropped);
return NULL;
}
EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
@@ -5345,11 +5241,6 @@ int qeth_poll(struct napi_struct *napi, int budget)
int done;
int new_budget = budget;
- if (card->options.performance_stats) {
- card->perf_stats.inbound_cnt++;
- card->perf_stats.inbound_start_time = qeth_get_micros();
- }
-
while (1) {
if (!card->rx.b_count) {
card->rx.qdio_err = 0;
@@ -5378,8 +5269,7 @@ int qeth_poll(struct napi_struct *napi, int budget)
done = 1;
if (done) {
- if (card->options.performance_stats)
- card->perf_stats.bufs_rec++;
+ QETH_CARD_STAT_INC(card, rx_bufs);
qeth_put_buffer_pool_entry(card,
buffer->pool_entry);
qeth_queue_input_buffer(card, card->rx.b_index);
@@ -5407,9 +5297,6 @@ int qeth_poll(struct napi_struct *napi, int budget)
if (qdio_start_irq(card->data.ccwdev, 0))
napi_schedule(&card->napi);
out:
- if (card->options.performance_stats)
- card->perf_stats.inbound_time += qeth_get_micros() -
- card->perf_stats.inbound_start_time;
return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);
@@ -5429,7 +5316,7 @@ static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
struct qeth_ipa_caps *caps = reply->param;
if (qeth_setassparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
caps->supported = cmd->data.setassparms.data.caps.supported;
caps->enabled = cmd->data.setassparms.data.caps.enabled;
@@ -5439,18 +5326,18 @@ static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
int qeth_setassparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
QETH_CARD_TEXT(card, 4, "defadpcb");
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code == 0) {
- cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
- if (cmd->hdr.prot_version == QETH_PROT_IPV4)
- card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
- if (cmd->hdr.prot_version == QETH_PROT_IPV6)
- card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
- }
+ if (cmd->hdr.return_code)
+ return -EIO;
+
+ cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+ card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+ if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+ card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
@@ -5477,34 +5364,11 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
-static int qeth_send_setassparms(struct qeth_card *card,
- struct qeth_cmd_buffer *iob, u16 len,
- long data, int (*reply_cb)(struct qeth_card *,
- struct qeth_reply *,
- unsigned long),
- void *reply_param)
-{
- int rc;
- struct qeth_ipa_cmd *cmd;
-
- QETH_CARD_TEXT(card, 4, "sendassp");
-
- cmd = __ipa_cmd(iob);
- if (len <= sizeof(__u32))
- cmd->data.setassparms.data.flags_32bit = (__u32) data;
- else /* (len > sizeof(__u32)) */
- memcpy(&cmd->data.setassparms.data, (void *) data, len);
-
- rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
- return rc;
-}
-
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
u16 cmd_code, long data,
enum qeth_prot_versions prot)
{
- int rc;
int length = 0;
struct qeth_cmd_buffer *iob;
@@ -5514,9 +5378,9 @@ int qeth_send_simple_setassparms_prot(struct qeth_card *card,
iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
if (!iob)
return -ENOMEM;
- rc = qeth_send_setassparms(card, iob, length, data,
- qeth_setassparms_cb, NULL);
- return rc;
+
+ __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
+ return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
@@ -5719,7 +5583,10 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
SET_NETDEV_DEV(dev, &card->gdev->dev);
netif_carrier_off(dev);
- if (!IS_OSN(card)) {
+ if (IS_OSN(card)) {
+ dev->ethtool_ops = &qeth_osn_ethtool_ops;
+ } else {
+ dev->ethtool_ops = &qeth_ethtool_ops;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->hw_features |= NETIF_F_SG;
dev->vlan_features |= NETIF_F_SG;
@@ -5803,9 +5670,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
break;
}
- write_lock_irq(&qeth_core_card_list.rwlock);
- list_add_tail(&card->list, &qeth_core_card_list.list);
- write_unlock_irq(&qeth_core_card_list.rwlock);
return 0;
err_disc:
@@ -5830,9 +5694,6 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
qeth_core_free_discipline(card);
}
- write_lock_irq(&qeth_core_card_list.rwlock);
- list_del(&card->list);
- write_unlock_irq(&qeth_core_card_list.rwlock);
free_netdev(card->dev);
qeth_core_free_card(card);
put_device(&gdev->dev);
@@ -5947,6 +5808,21 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.restore = qeth_core_restore,
};
+struct qeth_card *qeth_get_card_by_busid(char *bus_id)
+{
+ struct ccwgroup_device *gdev;
+ struct qeth_card *card;
+
+ gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
+ if (!gdev)
+ return NULL;
+
+ card = dev_get_drvdata(&gdev->dev);
+ put_device(&gdev->dev);
+ return card;
+}
+EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
+
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct qeth_card *card = dev->ml_priv;
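qeth_get_card_by_busid(), added above, follows the usual look-up-and-drop shape: get_ccwgroupdev_by_busid() is assumed to return the group device with a temporary reference held, the driver data pointer is copied out, and put_device() releases that reference again, so the caller relies on something else keeping the card alive. A user-space sketch of the same shape, with every name below invented purely for illustration:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical registry entry standing in for a ccwgroup device. */
    struct fake_gdev {
            char bus_id[16];
            int refcount;
            void *drvdata;          /* what dev_get_drvdata() would hand back */
    };

    static struct fake_gdev registry[2];

    /* Assumed behaviour of get_ccwgroupdev_by_busid(): find the device
     * and take a reference so it cannot vanish while we look at it. */
    static struct fake_gdev *get_gdev_by_busid(const char *bus_id)
    {
            unsigned int i;

            for (i = 0; i < 2; i++)
                    if (!strcmp(registry[i].bus_id, bus_id)) {
                            registry[i].refcount++;
                            return &registry[i];
                    }
            return NULL;
    }

    static void put_gdev(struct fake_gdev *gdev)
    {
            gdev->refcount--;
    }

    /* Same shape as qeth_get_card_by_busid(): look up, copy the driver
     * data pointer, drop the temporary device reference. */
    static void *get_card_by_busid(const char *bus_id)
    {
            struct fake_gdev *gdev = get_gdev_by_busid(bus_id);
            void *card;

            if (!gdev)
                    return NULL;
            card = gdev->drvdata;
            put_gdev(gdev);
            return card;
    }

    int main(void)
    {
            static int card = 1;

            strcpy(registry[0].bus_id, "0.0.f5f0");
            registry[0].drvdata = &card;

            printf("found: %p\n", get_card_by_busid("0.0.f5f0"));
            printf("miss:  %p\n", get_card_by_busid("0.0.dead"));
            return 0;
    }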
@@ -5956,12 +5832,6 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!card)
return -ENODEV;
- if (!qeth_card_hw_is_reachable(card))
- return -ENODEV;
-
- if (card->info.type == QETH_CARD_TYPE_OSN)
- return -EPERM;
-
switch (cmd) {
case SIOC_QETH_ADP_SET_SNMP_CONTROL:
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
@@ -6001,454 +5871,91 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);
-static struct {
- const char str[ETH_GSTRING_LEN];
-} qeth_ethtool_stats_keys[] = {
-/* 0 */{"rx skbs"},
- {"rx buffers"},
- {"tx skbs"},
- {"tx buffers"},
- {"tx skbs no packing"},
- {"tx buffers no packing"},
- {"tx skbs packing"},
- {"tx buffers packing"},
- {"tx sg skbs"},
- {"tx buffer elements"},
-/* 10 */{"rx sg skbs"},
- {"rx sg frags"},
- {"rx sg page allocs"},
- {"tx large kbytes"},
- {"tx large count"},
- {"tx pk state ch n->p"},
- {"tx pk state ch p->n"},
- {"tx pk watermark low"},
- {"tx pk watermark high"},
- {"queue 0 buffer usage"},
-/* 20 */{"queue 1 buffer usage"},
- {"queue 2 buffer usage"},
- {"queue 3 buffer usage"},
- {"rx poll time"},
- {"rx poll count"},
- {"rx do_QDIO time"},
- {"rx do_QDIO count"},
- {"tx handler time"},
- {"tx handler count"},
- {"tx time"},
-/* 30 */{"tx count"},
- {"tx do_QDIO time"},
- {"tx do_QDIO count"},
- {"tx csum"},
- {"tx lin"},
- {"tx linfail"},
- {"cq handler count"},
- {"cq handler time"},
- {"rx csum"}
-};
-
-int qeth_core_get_sset_count(struct net_device *dev, int stringset)
-{
- switch (stringset) {
- case ETH_SS_STATS:
- return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
-
-void qeth_core_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct qeth_card *card = dev->ml_priv;
- data[0] = card->stats.rx_packets -
- card->perf_stats.initial_rx_packets;
- data[1] = card->perf_stats.bufs_rec;
- data[2] = card->stats.tx_packets -
- card->perf_stats.initial_tx_packets;
- data[3] = card->perf_stats.bufs_sent;
- data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
- - card->perf_stats.skbs_sent_pack;
- data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
- data[6] = card->perf_stats.skbs_sent_pack;
- data[7] = card->perf_stats.bufs_sent_pack;
- data[8] = card->perf_stats.sg_skbs_sent;
- data[9] = card->perf_stats.buf_elements_sent;
- data[10] = card->perf_stats.sg_skbs_rx;
- data[11] = card->perf_stats.sg_frags_rx;
- data[12] = card->perf_stats.sg_alloc_page_rx;
- data[13] = (card->perf_stats.large_send_bytes >> 10);
- data[14] = card->perf_stats.large_send_cnt;
- data[15] = card->perf_stats.sc_dp_p;
- data[16] = card->perf_stats.sc_p_dp;
- data[17] = QETH_LOW_WATERMARK_PACK;
- data[18] = QETH_HIGH_WATERMARK_PACK;
- data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
- data[20] = (card->qdio.no_out_queues > 1) ?
- atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
- data[21] = (card->qdio.no_out_queues > 2) ?
- atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
- data[22] = (card->qdio.no_out_queues > 3) ?
- atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
- data[23] = card->perf_stats.inbound_time;
- data[24] = card->perf_stats.inbound_cnt;
- data[25] = card->perf_stats.inbound_do_qdio_time;
- data[26] = card->perf_stats.inbound_do_qdio_cnt;
- data[27] = card->perf_stats.outbound_handler_time;
- data[28] = card->perf_stats.outbound_handler_cnt;
- data[29] = card->perf_stats.outbound_time;
- data[30] = card->perf_stats.outbound_cnt;
- data[31] = card->perf_stats.outbound_do_qdio_time;
- data[32] = card->perf_stats.outbound_do_qdio_cnt;
- data[33] = card->perf_stats.tx_csum;
- data[34] = card->perf_stats.tx_lin;
- data[35] = card->perf_stats.tx_linfail;
- data[36] = card->perf_stats.cq_cnt;
- data[37] = card->perf_stats.cq_time;
- data[38] = card->perf_stats.rx_csum;
-}
-EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
-
-void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
- switch (stringset) {
- case ETH_SS_STATS:
- memcpy(data, &qeth_ethtool_stats_keys,
- sizeof(qeth_ethtool_stats_keys));
- break;
- default:
- WARN_ON(1);
- break;
- }
-}
-EXPORT_SYMBOL_GPL(qeth_core_get_strings);
-
-void qeth_core_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct qeth_card *card = dev->ml_priv;
-
- strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
- sizeof(info->driver));
- strlcpy(info->version, "1.0", sizeof(info->version));
- strlcpy(info->fw_version, card->info.mcl_level,
- sizeof(info->fw_version));
- snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
- CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
-}
-EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
-
-/* Helper function to fill 'advertising' and 'supported' which are the same. */
-/* Autoneg and full-duplex are supported and advertised unconditionally. */
-/* Always advertise and support all speeds up to specified, and only one */
-/* specified port type. */
-static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
- int maxspeed, int porttype)
-{
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
-
- ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
-
- switch (porttype) {
- case PORT_TP:
- ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
- break;
- case PORT_FIBRE:
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
- break;
- default:
- ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
- WARN_ON_ONCE(1);
- }
-
- /* partially does fall through, to also select lower speeds */
- switch (maxspeed) {
- case SPEED_25000:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 25000baseSR_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 25000baseSR_Full);
- break;
- case SPEED_10000:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10000baseT_Full);
- case SPEED_1000:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 1000baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 1000baseT_Half);
- case SPEED_100:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 100baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 100baseT_Half);
- case SPEED_10:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Half);
- /* end fallthrough */
- break;
- default:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Half);
- WARN_ON_ONCE(1);
- }
-}
-
-int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct qeth_card *card = netdev->ml_priv;
- enum qeth_link_types link_type;
- struct carrier_info carrier_info;
- int rc;
-
- if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
- link_type = QETH_LINK_TYPE_10GBIT_ETH;
- else
- link_type = card->info.link_type;
-
- cmd->base.duplex = DUPLEX_FULL;
- cmd->base.autoneg = AUTONEG_ENABLE;
- cmd->base.phy_address = 0;
- cmd->base.mdio_support = 0;
- cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
- cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
-
- switch (link_type) {
- case QETH_LINK_TYPE_FAST_ETH:
- case QETH_LINK_TYPE_LANE_ETH100:
- cmd->base.speed = SPEED_100;
- cmd->base.port = PORT_TP;
- break;
- case QETH_LINK_TYPE_GBIT_ETH:
- case QETH_LINK_TYPE_LANE_ETH1000:
- cmd->base.speed = SPEED_1000;
- cmd->base.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_10GBIT_ETH:
- cmd->base.speed = SPEED_10000;
- cmd->base.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_25GBIT_ETH:
- cmd->base.speed = SPEED_25000;
- cmd->base.port = PORT_FIBRE;
- break;
- default:
- cmd->base.speed = SPEED_10;
- cmd->base.port = PORT_TP;
- }
- qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port);
-
- /* Check if we can obtain more accurate information. */
- /* If QUERY_CARD_INFO command is not supported or fails, */
- /* just return the heuristics that was filled above. */
- if (!qeth_card_hw_is_reachable(card))
- return -ENODEV;
- rc = qeth_query_card_info(card, &carrier_info);
- if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
- return 0;
- if (rc) /* report error from the hardware operation */
- return rc;
- /* on success, fill in the information got from the hardware */
-
- netdev_dbg(netdev,
- "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
- carrier_info.card_type,
- carrier_info.port_mode,
- carrier_info.port_speed);
-
- /* Update attributes for which we've obtained more authoritative */
- /* information, leave the rest the way they where filled above. */
- switch (carrier_info.card_type) {
- case CARD_INFO_TYPE_1G_COPPER_A:
- case CARD_INFO_TYPE_1G_COPPER_B:
- cmd->base.port = PORT_TP;
- qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
- break;
- case CARD_INFO_TYPE_1G_FIBRE_A:
- case CARD_INFO_TYPE_1G_FIBRE_B:
- cmd->base.port = PORT_FIBRE;
- qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
- break;
- case CARD_INFO_TYPE_10G_FIBRE_A:
- case CARD_INFO_TYPE_10G_FIBRE_B:
- cmd->base.port = PORT_FIBRE;
- qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port);
- break;
- }
-
- switch (carrier_info.port_mode) {
- case CARD_INFO_PORTM_FULLDUPLEX:
- cmd->base.duplex = DUPLEX_FULL;
- break;
- case CARD_INFO_PORTM_HALFDUPLEX:
- cmd->base.duplex = DUPLEX_HALF;
- break;
- }
-
- switch (carrier_info.port_speed) {
- case CARD_INFO_PORTS_10M:
- cmd->base.speed = SPEED_10;
- break;
- case CARD_INFO_PORTS_100M:
- cmd->base.speed = SPEED_100;
- break;
- case CARD_INFO_PORTS_1G:
- cmd->base.speed = SPEED_1000;
- break;
- case CARD_INFO_PORTS_10G:
- cmd->base.speed = SPEED_10000;
- break;
- case CARD_INFO_PORTS_25G:
- cmd->base.speed = SPEED_25000;
- break;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_link_ksettings);
-
-/* Callback to handle checksum offload command reply from OSA card.
- * Verify that required features have been enabled on the card.
- * Return error in hdr->return_code as this value is checked by caller.
- *
- * Always returns zero to indicate no further messages from the OSA card.
- */
-static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
+static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
- struct qeth_checksum_cmd *chksum_cb =
- (struct qeth_checksum_cmd *)reply->param;
+ u32 *features = reply->param;
- QETH_CARD_TEXT(card, 4, "chkdoccb");
if (qeth_setassparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
- memset(chksum_cb, 0, sizeof(*chksum_cb));
- if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
- chksum_cb->supported =
- cmd->data.setassparms.data.chksum.supported;
- QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported);
- }
- if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) {
- chksum_cb->supported =
- cmd->data.setassparms.data.chksum.supported;
- chksum_cb->enabled =
- cmd->data.setassparms.data.chksum.enabled;
- QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported);
- QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled);
- }
+ *features = cmd->data.setassparms.data.flags_32bit;
return 0;
}
-/* Send command to OSA card and check results. */
-static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
- enum qeth_ipa_funcs ipa_func,
- __u16 cmd_code, long data,
- struct qeth_checksum_cmd *chksum_cb,
- enum qeth_prot_versions prot)
+static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
+ enum qeth_prot_versions prot)
{
- struct qeth_cmd_buffer *iob;
- int rc = -ENOMEM;
-
- QETH_CARD_TEXT(card, 4, "chkdocmd");
- iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
- sizeof(__u32), prot);
- if (iob)
- rc = qeth_send_setassparms(card, iob, sizeof(__u32), data,
- qeth_ipa_checksum_run_cmd_cb,
- chksum_cb);
- return rc;
+ return qeth_send_simple_setassparms_prot(card, cstype,
+ IPA_CMD_ASS_STOP, 0, prot);
}
-static int qeth_send_checksum_on(struct qeth_card *card, int cstype,
- enum qeth_prot_versions prot)
+static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
+ enum qeth_prot_versions prot)
{
u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
- struct qeth_checksum_cmd chksum_cb;
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_caps caps;
+ u32 features;
int rc;
- if (prot == QETH_PROT_IPV4)
+ /* some L3 HW requires combined L3+L4 csum offload: */
+ if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
+ cstype == IPA_OUTBOUND_CHECKSUM)
required_features |= QETH_IPA_CHECKSUM_IP_HDR;
- rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
- &chksum_cb, prot);
- if (!rc) {
- if ((required_features & chksum_cb.supported) !=
- required_features)
- rc = -EIO;
- else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) &&
- cstype == IPA_INBOUND_CHECKSUM)
- dev_warn(&card->gdev->dev,
- "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
- QETH_CARD_IFNAME(card));
- }
- if (rc) {
- qeth_send_simple_setassparms_prot(card, cstype,
- IPA_CMD_ASS_STOP, 0, prot);
- dev_warn(&card->gdev->dev,
- "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n",
- prot, QETH_CARD_IFNAME(card));
+
+ iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
+ prot);
+ if (!iob)
+ return -ENOMEM;
+
+ rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
+ if (rc)
return rc;
+
+ if ((required_features & features) != required_features) {
+ qeth_set_csum_off(card, cstype, prot);
+ return -EOPNOTSUPP;
}
- rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
- chksum_cb.supported, &chksum_cb,
+
+ iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 4,
prot);
- if (!rc) {
- if ((required_features & chksum_cb.enabled) !=
- required_features)
- rc = -EIO;
+ if (!iob) {
+ qeth_set_csum_off(card, cstype, prot);
+ return -ENOMEM;
}
+
+ if (features & QETH_IPA_CHECKSUM_LP2LP)
+ required_features |= QETH_IPA_CHECKSUM_LP2LP;
+ __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
+ rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
if (rc) {
- qeth_send_simple_setassparms_prot(card, cstype,
- IPA_CMD_ASS_STOP, 0, prot);
- dev_warn(&card->gdev->dev,
- "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n",
- prot, QETH_CARD_IFNAME(card));
+ qeth_set_csum_off(card, cstype, prot);
return rc;
}
+ if (!qeth_ipa_caps_supported(&caps, required_features) ||
+ !qeth_ipa_caps_enabled(&caps, required_features)) {
+ qeth_set_csum_off(card, cstype, prot);
+ return -EOPNOTSUPP;
+ }
+
dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
+ if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
+ cstype == IPA_OUTBOUND_CHECKSUM)
+ dev_warn(&card->gdev->dev,
+ "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
+ QETH_CARD_IFNAME(card));
return 0;
}
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
enum qeth_prot_versions prot)
{
- int rc = (on) ? qeth_send_checksum_on(card, cstype, prot)
- : qeth_send_simple_setassparms_prot(card, cstype,
- IPA_CMD_ASS_STOP, 0,
- prot);
- return rc ? -EIO : 0;
+ return on ? qeth_set_csum_on(card, cstype, prot) :
+ qeth_set_csum_off(card, cstype, prot);
}
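qeth_set_csum_on() above now drives the assist in two steps: START to learn which bits the card supports, bail out (and switch the assist back off) if any required bit is missing, then ENABLE exactly the required bits and verify them again via the caps callback. A toy, single-process model of that control flow, with the hardware reply reduced to a plain 'supported' mask:

    #include <errno.h>
    #include <stdio.h>

    #define CSUM_TCP 0x1
    #define CSUM_UDP 0x2

    /* Toy model of the START -> check -> ENABLE -> check sequence in
     * qeth_set_csum_on(); hardware behaviour is simulated by the
     * 'supported' mask passed in. */
    static int set_csum_on(unsigned int supported, unsigned int *enabled)
    {
            unsigned int required = CSUM_TCP | CSUM_UDP;

            if ((required & supported) != required)
                    return -EOPNOTSUPP;     /* would also call set_csum_off() */

            *enabled = required & supported;        /* ENABLE step */
            if ((required & *enabled) != required)
                    return -EOPNOTSUPP;
            return 0;
    }

    int main(void)
    {
            unsigned int enabled = 0;

            printf("full support: %d\n",
                   set_csum_on(CSUM_TCP | CSUM_UDP, &enabled));
            printf("TCP only:     %d\n", set_csum_on(CSUM_TCP, &enabled));
            return 0;
    }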
static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
@@ -6458,7 +5965,7 @@ static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
struct qeth_tso_start_data *tso_data = reply->param;
if (qeth_setassparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
tso_data->mss = cmd->data.setassparms.data.tso.mss;
tso_data->supported = cmd->data.setassparms.data.tso.supported;
@@ -6485,8 +5992,7 @@ static int qeth_set_tso_on(struct qeth_card *card,
if (!iob)
return -ENOMEM;
- rc = qeth_send_setassparms(card, iob, 0, 0 /* unused */,
- qeth_start_tso_cb, &tso_data);
+ rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
if (rc)
return rc;
@@ -6503,10 +6009,9 @@ static int qeth_set_tso_on(struct qeth_card *card,
}
/* enable TSO capability */
- caps.supported = 0;
- caps.enabled = QETH_IPA_LARGE_SEND_TCP;
- rc = qeth_send_setassparms(card, iob, sizeof(caps), (long) &caps,
- qeth_setassparms_get_caps_cb, &caps);
+ __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
+ QETH_IPA_LARGE_SEND_TCP;
+ rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
if (rc) {
qeth_set_tso_off(card, prot);
return rc;
@@ -6526,10 +6031,7 @@ static int qeth_set_tso_on(struct qeth_card *card,
static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
enum qeth_prot_versions prot)
{
- int rc = on ? qeth_set_tso_on(card, prot) :
- qeth_set_tso_off(card, prot);
-
- return rc ? -EIO : 0;
+ return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
@@ -6555,8 +6057,6 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}
-#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
- NETIF_F_IPV6_CSUM | NETIF_F_TSO6)
/**
* qeth_enable_hw_features() - (Re-)Enable HW functions for device features
* @dev: a net_device
@@ -6566,17 +6066,20 @@ void qeth_enable_hw_features(struct net_device *dev)
struct qeth_card *card = dev->ml_priv;
netdev_features_t features;
- rtnl_lock();
features = dev->features;
- /* force-off any feature that needs an IPA sequence.
+ /* force-off any feature that might need an IPA sequence.
* netdev_update_features() will restart them.
*/
- dev->features &= ~QETH_HW_FEATURES;
+ dev->features &= ~dev->hw_features;
+ /* toggle VLAN filter, so that VIDs are re-programmed: */
+ if (IS_LAYER2(card) && IS_VM_NIC(card)) {
+ dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
netdev_update_features(dev);
if (features != dev->features)
dev_warn(&card->gdev->dev,
"Device recovery failed to restore all offload features\n");
- rtnl_unlock();
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
@@ -6645,10 +6148,7 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
features &= ~NETIF_F_TSO;
if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO6;
- /* if the card isn't up, remove features that require hw changes */
- if (card->state == CARD_STATE_DOWN ||
- card->state == CARD_STATE_RECOVER)
- features &= ~QETH_HW_FEATURES;
+
QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
return features;
}
@@ -6680,19 +6180,69 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(qeth_features_check);
+void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct qeth_card *card = dev->ml_priv;
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
+
+ QETH_CARD_TEXT(card, 5, "getstat");
+
+ stats->rx_packets = card->stats.rx_packets;
+ stats->rx_bytes = card->stats.rx_bytes;
+ stats->rx_errors = card->stats.rx_errors;
+ stats->rx_dropped = card->stats.rx_dropped;
+ stats->multicast = card->stats.rx_multicast;
+ stats->tx_errors = card->stats.tx_errors;
+
+ for (i = 0; i < card->qdio.no_out_queues; i++) {
+ queue = card->qdio.out_qs[i];
+
+ stats->tx_packets += queue->stats.tx_packets;
+ stats->tx_bytes += queue->stats.tx_bytes;
+ stats->tx_errors += queue->stats.tx_errors;
+ stats->tx_dropped += queue->stats.tx_dropped;
+ }
+}
+EXPORT_SYMBOL_GPL(qeth_get_stats64);
+
+int qeth_open(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ QETH_CARD_TEXT(card, 4, "qethopen");
+
+ if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
+ return -EIO;
+
+ card->data.state = CH_STATE_UP;
+ netif_start_queue(dev);
+
+ napi_enable(&card->napi);
+ local_bh_disable();
+ napi_schedule(&card->napi);
+ /* kick-start the NAPI softirq: */
+ local_bh_enable();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_open);
+
+int qeth_stop(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ QETH_CARD_TEXT(card, 4, "qethstop");
+ netif_tx_disable(dev);
+ napi_disable(&card->napi);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_stop);
+
static int __init qeth_core_init(void)
{
int rc;
pr_info("loading core functions\n");
- INIT_LIST_HEAD(&qeth_core_card_list.list);
- rwlock_init(&qeth_core_card_list.rwlock);
-
- qeth_wq = create_singlethread_workqueue("qeth_wq");
- if (!qeth_wq) {
- rc = -ENOMEM;
- goto out_err;
- }
rc = qeth_register_dbf_views();
if (rc)
@@ -6735,8 +6285,6 @@ slab_err:
register_err:
qeth_unregister_dbf_views();
dbf_err:
- destroy_workqueue(qeth_wq);
-out_err:
pr_err("Initializing the qeth device driver failed\n");
return rc;
}
@@ -6744,7 +6292,6 @@ out_err:
static void __exit qeth_core_exit(void)
{
qeth_clear_dbf_list();
- destroy_workqueue(qeth_wq);
ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
ccw_driver_unregister(&qeth_ccw_driver);
kmem_cache_destroy(qeth_qdio_outbuf_cache);
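A pattern that repeats throughout this file: IPA command callbacks such as qeth_setassparms_cb(), qeth_start_csum_cb() and qeth_start_tso_cb() now return -EIO (or another negative errno) when the reply is bad, instead of returning 0 and encoding the failure elsewhere. Assuming qeth_send_ipa_cmd() treats a non-zero callback return as the result of the whole command (its body is not part of this diff), the convention looks roughly like this standalone sketch:

    #include <errno.h>
    #include <stdio.h>

    struct reply {
            void *param;    /* where a callback would store decoded data */
    };

    typedef int (*reply_cb)(struct reply *reply, unsigned long data);

    /* Assumed shape of qeth_send_ipa_cmd(): whatever non-zero value the
     * callback returns becomes the result of the whole command. */
    static int send_cmd(reply_cb cb, unsigned long data)
    {
            struct reply reply = { .param = NULL };

            return cb(&reply, data);
    }

    /* Callback in the new style: 0 on success, -EIO on a bad reply. */
    static int check_reply_cb(struct reply *reply, unsigned long data)
    {
            (void)reply;
            return data ? -EIO : 0; /* pretend 'data' is the HW return code */
    }

    int main(void)
    {
            printf("good reply: %d\n", send_cmd(check_reply_cb, 0x0000));
            printf("bad reply:  %d\n", send_cmd(check_reply_cb, 0xe080));
            return 0;
    }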
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index e891c0b52f4c..e3f4866c158e 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -125,26 +125,14 @@ unsigned char DM_ACT[] = {
unsigned char IPA_PDU_HEADER[] = {
0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 0x77,
- 0x00, 0x00, 0x00, 0x14, 0x00, 0x00,
- (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) / 256,
- (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) % 256,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00,
0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0xc1, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x24,
- sizeof(struct qeth_ipa_cmd) / 256,
- sizeof(struct qeth_ipa_cmd) % 256,
- 0x00,
- sizeof(struct qeth_ipa_cmd) / 256,
- sizeof(struct qeth_ipa_cmd) % 256,
- 0x05,
- 0x77, 0x77, 0x77, 0x77,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x05, 0x77, 0x77, 0x77, 0x77,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x00,
- sizeof(struct qeth_ipa_cmd) / 256,
- sizeof(struct qeth_ipa_cmd) % 256,
- 0x00, 0x00, 0x00, 0x40,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,
};
-EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
struct ipa_rc_msg {
enum qeth_ipa_return_codes rc;
@@ -213,12 +201,10 @@ static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
{IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. switch disabled port mode RR"},
{IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
- {IPA_RC_ENOMEM, "Memory problem"},
+ /* default for qeth_get_ipa_msg(): */
{IPA_RC_FFFF, "Unknown Error"}
};
-
-
const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
{
int x;
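In the IPA_PDU_HEADER hunk above, the bytes that used to be computed from sizeof(struct qeth_ipa_cmd) at build time are now plain zeros, which suggests the real lengths get patched into the template when a command is assembled; the code doing that is outside this diff, so the offsets below are purely illustrative. A sketch of writing big-endian 16-bit lengths into such a byte template:

    #include <stdint.h>
    #include <stdio.h>

    /* Write a 16-bit value big-endian at a given offset of a template. */
    static void put_be16(uint8_t *buf, unsigned int off, uint16_t val)
    {
            buf[off]     = val >> 8;
            buf[off + 1] = val & 0xff;
    }

    int main(void)
    {
            /* Shortened stand-in for a PDU header template whose length
             * fields are zeroed placeholders; offsets 14 and 22 are
             * illustrative, not the driver's real layout. */
            uint8_t pdu[32] = { 0x00, 0xe0 };
            uint16_t cmd_len = 0x012c;      /* made-up command length */

            put_be16(pdu, 14, sizeof(pdu) + cmd_len);  /* total PDU length */
            put_be16(pdu, 22, cmd_len);                /* payload length */

            printf("%02x %02x / %02x %02x\n",
                   pdu[14], pdu[15], pdu[22], pdu[23]);
            return 0;
    }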
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 3e54be201b27..f8c5d4a9be13 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -21,8 +21,6 @@
extern unsigned char IPA_PDU_HEADER[];
#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c)
-#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd))
-
#define QETH_SEQ_NO_LENGTH 4
#define QETH_MPC_TOKEN_LENGTH 4
#define QETH_MCL_LENGTH 4
@@ -80,7 +78,11 @@ enum qeth_card_types {
};
#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD)
+#define IS_OSD(card) ((card)->info.type == QETH_CARD_TYPE_OSD)
+#define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM)
#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
+#define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX)
+#define IS_VM_NIC(card) ((card)->info.guestlan)
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
/* only the first two bytes are looked at in qeth_get_cardname_short */
@@ -228,7 +230,6 @@ enum qeth_ipa_return_codes {
IPA_RC_LAN_OFFLINE = 0xe080,
IPA_RC_VEPA_TO_VEB_TRANSITION = 0xe090,
IPA_RC_INVALID_IP_VERSION2 = 0xf001,
- IPA_RC_ENOMEM = 0xfffe,
IPA_RC_FFFF = 0xffff
};
/* for VNIC Characteristics */
@@ -416,12 +417,6 @@ enum qeth_ipa_checksum_bits {
QETH_IPA_CHECKSUM_LP2LP = 0x0020
};
-/* IPA Assist checksum offload reply layout. */
-struct qeth_checksum_cmd {
- __u32 supported;
- __u32 enabled;
-} __packed;
-
enum qeth_ipa_large_send_caps {
QETH_IPA_LARGE_SEND_TCP = 0x00000001,
};
@@ -437,7 +432,6 @@ struct qeth_ipacmd_setassparms {
union {
__u32 flags_32bit;
struct qeth_ipa_caps caps;
- struct qeth_checksum_cmd chksum;
struct qeth_arp_cache_entry arp_entry;
struct qeth_arp_query_data query_arp;
struct qeth_tso_start_data tso;
@@ -529,17 +523,20 @@ struct qeth_query_switch_attributes {
__u8 reserved3[8];
};
+#define QETH_SETADP_FLAGS_VIRTUAL_MAC 0x80 /* for CHANGE_ADDR_READ_MAC */
+
struct qeth_ipacmd_setadpparms_hdr {
- __u32 supp_hw_cmds;
- __u32 reserved1;
- __u16 cmdlength;
- __u16 reserved2;
- __u32 command_code;
- __u16 return_code;
- __u8 used_total;
- __u8 seq_no;
- __u32 reserved3;
-} __attribute__ ((packed));
+ u32 supp_hw_cmds;
+ u32 reserved1;
+ u16 cmdlength;
+ u16 reserved2;
+ u32 command_code;
+ u16 return_code;
+ u8 used_total;
+ u8 seq_no;
+ u8 flags;
+ u8 reserved3[3];
+};
struct qeth_ipacmd_setadpparms {
struct qeth_ipacmd_setadpparms_hdr hdr;
@@ -828,16 +825,10 @@ enum qeth_ipa_arp_return_codes {
extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
-#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
- sizeof(struct qeth_ipacmd_setassparms_hdr))
-#define QETH_IPA_ARP_DATA_POS(buffer) (buffer + IPA_PDU_HEADER_SIZE + \
- QETH_SETASS_BASE_LEN)
#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
sizeof(struct qeth_ipacmd_setadpparms_hdr))
#define QETH_SNMP_SETADP_CMDLENGTH 16
-#define QETH_ARP_DATA_SIZE 3968
-#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8)
/* Helper functions */
#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
(cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
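The rewritten qeth_ipacmd_setadpparms_hdr drops the packed attribute and adds a flags byte while shrinking reserved3 to three bytes; every member now sits on its natural alignment, so the compiler inserts no padding and the structure stays 24 bytes, matching the old packed layout. A standalone check of that claim, with the kernel's u8/u16/u32 mapped to their stdint equivalents:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Same members as the rewritten header above, minus __packed. */
    struct setadpparms_hdr {
            uint32_t supp_hw_cmds;
            uint32_t reserved1;
            uint16_t cmdlength;
            uint16_t reserved2;
            uint32_t command_code;
            uint16_t return_code;
            uint8_t  used_total;
            uint8_t  seq_no;
            uint8_t  flags;
            uint8_t  reserved3[3];
    };

    /* 4+4+2+2+4+2+1+1+1+3 = 24 bytes, the size the packed variant had. */
    _Static_assert(sizeof(struct setadpparms_hdr) == 24, "unexpected padding");
    _Static_assert(offsetof(struct setadpparms_hdr, flags) == 20,
                   "unexpected layout");

    int main(void)
    {
            printf("sizeof = %zu\n", sizeof(struct setadpparms_hdr));
            return 0;
    }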
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 30f61608fa22..56deeb6f7bc0 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -29,13 +29,11 @@ static ssize_t qeth_dev_state_show(struct device *dev,
case CARD_STATE_HARDSETUP:
return sprintf(buf, "HARDSETUP\n");
case CARD_STATE_SOFTSETUP:
+ if (card->dev->flags & IFF_UP)
+ return sprintf(buf, "UP (LAN %s)\n",
+ netif_carrier_ok(card->dev) ? "ONLINE" :
+ "OFFLINE");
return sprintf(buf, "SOFTSETUP\n");
- case CARD_STATE_UP:
- return sprintf(buf, "UP (LAN %s)\n",
- netif_carrier_ok(card->dev) ? "ONLINE" :
- "OFFLINE");
- case CARD_STATE_RECOVER:
- return sprintf(buf, "RECOVER\n");
default:
return sprintf(buf, "UNKNOWN\n");
}
@@ -126,8 +124,7 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
@@ -202,8 +199,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
@@ -285,8 +281,7 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
@@ -316,7 +311,7 @@ static ssize_t qeth_dev_recover_store(struct device *dev,
if (!card)
return -EINVAL;
- if (card->state != CARD_STATE_UP)
+ if (!qeth_card_hw_is_reachable(card))
return -EPERM;
i = simple_strtoul(buf, &tmp, 16);
@@ -336,35 +331,36 @@ static ssize_t qeth_dev_performance_stats_show(struct device *dev,
if (!card)
return -EINVAL;
- return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0);
+ return sprintf(buf, "1\n");
}
static ssize_t qeth_dev_performance_stats_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- char *tmp;
- int i, rc = 0;
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
+ bool reset;
+ int rc;
if (!card)
return -EINVAL;
- mutex_lock(&card->conf_mutex);
- i = simple_strtoul(buf, &tmp, 16);
- if ((i == 0) || (i == 1)) {
- if (i == card->options.performance_stats)
- goto out;
- card->options.performance_stats = i;
- if (i == 0)
- memset(&card->perf_stats, 0,
- sizeof(struct qeth_perf_stats));
- card->perf_stats.initial_rx_packets = card->stats.rx_packets;
- card->perf_stats.initial_tx_packets = card->stats.tx_packets;
- } else
- rc = -EINVAL;
-out:
- mutex_unlock(&card->conf_mutex);
- return rc ? rc : count;
+ rc = kstrtobool(buf, &reset);
+ if (rc)
+ return rc;
+
+ if (reset) {
+ memset(&card->stats, 0, sizeof(card->stats));
+ for (i = 0; i < card->qdio.no_out_queues; i++) {
+ queue = card->qdio.out_qs[i];
+ if (!queue)
+ break;
+ memset(&queue->stats, 0, sizeof(queue->stats));
+ }
+ }
+
+ return count;
}
static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
@@ -420,7 +416,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
goto out;
}
- card->info.mac_bits = 0;
if (card->discipline) {
/* start with a new, pristine netdevice: */
ndev = qeth_clone_netdev(card->dev);
@@ -633,8 +628,7 @@ static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
new file mode 100644
index 000000000000..93a53fed4cf8
--- /dev/null
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2018
+ */
+
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/ethtool.h>
+#include "qeth_core.h"
+
+
+#define QETH_TXQ_STAT(_name, _stat) { \
+ .name = _name, \
+ .offset = offsetof(struct qeth_out_q_stats, _stat) \
+}
+
+#define QETH_CARD_STAT(_name, _stat) { \
+ .name = _name, \
+ .offset = offsetof(struct qeth_card_stats, _stat) \
+}
+
+struct qeth_stats {
+ char name[ETH_GSTRING_LEN];
+ unsigned int offset;
+};
+
+static const struct qeth_stats txq_stats[] = {
+ QETH_TXQ_STAT("IO buffers", bufs),
+ QETH_TXQ_STAT("IO buffer elements", buf_elements),
+ QETH_TXQ_STAT("packed IO buffers", bufs_pack),
+ QETH_TXQ_STAT("skbs", tx_packets),
+ QETH_TXQ_STAT("packed skbs", skbs_pack),
+ QETH_TXQ_STAT("SG skbs", skbs_sg),
+ QETH_TXQ_STAT("HW csum skbs", skbs_csum),
+ QETH_TXQ_STAT("TSO skbs", skbs_tso),
+ QETH_TXQ_STAT("linearized skbs", skbs_linearized),
+ QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail),
+ QETH_TXQ_STAT("TSO bytes", tso_bytes),
+ QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
+};
+
+static const struct qeth_stats card_stats[] = {
+ QETH_CARD_STAT("rx0 IO buffers", rx_bufs),
+ QETH_CARD_STAT("rx0 HW csum skbs", rx_skb_csum),
+ QETH_CARD_STAT("rx0 SG skbs", rx_sg_skbs),
+ QETH_CARD_STAT("rx0 SG page frags", rx_sg_frags),
+ QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page),
+};
+
+#define TXQ_STATS_LEN ARRAY_SIZE(txq_stats)
+#define CARD_STATS_LEN ARRAY_SIZE(card_stats)
+
+static void qeth_add_stat_data(u64 **dst, void *src,
+ const struct qeth_stats stats[],
+ unsigned int size)
+{
+ unsigned int i;
+ char *stat;
+
+ for (i = 0; i < size; i++) {
+ stat = (char *)src + stats[i].offset;
+ **dst = *(u64 *)stat;
+ (*dst)++;
+ }
+}
+
+static void qeth_add_stat_strings(u8 **data, const char *prefix,
+ const struct qeth_stats stats[],
+ unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ snprintf(*data, ETH_GSTRING_LEN, "%s%s", prefix, stats[i].name);
+ *data += ETH_GSTRING_LEN;
+ }
+}
+
+static int qeth_get_sset_count(struct net_device *dev, int stringset)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ return CARD_STATS_LEN +
+ card->qdio.no_out_queues * TXQ_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void qeth_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct qeth_card *card = dev->ml_priv;
+ unsigned int i;
+
+ qeth_add_stat_data(&data, &card->stats, card_stats, CARD_STATS_LEN);
+ for (i = 0; i < card->qdio.no_out_queues; i++)
+ qeth_add_stat_data(&data, &card->qdio.out_qs[i]->stats,
+ txq_stats, TXQ_STATS_LEN);
+}
+
+static void qeth_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ param->rx_max_pending = QDIO_MAX_BUFFERS_PER_Q;
+ param->rx_mini_max_pending = 0;
+ param->rx_jumbo_max_pending = 0;
+ param->tx_max_pending = QDIO_MAX_BUFFERS_PER_Q;
+
+ param->rx_pending = card->qdio.in_buf_pool.buf_count;
+ param->rx_mini_pending = 0;
+ param->rx_jumbo_pending = 0;
+ param->tx_pending = QDIO_MAX_BUFFERS_PER_Q;
+}
+
+static void qeth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ struct qeth_card *card = dev->ml_priv;
+ char prefix[ETH_GSTRING_LEN] = "";
+ unsigned int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ qeth_add_stat_strings(&data, prefix, card_stats,
+ CARD_STATS_LEN);
+ for (i = 0; i < card->qdio.no_out_queues; i++) {
+ snprintf(prefix, ETH_GSTRING_LEN, "tx%u ", i);
+ qeth_add_stat_strings(&data, prefix, txq_stats,
+ TXQ_STATS_LEN);
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void qeth_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
+ sizeof(info->driver));
+ strlcpy(info->version, "1.0", sizeof(info->version));
+ strlcpy(info->fw_version, card->info.mcl_level,
+ sizeof(info->fw_version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
+ CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
+}
+
+/* Helper function to fill 'advertising' and 'supported' which are the same. */
+/* Autoneg and full-duplex are supported and advertised unconditionally. */
+/* Always advertise and support all speeds up to the specified maximum, */
+/* and only the one specified port type. */
+static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
+ int maxspeed, int porttype)
+{
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+
+ switch (porttype) {
+ case PORT_TP:
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+ break;
+ case PORT_FIBRE:
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+ break;
+ default:
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+ WARN_ON_ONCE(1);
+ }
+
+	/* partially falls through, to also select lower speeds */
+ switch (maxspeed) {
+ case SPEED_25000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 25000baseSR_Full);
+ break;
+ case SPEED_10000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10000baseT_Full);
+ /* fall through */
+ case SPEED_1000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Half);
+ /* fall through */
+ case SPEED_100:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 100baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Half);
+ /* fall through */
+ case SPEED_10:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Half);
+ break;
+ default:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Half);
+ WARN_ON_ONCE(1);
+ }
+}
+
+static int qeth_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct qeth_card *card = netdev->ml_priv;
+ enum qeth_link_types link_type;
+ struct carrier_info carrier_info;
+ int rc;
+
+ if (IS_IQD(card) || IS_VM_NIC(card))
+ link_type = QETH_LINK_TYPE_10GBIT_ETH;
+ else
+ link_type = card->info.link_type;
+
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ cmd->base.phy_address = 0;
+ cmd->base.mdio_support = 0;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+
+ switch (link_type) {
+ case QETH_LINK_TYPE_FAST_ETH:
+ case QETH_LINK_TYPE_LANE_ETH100:
+ cmd->base.speed = SPEED_100;
+ cmd->base.port = PORT_TP;
+ break;
+ case QETH_LINK_TYPE_GBIT_ETH:
+ case QETH_LINK_TYPE_LANE_ETH1000:
+ cmd->base.speed = SPEED_1000;
+ cmd->base.port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_10GBIT_ETH:
+ cmd->base.speed = SPEED_10000;
+ cmd->base.port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ cmd->base.speed = SPEED_25000;
+ cmd->base.port = PORT_FIBRE;
+ break;
+ default:
+ cmd->base.speed = SPEED_10;
+ cmd->base.port = PORT_TP;
+ }
+ qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port);
+
+ /* Check if we can obtain more accurate information. */
+ /* If QUERY_CARD_INFO command is not supported or fails, */
+	/* just return the heuristics that were filled in above. */
+ rc = qeth_query_card_info(card, &carrier_info);
+ if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
+ return 0;
+ if (rc) /* report error from the hardware operation */
+ return rc;
+	/* on success, fill in the information obtained from the hardware */
+
+ netdev_dbg(netdev,
+ "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
+ carrier_info.card_type,
+ carrier_info.port_mode,
+ carrier_info.port_speed);
+
+ /* Update attributes for which we've obtained more authoritative */
+	/* information, leave the rest the way they were filled above. */
+ switch (carrier_info.card_type) {
+ case CARD_INFO_TYPE_1G_COPPER_A:
+ case CARD_INFO_TYPE_1G_COPPER_B:
+ cmd->base.port = PORT_TP;
+ qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
+ break;
+ case CARD_INFO_TYPE_1G_FIBRE_A:
+ case CARD_INFO_TYPE_1G_FIBRE_B:
+ cmd->base.port = PORT_FIBRE;
+ qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
+ break;
+ case CARD_INFO_TYPE_10G_FIBRE_A:
+ case CARD_INFO_TYPE_10G_FIBRE_B:
+ cmd->base.port = PORT_FIBRE;
+ qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port);
+ break;
+ }
+
+ switch (carrier_info.port_mode) {
+ case CARD_INFO_PORTM_FULLDUPLEX:
+ cmd->base.duplex = DUPLEX_FULL;
+ break;
+ case CARD_INFO_PORTM_HALFDUPLEX:
+ cmd->base.duplex = DUPLEX_HALF;
+ break;
+ }
+
+ switch (carrier_info.port_speed) {
+ case CARD_INFO_PORTS_10M:
+ cmd->base.speed = SPEED_10;
+ break;
+ case CARD_INFO_PORTS_100M:
+ cmd->base.speed = SPEED_100;
+ break;
+ case CARD_INFO_PORTS_1G:
+ cmd->base.speed = SPEED_1000;
+ break;
+ case CARD_INFO_PORTS_10G:
+ cmd->base.speed = SPEED_10000;
+ break;
+ case CARD_INFO_PORTS_25G:
+ cmd->base.speed = SPEED_25000;
+ break;
+ }
+
+ return 0;
+}
+
+const struct ethtool_ops qeth_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = qeth_get_ringparam,
+ .get_strings = qeth_get_strings,
+ .get_ethtool_stats = qeth_get_ethtool_stats,
+ .get_sset_count = qeth_get_sset_count,
+ .get_drvinfo = qeth_get_drvinfo,
+ .get_link_ksettings = qeth_get_link_ksettings,
+};
+
+const struct ethtool_ops qeth_osn_ethtool_ops = {
+ .get_strings = qeth_get_strings,
+ .get_ethtool_stats = qeth_get_ethtool_stats,
+ .get_sset_count = qeth_get_sset_count,
+ .get_drvinfo = qeth_get_drvinfo,
+};
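qeth_ethtool.c replaces the old hand-numbered data[] assignments with descriptor tables: each entry pairs an ethtool string with an offsetof() into the stats structure, and qeth_add_stat_data() walks the table to flatten the u64 fields into the array handed to ethtool. A standalone sketch of the same technique (the struct and field names below are made up):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GSTRING_LEN 32

    struct txq_stats {
            uint64_t bufs;
            uint64_t tx_packets;
            uint64_t tx_bytes;
    };

    struct stat_desc {
            char name[GSTRING_LEN];
            unsigned int offset;
    };

    #define TXQ_STAT(_name, _field) \
            { .name = _name, .offset = offsetof(struct txq_stats, _field) }

    static const struct stat_desc txq_descs[] = {
            TXQ_STAT("IO buffers", bufs),
            TXQ_STAT("skbs", tx_packets),
            TXQ_STAT("bytes", tx_bytes),
    };

    /* Same walk as qeth_add_stat_data(): each descriptor names a u64
     * field, offsetof() finds it inside the per-queue struct, and the
     * value is appended to the flat array ethtool hands out. */
    static void add_stat_data(uint64_t **dst, const void *src,
                              const struct stat_desc *descs, unsigned int n)
    {
            unsigned int i;

            for (i = 0; i < n; i++)
                    *(*dst)++ = *(const uint64_t *)
                            ((const char *)src + descs[i].offset);
    }

    int main(void)
    {
            struct txq_stats q0 = { .bufs = 3, .tx_packets = 11, .tx_bytes = 900 };
            uint64_t data[3], *p = data;
            unsigned int i;

            add_stat_data(&p, &q0, txq_descs, 3);
            for (i = 0; i < 3; i++)
                    printf("%s = %llu\n", txq_descs[i].name,
                           (unsigned long long)data[i]);
            return 0;
    }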
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2914a1a69f83..c3067fd3bd9e 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -25,7 +25,6 @@
#include "qeth_l2.h"
static int qeth_l2_set_offline(struct ccwgroup_device *);
-static int qeth_l2_stop(struct net_device *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
@@ -36,29 +35,7 @@ static void qeth_l2_vnicc_init(struct qeth_card *card);
static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
u32 *timeout);
-static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
-{
- struct qeth_card *card;
- struct net_device *ndev;
- __u16 temp_dev_no;
- unsigned long flags;
- struct ccw_dev_id read_devid;
-
- ndev = NULL;
- memcpy(&temp_dev_no, read_dev_no, 2);
- read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
- list_for_each_entry(card, &qeth_core_card_list.list, list) {
- ccw_device_get_id(CARD_RDEV(card), &read_devid);
- if (read_devid.devno == temp_dev_no) {
- ndev = card->dev;
- break;
- }
- }
- read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
- return ndev;
-}
-
-static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode)
+static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
{
int rc;
@@ -85,9 +62,6 @@ static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode)
case IPA_RC_L2_MAC_NOT_FOUND:
rc = -ENOENT;
break;
- case -ENOMEM:
- rc = -ENOMEM;
- break;
default:
rc = -EIO;
break;
@@ -95,6 +69,15 @@ static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode)
return rc;
}
+static int qeth_l2_send_setdelmac_cb(struct qeth_card *card,
+ struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+
+ return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code);
+}
+
static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
enum qeth_ipa_cmds ipacmd)
{
@@ -108,8 +91,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
cmd = __ipa_cmd(iob);
cmd->data.setdelmac.mac_length = ETH_ALEN;
ether_addr_copy(cmd->data.setdelmac.mac, mac);
- return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob,
- NULL, NULL));
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL);
}
static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
@@ -120,8 +102,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
if (rc == 0) {
dev_info(&card->gdev->dev,
- "MAC address %pM successfully registered on device %s\n",
- mac, card->dev->name);
+ "MAC address %pM successfully registered\n", mac);
} else {
switch (rc) {
case -EEXIST:
@@ -193,9 +174,9 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
return RTN_UNICAST;
}
-static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb, int ipv, int cast_type,
- unsigned int data_len)
+static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
+ struct qeth_hdr *hdr, struct sk_buff *skb,
+ int ipv, int cast_type, unsigned int data_len)
{
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
@@ -207,8 +188,7 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
+ QETH_TXQ_STAT_INC(queue, skbs_csum);
}
}
@@ -229,7 +209,7 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
}
}
-static int qeth_setdelvlan_makerc(struct qeth_card *card, int retcode)
+static int qeth_l2_setdelvlan_makerc(struct qeth_card *card, u16 retcode)
{
if (retcode)
QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
@@ -245,8 +225,6 @@ static int qeth_setdelvlan_makerc(struct qeth_card *card, int retcode)
return -ENOENT;
case IPA_RC_L2_VLAN_ID_NOT_ALLOWED:
return -EPERM;
- case -ENOMEM:
- return -ENOMEM;
default:
return -EIO;
}
@@ -264,9 +242,8 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
cmd->data.setdelvlan.vlan_id,
CARD_DEVID(card), cmd->hdr.return_code);
QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
- QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
}
- return 0;
+ return qeth_l2_setdelvlan_makerc(card, cmd->hdr.return_code);
}
static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
@@ -281,101 +258,40 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
return -ENOMEM;
cmd = __ipa_cmd(iob);
cmd->data.setdelvlan.vlan_id = i;
- return qeth_setdelvlan_makerc(card, qeth_send_ipa_cmd(card, iob,
- qeth_l2_send_setdelvlan_cb, NULL));
-}
-
-static void qeth_l2_process_vlans(struct qeth_card *card)
-{
- struct qeth_vlan_vid *id;
-
- QETH_CARD_TEXT(card, 3, "L2prcvln");
- mutex_lock(&card->vid_list_mutex);
- list_for_each_entry(id, &card->vid_list, list) {
- qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN);
- }
- mutex_unlock(&card->vid_list_mutex);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelvlan_cb, NULL);
}
static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct qeth_card *card = dev->ml_priv;
- struct qeth_vlan_vid *id;
- int rc;
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (!vid)
return 0;
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "aidREC");
- return 0;
- }
- id = kmalloc(sizeof(*id), GFP_KERNEL);
- if (id) {
- id->vid = vid;
- rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
- if (rc) {
- kfree(id);
- return rc;
- }
- mutex_lock(&card->vid_list_mutex);
- list_add_tail(&id->list, &card->vid_list);
- mutex_unlock(&card->vid_list_mutex);
- } else {
- return -ENOMEM;
- }
- return 0;
+
+ return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
}
static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
- struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
- int rc = 0;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "kidREC");
+ if (!vid)
return 0;
- }
- mutex_lock(&card->vid_list_mutex);
- list_for_each_entry(id, &card->vid_list, list) {
- if (id->vid == vid) {
- list_del(&id->list);
- tmpid = id;
- break;
- }
- }
- mutex_unlock(&card->vid_list_mutex);
- if (tmpid) {
- rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
- kfree(tmpid);
- }
- return rc;
+
+ return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
}
-static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
+static void qeth_l2_stop_card(struct qeth_card *card)
{
QETH_DBF_TEXT(SETUP , 2, "stopcard");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_set_allowed_threads(card, 0, 1);
- if (card->read.state == CH_STATE_UP &&
- card->write.state == CH_STATE_UP &&
- (card->state == CARD_STATE_UP)) {
- if (recovery_mode &&
- card->info.type != QETH_CARD_TYPE_OSN) {
- qeth_l2_stop(card->dev);
- } else {
- rtnl_lock();
- dev_close(card->dev);
- rtnl_unlock();
- }
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- card->state = CARD_STATE_SOFTSETUP;
- }
+
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_l2_del_all_macs(card);
qeth_clear_ipacmd_list(card);
@@ -391,6 +307,9 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
qeth_clear_cmd_buffers(&card->read);
qeth_clear_cmd_buffers(&card->write);
}
+
+ flush_workqueue(card->event_wq);
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
}
static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
@@ -438,8 +357,8 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
}
work_done++;
budget--;
- card->stats.rx_packets++;
- card->stats.rx_bytes += len;
+ QETH_CARD_STAT_INC(card, rx_packets);
+ QETH_CARD_STAT_ADD(card, rx_bytes, len);
}
return work_done;
}
@@ -461,10 +380,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
/* fall back to alternative mechanism: */
}
- if (card->info.type == QETH_CARD_TYPE_IQD ||
- card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSX ||
- card->info.guestlan) {
+ if (!IS_OSN(card)) {
rc = qeth_setadpparms_change_macaddr(card);
if (!rc)
goto out;
@@ -485,6 +401,26 @@ out:
return 0;
}
+static void qeth_l2_register_dev_addr(struct qeth_card *card)
+{
+ if (!is_valid_ether_addr(card->dev->dev_addr))
+ qeth_l2_request_initial_mac(card);
+
+ if (!IS_OSN(card) && !qeth_l2_send_setmac(card, card->dev->dev_addr))
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+}
+
+static int qeth_l2_validate_addr(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+ return eth_validate_addr(dev);
+
+ QETH_CARD_TEXT(card, 4, "nomacadr");
+ return -EPERM;
+}
+
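qeth_l2_validate_addr(), added above, moves the "is a MAC registered yet?" check out of the old open path and into the ndo_validate_addr hook: until QETH_LAYER2_MAC_REGISTERED is set, the device refuses to come up with -EPERM; afterwards the check defers to eth_validate_addr(). A reduced sketch of that gate (the flag and struct names below are illustrative):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Open is only allowed once the MAC has been registered with the
     * hardware; until then the validate hook fails with -EPERM. */
    struct l2_card {
            bool mac_registered;
    };

    static int l2_validate_addr(const struct l2_card *card)
    {
            if (card->mac_registered)
                    return 0;       /* would chain to eth_validate_addr() */
            return -EPERM;
    }

    int main(void)
    {
            struct l2_card card = { .mac_registered = false };

            printf("before registration: %d\n", l2_validate_addr(&card));
            card.mac_registered = true;
            printf("after registration:  %d\n", l2_validate_addr(&card));
            return 0;
    }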
static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
@@ -494,9 +430,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
QETH_CARD_TEXT(card, 3, "setmac");
- if (card->info.type == QETH_CARD_TYPE_OSN ||
- card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSX) {
+ if (IS_OSM(card) || IS_OSX(card)) {
QETH_CARD_TEXT(card, 3, "setmcTYP");
return -EOPNOTSUPP;
}
@@ -504,39 +438,22 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "setmcREC");
- return -ERESTARTSYS;
- }
-
- /* avoid racing against concurrent state change: */
- if (!mutex_trylock(&card->conf_mutex))
- return -EAGAIN;
-
- if (!qeth_card_hw_is_reachable(card)) {
- ether_addr_copy(dev->dev_addr, addr->sa_data);
- goto out_unlock;
- }
-
/* don't register the same address twice */
if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
- goto out_unlock;
+ return 0;
/* add the new address, switch over, drop the old */
rc = qeth_l2_send_setmac(card, addr->sa_data);
if (rc)
- goto out_unlock;
+ return rc;
ether_addr_copy(old_addr, dev->dev_addr);
ether_addr_copy(dev->dev_addr, addr->sa_data);
if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
qeth_l2_remove_mac(card, old_addr);
card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-
-out_unlock:
- mutex_unlock(&card->conf_mutex);
- return rc;
+ return 0;
}
static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -607,13 +524,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
int i;
int rc;
- if (card->info.type == QETH_CARD_TYPE_OSN)
- return;
-
QETH_CARD_TEXT(card, 3, "setmulti");
- if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
- (card->state != CARD_STATE_UP))
- return;
spin_lock_bh(&card->mclock);
@@ -698,17 +609,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
int tx_bytes = skb->len;
int rc;
- if (card->state != CARD_STATE_UP) {
- card->stats.tx_carrier_errors++;
- goto tx_drop;
- }
-
queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
- if (card->options.performance_stats) {
- card->perf_stats.outbound_cnt++;
- card->perf_stats.outbound_start_time = qeth_get_micros();
- }
netif_stop_queue(dev);
if (IS_OSN(card))
@@ -718,81 +620,20 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
qeth_l2_fill_header);
if (!rc) {
- card->stats.tx_packets++;
- card->stats.tx_bytes += tx_bytes;
- if (card->options.performance_stats)
- card->perf_stats.outbound_time += qeth_get_micros() -
- card->perf_stats.outbound_start_time;
+ QETH_TXQ_STAT_INC(queue, tx_packets);
+ QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
netif_wake_queue(dev);
return NETDEV_TX_OK;
} else if (rc == -EBUSY) {
return NETDEV_TX_BUSY;
} /* else fall through */
-tx_drop:
- card->stats.tx_dropped++;
- card->stats.tx_errors++;
- dev_kfree_skb_any(skb);
+ QETH_TXQ_STAT_INC(queue, tx_dropped);
+ kfree_skb(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
-static int __qeth_l2_open(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
- int rc = 0;
-
- QETH_CARD_TEXT(card, 4, "qethopen");
- if (card->state == CARD_STATE_UP)
- return rc;
- if (card->state != CARD_STATE_SOFTSETUP)
- return -ENODEV;
-
- if ((card->info.type != QETH_CARD_TYPE_OSN) &&
- (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
- QETH_CARD_TEXT(card, 4, "nomacadr");
- return -EPERM;
- }
- card->data.state = CH_STATE_UP;
- card->state = CARD_STATE_UP;
- netif_start_queue(dev);
-
- if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
- napi_enable(&card->napi);
- local_bh_disable();
- napi_schedule(&card->napi);
- /* kick-start the NAPI softirq: */
- local_bh_enable();
- } else
- rc = -EIO;
- return rc;
-}
-
-static int qeth_l2_open(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 5, "qethope_");
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "openREC");
- return -ERESTARTSYS;
- }
- return __qeth_l2_open(dev);
-}
-
-static int qeth_l2_stop(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 4, "qethstop");
- netif_tx_disable(dev);
- if (card->state == CARD_STATE_UP) {
- card->state = CARD_STATE_SOFTSETUP;
- napi_disable(&card->napi);
- }
- return 0;
-}
-
static const struct device_type qeth_l2_devtype = {
.name = "qeth_layer2",
.groups = qeth_l2_attr_groups,
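
The removed __qeth_l2_open()/qeth_l2_stop() pair only toggled the card between SOFTSETUP and UP and (re)armed NAPI; the netdev_ops further down now point at the shared qeth_open()/qeth_stop() helpers instead. A compilable sketch of the state handling the removed code implemented (the enum and helper names are illustrative only):

    #include <errno.h>

    enum card_state { STATE_DOWN, STATE_SOFTSETUP, STATE_UP };

    static int l2_open(enum card_state *state)
    {
        if (*state == STATE_UP)
            return 0;                   /* already open */
        if (*state != STATE_SOFTSETUP)
            return -ENODEV;             /* hardware not set up yet */
        *state = STATE_UP;              /* start TX queue, enable NAPI */
        return 0;
    }

    static int l2_stop(enum card_state *state)
    {
        if (*state == STATE_UP)
            *state = STATE_SOFTSETUP;   /* disable NAPI, stop TX */
        return 0;
    }
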
@@ -803,15 +644,15 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
+ qeth_l2_vnicc_set_defaults(card);
+
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l2_create_device_attributes(&gdev->dev);
if (rc)
return rc;
}
- INIT_LIST_HEAD(&card->vid_list);
+
hash_init(card->mac_htable);
- card->info.hwtrap = 0;
- qeth_l2_vnicc_set_defaults(card);
return 0;
}
@@ -826,33 +667,19 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l2_set_offline(cgdev);
+
+ cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
}
-static const struct ethtool_ops qeth_l2_ethtool_ops = {
- .get_link = ethtool_op_get_link,
- .get_strings = qeth_core_get_strings,
- .get_ethtool_stats = qeth_core_get_ethtool_stats,
- .get_sset_count = qeth_core_get_sset_count,
- .get_drvinfo = qeth_core_get_drvinfo,
- .get_link_ksettings = qeth_core_ethtool_get_link_ksettings,
-};
-
-static const struct ethtool_ops qeth_l2_osn_ops = {
- .get_strings = qeth_core_get_strings,
- .get_ethtool_stats = qeth_core_get_ethtool_stats,
- .get_sset_count = qeth_core_get_sset_count,
- .get_drvinfo = qeth_core_get_drvinfo,
-};
-
static const struct net_device_ops qeth_l2_netdev_ops = {
- .ndo_open = qeth_l2_open,
- .ndo_stop = qeth_l2_stop,
- .ndo_get_stats = qeth_get_stats,
+ .ndo_open = qeth_open,
+ .ndo_stop = qeth_stop,
+ .ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l2_hard_start_xmit,
.ndo_features_check = qeth_features_check,
- .ndo_validate_addr = eth_validate_addr,
+ .ndo_validate_addr = qeth_l2_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
.ndo_set_mac_address = qeth_l2_set_mac_address,
@@ -863,27 +690,36 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_set_features = qeth_set_features
};
+static const struct net_device_ops qeth_osn_netdev_ops = {
+ .ndo_open = qeth_open,
+ .ndo_stop = qeth_stop,
+ .ndo_get_stats64 = qeth_get_stats64,
+ .ndo_start_xmit = qeth_l2_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = qeth_tx_timeout,
+};
+
static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
{
int rc;
- if (qeth_netdev_is_registered(card->dev))
- return 0;
-
- card->dev->priv_flags |= IFF_UNICAST_FLT;
- card->dev->netdev_ops = &qeth_l2_netdev_ops;
- if (card->info.type == QETH_CARD_TYPE_OSN) {
- card->dev->ethtool_ops = &qeth_l2_osn_ops;
+ if (IS_OSN(card)) {
+ card->dev->netdev_ops = &qeth_osn_netdev_ops;
card->dev->flags |= IFF_NOARP;
- } else {
- card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
- card->dev->needed_headroom = sizeof(struct qeth_hdr);
+ goto add_napi;
}
- if (card->info.type == QETH_CARD_TYPE_OSM)
+ card->dev->needed_headroom = sizeof(struct qeth_hdr);
+ card->dev->netdev_ops = &qeth_l2_netdev_ops;
+ card->dev->priv_flags |= IFF_UNICAST_FLT;
+
+ if (IS_OSM(card)) {
card->dev->features |= NETIF_F_VLAN_CHALLENGED;
- else
+ } else {
+ if (!IS_VM_NIC(card))
+ card->dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
card->dev->features |= NETIF_F_SG;
@@ -917,7 +753,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
- qeth_l2_request_initial_mac(card);
+add_napi:
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
rc = register_netdev(card->dev);
if (!rc && carrier_ok)
@@ -948,11 +784,11 @@ static void qeth_l2_trace_features(struct qeth_card *card)
sizeof(card->options.vnicc.sup_chars));
}
-static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
+static int qeth_l2_set_online(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ struct net_device *dev = card->dev;
int rc = 0;
- enum qeth_card_states recover_flag;
bool carrier_ok;
mutex_lock(&card->discipline_mutex);
@@ -960,25 +796,12 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- recover_flag = card->state;
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
- qeth_bridgeport_query_support(card);
- if (card->options.sbp.supported_funcs)
- dev_info(&card->gdev->dev,
- "The device represents a Bridge Capable Port\n");
-
- rc = qeth_l2_setup_netdev(card, carrier_ok);
- if (rc)
- goto out_remove;
-
- if (card->info.type != QETH_CARD_TYPE_OSN &&
- !qeth_l2_send_setmac(card, card->dev->dev_addr))
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
if (card->info.hwtrap &&
@@ -987,6 +810,13 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
} else
card->info.hwtrap = 0;
+ qeth_bridgeport_query_support(card);
+ if (card->options.sbp.supported_funcs)
+ dev_info(&card->gdev->dev,
+ "The device represents a Bridge Capable Port\n");
+
+ qeth_l2_register_dev_addr(card);
+
/* for the rx_bcast characteristic, init VNICC after setmac */
qeth_l2_vnicc_init(card);
@@ -1008,11 +838,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
- if (card->info.type != QETH_CARD_TYPE_OSN)
- qeth_l2_process_vlans(card);
-
- netif_tx_disable(card->dev);
-
rc = qeth_init_qdio_queues(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
@@ -1023,17 +848,25 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
qeth_set_allowed_threads(card, 0xffffffff, 0);
- qeth_enable_hw_features(card->dev);
- if (recover_flag == CARD_STATE_RECOVER) {
- if (recovery_mode &&
- card->info.type != QETH_CARD_TYPE_OSN) {
- __qeth_l2_open(card->dev);
- qeth_l2_set_rx_mode(card->dev);
- } else {
- rtnl_lock();
- dev_open(card->dev);
- rtnl_unlock();
+ if (!qeth_netdev_is_registered(dev)) {
+ rc = qeth_l2_setup_netdev(card, carrier_ok);
+ if (rc)
+ goto out_remove;
+ } else {
+ rtnl_lock();
+ if (carrier_ok)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ netif_device_attach(dev);
+ qeth_enable_hw_features(dev);
+
+ if (card->info.open_when_online) {
+ card->info.open_when_online = 0;
+ dev_open(dev, NULL);
}
+ rtnl_unlock();
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -1042,44 +875,42 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
return 0;
out_remove:
- qeth_l2_stop_card(card, 0);
+ qeth_l2_stop_card(card);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
qdio_free(CARD_DDEV(card));
- if (recover_flag == CARD_STATE_RECOVER)
- card->state = CARD_STATE_RECOVER;
- else
- card->state = CARD_STATE_DOWN;
+ card->state = CARD_STATE_DOWN;
+
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
return rc;
}
-static int qeth_l2_set_online(struct ccwgroup_device *gdev)
-{
- return __qeth_l2_set_online(gdev, 0);
-}
-
static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
int recovery_mode)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
int rc = 0, rc2 = 0, rc3 = 0;
- enum qeth_card_states recover_flag;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
- netif_carrier_off(card->dev);
- recover_flag = card->state;
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
card->info.hwtrap = 1;
}
- qeth_l2_stop_card(card, recovery_mode);
+
+ rtnl_lock();
+ card->info.open_when_online = card->dev->flags & IFF_UP;
+ dev_close(card->dev);
+ netif_device_detach(card->dev);
+ netif_carrier_off(card->dev);
+ rtnl_unlock();
+
+ qeth_l2_stop_card(card);
rc = ccw_device_set_offline(CARD_DDEV(card));
rc2 = ccw_device_set_offline(CARD_WDEV(card));
rc3 = ccw_device_set_offline(CARD_RDEV(card));
@@ -1088,8 +919,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
qdio_free(CARD_DDEV(card));
- if (recover_flag == CARD_STATE_UP)
- card->state = CARD_STATE_RECOVER;
+
/* let user_space know that device is offline */
kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
@@ -1114,18 +944,16 @@ static int qeth_l2_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_recovery_task(card);
__qeth_l2_set_offline(card->gdev, 1);
- rc = __qeth_l2_set_online(card->gdev, 1);
+ rc = qeth_l2_set_online(card->gdev);
if (!rc)
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
else {
- qeth_close_dev(card);
+ ccwgroup_set_offline(card->gdev);
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
- qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;
@@ -1146,37 +974,23 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- netif_device_detach(card->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_OFFLINE)
return 0;
- if (card->state == CARD_STATE_UP) {
- if (card->info.hwtrap)
- qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
- __qeth_l2_set_offline(card->gdev, 1);
- } else
- __qeth_l2_set_offline(card->gdev, 0);
+
+ qeth_l2_set_offline(gdev);
return 0;
}
static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- int rc = 0;
+ int rc;
- if (card->state == CARD_STATE_RECOVER) {
- rc = __qeth_l2_set_online(card->gdev, 1);
- if (rc) {
- rtnl_lock();
- dev_close(card->dev);
- rtnl_unlock();
- }
- } else
- rc = __qeth_l2_set_online(card->gdev, 0);
+ rc = qeth_l2_set_online(gdev);
qeth_set_allowed_threads(card, 0xffffffff, 0);
- netif_device_attach(card->dev);
if (rc)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
@@ -1248,20 +1062,14 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
}
static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
- struct qeth_cmd_buffer *iob, int data_len)
+ struct qeth_cmd_buffer *iob)
{
- u16 s1, s2;
+ u16 length;
QETH_CARD_TEXT(card, 4, "osndipa");
- qeth_prepare_ipa_cmd(card, iob);
- s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
- s2 = (u16)data_len;
- memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
- memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
- memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
- memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
- return qeth_osn_send_control_data(card, s1, iob);
+ memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2);
+ return qeth_osn_send_control_data(card, length, iob);
}
int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
@@ -1278,8 +1086,9 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
if (!qeth_card_hw_is_reachable(card))
return -ENODEV;
iob = qeth_wait_for_buffer(&card->write);
+ qeth_prepare_ipa_cmd(card, iob, (u16) data_len);
memcpy(__ipa_cmd(iob), data, data_len);
- return qeth_osn_send_ipa_cmd(card, iob, data_len);
+ return qeth_osn_send_ipa_cmd(card, iob);
}
EXPORT_SYMBOL(qeth_osn_assist);
@@ -1288,13 +1097,16 @@ int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
int (*data_cb)(struct sk_buff *))
{
struct qeth_card *card;
+ char bus_id[16];
+ u16 devno;
- *dev = qeth_l2_netdev_by_devno(read_dev_no);
- if (*dev == NULL)
- return -ENODEV;
- card = (*dev)->ml_priv;
- if (!card)
+ memcpy(&devno, read_dev_no, 2);
+ sprintf(bus_id, "0.0.%04x", devno);
+ card = qeth_get_card_by_busid(bus_id);
+ if (!card || !IS_OSN(card))
return -ENODEV;
+ *dev = card->dev;
+
QETH_CARD_TEXT(card, 2, "osnreg");
if ((assist_cb == NULL) || (data_cb == NULL))
return -EINVAL;
@@ -1455,7 +1267,7 @@ static void qeth_bridge_state_change(struct qeth_card *card,
data->card = card;
memcpy(&data->qports, qports,
sizeof(struct qeth_sbp_state_change) + extrasize);
- queue_work(qeth_wq, &data->worker);
+ queue_work(card->event_wq, &data->worker);
}
struct qeth_bridge_host_data {
@@ -1527,14 +1339,12 @@ static void qeth_bridge_host_event(struct qeth_card *card,
data->card = card;
memcpy(&data->hostevs, hostevs,
sizeof(struct qeth_ipacmd_addr_change) + extrasize);
- queue_work(qeth_wq, &data->worker);
+ queue_work(card->event_wq, &data->worker);
}
/* SETBRIDGEPORT support; sending commands */
struct _qeth_sbp_cbctl {
- u16 ipa_rc;
- u16 cmd_rc;
union {
u32 supported;
struct {
@@ -1544,23 +1354,21 @@ struct _qeth_sbp_cbctl {
} data;
};
-/**
- * qeth_bridgeport_makerc() - derive "traditional" error from hardware codes.
- * @card: qeth_card structure pointer, for debug messages.
- * @cbctl: state structure with hardware return codes.
- * @setcmd: IPA command code
- *
- * Returns negative errno-compatible error indication or 0 on success.
- */
static int qeth_bridgeport_makerc(struct qeth_card *card,
- struct _qeth_sbp_cbctl *cbctl, enum qeth_ipa_sbp_cmd setcmd)
+ struct qeth_ipa_cmd *cmd)
{
+ struct qeth_ipacmd_setbridgeport *sbp = &cmd->data.sbp;
+ enum qeth_ipa_sbp_cmd setcmd = sbp->hdr.command_code;
+ u16 ipa_rc = cmd->hdr.return_code;
+ u16 sbp_rc = sbp->hdr.return_code;
int rc;
- int is_iqd = (card->info.type == QETH_CARD_TYPE_IQD);
- if ((is_iqd && (cbctl->ipa_rc == IPA_RC_SUCCESS)) ||
- (!is_iqd && (cbctl->ipa_rc == cbctl->cmd_rc)))
- switch (cbctl->cmd_rc) {
+ if (ipa_rc == IPA_RC_SUCCESS && sbp_rc == IPA_RC_SUCCESS)
+ return 0;
+
+ if ((IS_IQD(card) && ipa_rc == IPA_RC_SUCCESS) ||
+ (!IS_IQD(card) && ipa_rc == sbp_rc)) {
+ switch (sbp_rc) {
case IPA_RC_SUCCESS:
rc = 0;
break;
@@ -1624,8 +1432,8 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
default:
rc = -EIO;
}
- else
- switch (cbctl->ipa_rc) {
+ } else {
+ switch (ipa_rc) {
case IPA_RC_NOTSUPP:
rc = -EOPNOTSUPP;
break;
@@ -1635,10 +1443,11 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
default:
rc = -EIO;
}
+ }
if (rc) {
- QETH_CARD_TEXT_(card, 2, "SBPi%04x", cbctl->ipa_rc);
- QETH_CARD_TEXT_(card, 2, "SBPc%04x", cbctl->cmd_rc);
+ QETH_CARD_TEXT_(card, 2, "SBPi%04x", ipa_rc);
+ QETH_CARD_TEXT_(card, 2, "SBPc%04x", sbp_rc);
}
return rc;
}
@@ -1670,15 +1479,15 @@ static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+ int rc;
+
QETH_CARD_TEXT(card, 2, "brqsupcb");
- cbctl->ipa_rc = cmd->hdr.return_code;
- cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
- if ((cbctl->ipa_rc == 0) && (cbctl->cmd_rc == 0)) {
- cbctl->data.supported =
- cmd->data.sbp.data.query_cmds_supp.supported_cmds;
- } else {
- cbctl->data.supported = 0;
- }
+ rc = qeth_bridgeport_makerc(card, cmd);
+ if (rc)
+ return rc;
+
+ cbctl->data.supported =
+ cmd->data.sbp.data.query_cmds_supp.supported_cmds;
return 0;
}
@@ -1699,12 +1508,11 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
sizeof(struct qeth_sbp_query_cmds_supp));
if (!iob)
return;
+
if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb,
- (void *)&cbctl) ||
- qeth_bridgeport_makerc(card, &cbctl,
- IPA_SBP_QUERY_COMMANDS_SUPPORTED)) {
- /* non-zero makerc signifies failure, and produce messages */
+ &cbctl)) {
card->options.sbp.role = QETH_SBP_ROLE_NONE;
+ card->options.sbp.supported_funcs = 0;
return;
}
card->options.sbp.supported_funcs = cbctl.data.supported;
@@ -1716,16 +1524,16 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports;
struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+ int rc;
QETH_CARD_TEXT(card, 2, "brqprtcb");
- cbctl->ipa_rc = cmd->hdr.return_code;
- cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
- if ((cbctl->ipa_rc != 0) || (cbctl->cmd_rc != 0))
- return 0;
+ rc = qeth_bridgeport_makerc(card, cmd);
+ if (rc)
+ return rc;
+
if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
- cbctl->cmd_rc = 0xffff;
QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
- return 0;
+ return -EINVAL;
}
/* first entry contains the state of the local port */
if (qports->num_entries > 0) {
@@ -1750,7 +1558,6 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
int qeth_bridgeport_query_ports(struct qeth_card *card,
enum qeth_sbp_roles *role, enum qeth_sbp_states *state)
{
- int rc = 0;
struct qeth_cmd_buffer *iob;
struct _qeth_sbp_cbctl cbctl = {
.data = {
@@ -1767,22 +1574,18 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0);
if (!iob)
return -ENOMEM;
- rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
- (void *)&cbctl);
- if (rc < 0)
- return rc;
- return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
+
+ return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
+ &cbctl);
}
static int qeth_bridgeport_set_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
- struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+
QETH_CARD_TEXT(card, 2, "brsetrcb");
- cbctl->ipa_rc = cmd->hdr.return_code;
- cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
- return 0;
+ return qeth_bridgeport_makerc(card, cmd);
}
/**
@@ -1794,10 +1597,8 @@ static int qeth_bridgeport_set_cb(struct qeth_card *card,
*/
int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
{
- int rc = 0;
int cmdlength;
struct qeth_cmd_buffer *iob;
- struct _qeth_sbp_cbctl cbctl;
enum qeth_ipa_sbp_cmd setcmd;
QETH_CARD_TEXT(card, 2, "brsetrol");
@@ -1822,11 +1623,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
iob = qeth_sbp_build_cmd(card, setcmd, cmdlength);
if (!iob)
return -ENOMEM;
- rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb,
- (void *)&cbctl);
- if (rc < 0)
- return rc;
- return qeth_bridgeport_makerc(card, &cbctl, setcmd);
+
+ return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL);
}
/**
@@ -1930,7 +1728,7 @@ static bool qeth_bridgeport_is_in_use(struct qeth_card *card)
/* VNIC Characteristics support */
/* handle VNICC IPA command return codes; convert to error codes */
-static int qeth_l2_vnicc_makerc(struct qeth_card *card, int ipa_rc)
+static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc)
{
int rc;
@@ -1988,7 +1786,7 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "vniccrcb");
if (cmd->hdr.return_code)
- return 0;
+ return qeth_l2_vnicc_makerc(card, cmd->hdr.return_code);
/* return results to caller */
card->options.vnicc.sup_chars = rep->hdr.sup;
card->options.vnicc.cur_chars = rep->hdr.cur;
@@ -2009,7 +1807,6 @@ static int qeth_l2_vnicc_request(struct qeth_card *card,
struct qeth_ipacmd_vnicc *req;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- int rc;
QETH_CARD_TEXT(card, 2, "vniccreq");
@@ -2052,10 +1849,7 @@ static int qeth_l2_vnicc_request(struct qeth_card *card,
}
/* send request */
- rc = qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb,
- (void *) cbctl);
-
- return qeth_l2_vnicc_makerc(card, rc);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, cbctl);
}
/* VNICC query VNIC characteristics request */
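
With the reworked qeth_bridgeport_makerc() the IPA and SETBRIDGEPORT return codes are translated to errno values directly in the command callbacks, so _qeth_sbp_cbctl no longer carries raw hardware codes. A simplified, compilable sketch of that two-level mapping follows; the RC_* values and helper names are placeholders, not the driver's constants:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define RC_SUCCESS 0x0000           /* placeholder values, not the */
    #define RC_NOTSUPP 0x0001           /* driver's real IPA constants */

    static int map_sbp_code(uint16_t rc)
    {
        switch (rc) {                   /* sub-command return code */
        case RC_SUCCESS: return 0;
        case RC_NOTSUPP: return -EOPNOTSUPP;
        default:         return -EIO;
        }
    }

    static int map_ipa_code(uint16_t rc)
    {
        switch (rc) {                   /* command-level return code */
        case RC_NOTSUPP: return -EOPNOTSUPP;
        default:         return -EIO;
        }
    }

    static int sbp_makerc(bool is_iqd, uint16_t ipa_rc, uint16_t sbp_rc)
    {
        if (ipa_rc == RC_SUCCESS && sbp_rc == RC_SUCCESS)
            return 0;
        if ((is_iqd && ipa_rc == RC_SUCCESS) ||
            (!is_iqd && ipa_rc == sbp_rc))
            return map_sbp_code(sbp_rc);    /* trust the SBP code */
        return map_ipa_code(ipa_rc);        /* outer command failed */
    }
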
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index f08b745c2007..53712cf26406 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -40,7 +40,6 @@
static int qeth_l3_set_offline(struct ccwgroup_device *);
-static int qeth_l3_stop(struct net_device *);
static void qeth_l3_set_rx_mode(struct net_device *dev);
static int qeth_l3_register_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
@@ -254,8 +253,7 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
} else
rc = qeth_l3_register_addr_entry(card, addr);
- if (!rc || (rc == IPA_RC_DUPLICATE_IP_ADDRESS) ||
- (rc == IPA_RC_LAN_OFFLINE)) {
+ if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
if (addr->ref_counter < 1) {
qeth_l3_deregister_addr_entry(card, addr);
@@ -339,10 +337,28 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
}
+static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+
+ switch (cmd->hdr.return_code) {
+ case IPA_RC_SUCCESS:
+ return 0;
+ case IPA_RC_DUPLICATE_IP_ADDRESS:
+ return -EADDRINUSE;
+ case IPA_RC_MC_ADDR_NOT_FOUND:
+ return -ENOENT;
+ case IPA_RC_LAN_OFFLINE:
+ return -ENETDOWN;
+ default:
+ return -EIO;
+ }
+}
+
static int qeth_l3_send_setdelmc(struct qeth_card *card,
struct qeth_ipaddr *addr, int ipacmd)
{
- int rc;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
@@ -359,9 +375,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
else
memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
- rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
-
- return rc;
+ return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
@@ -423,7 +437,7 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
cmd->data.setdelip4.flags = flags;
}
- return qeth_send_ipa_cmd(card, iob, NULL, NULL);
+ return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
static int qeth_l3_send_setrouting(struct qeth_card *card,
@@ -943,15 +957,13 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code == 0)
- ether_addr_copy(card->dev->dev_addr,
- cmd->data.create_destroy_addr.unique_id);
- else
- eth_random_addr(card->dev->dev_addr);
+ if (cmd->hdr.return_code)
+ return -EIO;
+ ether_addr_copy(card->dev->dev_addr,
+ cmd->data.create_destroy_addr.unique_id);
return 0;
}
@@ -979,19 +991,18 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code == 0)
+ if (cmd->hdr.return_code == 0) {
card->info.unique_id = *((__u16 *)
&cmd->data.create_destroy_addr.unique_id[6]);
- else {
- card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
- UNIQUE_ID_NOT_BY_CARD;
- dev_warn(&card->gdev->dev, "The network adapter failed to "
- "generate a unique ID\n");
+ return 0;
}
- return 0;
+
+ card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
+ UNIQUE_ID_NOT_BY_CARD;
+ dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n");
+ return -EIO;
}
static int qeth_l3_get_unique_id(struct qeth_card *card)
@@ -1074,7 +1085,7 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
cmd->data.diagass.action, CARD_DEVID(card));
}
- return 0;
+ return rc ? -EIO : 0;
}
static int
@@ -1284,10 +1295,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "kidREC");
- return 0;
- }
clear_bit(vid, card->active_vlans);
qeth_l3_set_rx_mode(dev);
return 0;
@@ -1308,12 +1315,11 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
else
ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
-
- card->stats.multicast++;
+ QETH_CARD_STAT_INC(card, rx_multicast);
break;
case QETH_CAST_BROADCAST:
ether_addr_copy(tg_addr, card->dev->broadcast);
- card->stats.multicast++;
+ QETH_CARD_STAT_INC(card, rx_multicast);
break;
default:
if (card->options.sniffer)
@@ -1394,33 +1400,23 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
}
work_done++;
budget--;
- card->stats.rx_packets++;
- card->stats.rx_bytes += len;
+ QETH_CARD_STAT_INC(card, rx_packets);
+ QETH_CARD_STAT_ADD(card, rx_bytes, len);
}
return work_done;
}
-static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
+static void qeth_l3_stop_card(struct qeth_card *card)
{
QETH_DBF_TEXT(SETUP, 2, "stopcard");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_set_allowed_threads(card, 0, 1);
+
if (card->options.sniffer &&
(card->info.promisc_mode == SET_PROMISC_MODE_ON))
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
- if (card->read.state == CH_STATE_UP &&
- card->write.state == CH_STATE_UP &&
- (card->state == CARD_STATE_UP)) {
- if (recovery_mode)
- qeth_l3_stop(card->dev);
- else {
- rtnl_lock();
- dev_close(card->dev);
- rtnl_unlock();
- }
- card->state = CARD_STATE_SOFTSETUP;
- }
+
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_l3_clear_ip_htable(card, 1);
qeth_clear_ipacmd_list(card);
@@ -1436,6 +1432,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
qeth_clear_cmd_buffers(&card->read);
qeth_clear_cmd_buffers(&card->write);
}
+
+ flush_workqueue(card->event_wq);
}
/*
@@ -1476,9 +1474,7 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
int i, rc;
QETH_CARD_TEXT(card, 3, "setmulti");
- if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
- (card->state != CARD_STATE_UP))
- return;
+
if (!card->options.sniffer) {
spin_lock_bh(&card->mclock);
@@ -1489,14 +1485,14 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
switch (addr->disp_flag) {
case QETH_DISP_ADDR_DELETE:
rc = qeth_l3_deregister_addr_entry(card, addr);
- if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) {
+ if (!rc || rc == -ENOENT) {
hash_del(&addr->hnode);
kfree(addr);
}
break;
case QETH_DISP_ADDR_ADD:
rc = qeth_l3_register_addr_entry(card, addr);
- if (rc && rc != IPA_RC_LAN_OFFLINE) {
+ if (rc && rc != -ENETDOWN) {
hash_del(&addr->hnode);
kfree(addr);
break;
@@ -1517,7 +1513,7 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
qeth_l3_handle_promisc_mode(card);
}
-static int qeth_l3_arp_makerc(int rc)
+static int qeth_l3_arp_makerc(u16 rc)
{
switch (rc) {
case IPA_RC_SUCCESS:
@@ -1534,8 +1530,18 @@ static int qeth_l3_arp_makerc(int rc)
}
}
+static int qeth_l3_arp_cmd_cb(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+
+ qeth_setassparms_cb(card, reply, data);
+ return qeth_l3_arp_makerc(cmd->hdr.return_code);
+}
+
static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
{
+ struct qeth_cmd_buffer *iob;
int rc;
QETH_CARD_TEXT(card, 3, "arpstnoe");
@@ -1550,13 +1556,19 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
}
- rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
- no_entries);
+
+ iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_SET_NO_ENTRIES, 4,
+ QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
+
+ __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (u32) no_entries;
+ rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
if (rc)
QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n",
CARD_DEVID(card), rc);
- return qeth_l3_arp_makerc(rc);
+ return rc;
}
static __u32 get_arp_entry_size(struct qeth_card *card,
@@ -1607,7 +1619,6 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
struct qeth_arp_query_data *qdata;
struct qeth_arp_query_info *qinfo;
- int i;
int e;
int entrybytes_done;
int stripped_bytes;
@@ -1621,13 +1632,13 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
if (cmd->hdr.return_code) {
QETH_CARD_TEXT(card, 4, "arpcberr");
QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
- return 0;
+ return qeth_l3_arp_makerc(cmd->hdr.return_code);
}
if (cmd->data.setassparms.hdr.return_code) {
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
QETH_CARD_TEXT(card, 4, "setaperr");
QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
- return 0;
+ return qeth_l3_arp_makerc(cmd->hdr.return_code);
}
qdata = &cmd->data.setassparms.data.query_arp;
QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
@@ -1654,9 +1665,9 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
break;
if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
- QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
- cmd->hdr.return_code = IPA_RC_ENOMEM;
- goto out_error;
+ QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOSPC);
+ memset(qinfo->udata, 0, 4);
+ return -ENOSPC;
}
memcpy(qinfo->udata + qinfo->udata_offset,
@@ -1679,25 +1690,6 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
QETH_CARD_TEXT_(card, 4, "rc%i", 0);
return 0;
-out_error:
- i = 0;
- memcpy(qinfo->udata, &i, 4);
- return 0;
-}
-
-static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
- struct qeth_cmd_buffer *iob, int len,
- int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
- unsigned long),
- void *reply_param)
-{
- QETH_CARD_TEXT(card, 4, "sendarp");
-
- memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
- memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
- &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
- return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
- reply_cb, reply_param);
}
static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
@@ -1719,15 +1711,11 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
return -ENOMEM;
cmd = __ipa_cmd(iob);
cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
- cmd->data.setassparms.data.query_arp.reply_bits = 0;
- cmd->data.setassparms.data.query_arp.no_entries = 0;
- rc = qeth_l3_send_ipa_arp_cmd(card, iob,
- QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
- qeth_l3_arp_query_cb, (void *)qinfo);
+ rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_query_cb, qinfo);
if (rc)
QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
CARD_DEVID(card), rc);
- return qeth_l3_arp_makerc(rc);
+ return rc;
}
static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
@@ -1809,16 +1797,16 @@ static int qeth_l3_arp_modify_entry(struct qeth_card *card,
cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry;
ether_addr_copy(cmd_entry->macaddr, entry->macaddr);
memcpy(cmd_entry->ipaddr, entry->ipaddr, 4);
- rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
+ rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
if (rc)
QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n",
arp_cmd, CARD_DEVID(card), rc);
-
- return qeth_l3_arp_makerc(rc);
+ return rc;
}
static int qeth_l3_arp_flush_cache(struct qeth_card *card)
{
+ struct qeth_cmd_buffer *iob;
int rc;
QETH_CARD_TEXT(card, 3, "arpflush");
@@ -1833,12 +1821,18 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
}
- rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
+
+ iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_FLUSH_CACHE, 0,
+ QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
+
+ rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
if (rc)
QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n",
CARD_DEVID(card), rc);
- return qeth_l3_arp_makerc(rc);
+ return rc;
}
static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1929,22 +1923,6 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
}
}
-static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
- unsigned int data_len)
-{
- char daddr[16];
-
- hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
- hdr->hdr.l3.length = data_len;
- hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
-
- memset(daddr, 0, sizeof(daddr));
- daddr[0] = 0xfe;
- daddr[1] = 0x80;
- memcpy(&daddr[8], iucv_trans_hdr(skb)->destUserID, 8);
- memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
-}
-
static u8 qeth_l3_cast_type_to_flag(int cast_type)
{
if (cast_type == RTN_MULTICAST)
@@ -1956,11 +1934,13 @@ static u8 qeth_l3_cast_type_to_flag(int cast_type)
return QETH_CAST_UNICAST;
}
-static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb, int ipv, int cast_type,
- unsigned int data_len)
+static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
+ struct qeth_hdr *hdr, struct sk_buff *skb,
+ int ipv, int cast_type, unsigned int data_len)
{
+ struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
+ struct qeth_card *card = queue->card;
hdr->hdr.l3.length = data_len;
@@ -1968,13 +1948,21 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO;
} else {
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+
+ if (skb->protocol == htons(ETH_P_AF_IUCV)) {
+ l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+ l3_hdr->next_hop.ipv6_addr.s6_addr16[0] = htons(0xfe80);
+ memcpy(&l3_hdr->next_hop.ipv6_addr.s6_addr32[2],
+ iucv_trans_hdr(skb)->destUserID, 8);
+ return;
+ }
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
/* some HW requires combined L3+L4 csum offload: */
if (ipv == 4)
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
+ QETH_TXQ_STAT_INC(queue, skbs_csum);
}
}
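
The AF_IUCV branch above now composes the IPv6 link-local next hop inline, replacing the removed qeth_l3_fill_af_iucv_hdr(): the fe80:: prefix goes into the first 16 bits and the 8-byte destination user ID becomes the interface identifier. A small userspace sketch of that address construction (the function name and buffer layout are illustrative):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    static void build_iucv_next_hop(uint8_t next_hop[16],
                                    const uint8_t dest_user_id[8])
    {
        uint16_t prefix = htons(0xfe80);

        memset(next_hop, 0, 16);
        memcpy(next_hop, &prefix, 2);           /* fe80:0000:0000:0000 */
        memcpy(next_hop + 8, dest_user_id, 8);  /* interface identifier */
    }
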
@@ -2012,13 +2000,11 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
} else {
/* IPv6 */
const struct rt6_info *rt = skb_rt6_info(skb);
- const struct in6_addr *next_hop;
if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
- next_hop = &rt->rt6i_gateway;
+ l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
else
- next_hop = &ipv6_hdr(skb)->daddr;
- memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16);
+ l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr;
hdr->hdr.l3.flags |= QETH_HDR_IPV6;
if (card->info.type != QETH_CARD_TYPE_IQD)
@@ -2044,84 +2030,25 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type)
{
- unsigned int hw_hdr_len, proto_len, frame_len, elements;
unsigned char eth_hdr[ETH_HLEN];
- bool is_tso = skb_is_gso(skb);
- unsigned int data_offset = 0;
- struct qeth_hdr *hdr = NULL;
- unsigned int hd_len = 0;
- int push_len, rc;
- bool is_sg;
-
- if (is_tso) {
- hw_hdr_len = sizeof(struct qeth_hdr_tso);
- proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb) -
- ETH_HLEN;
- } else {
- hw_hdr_len = sizeof(struct qeth_hdr);
- proto_len = 0;
- }
+ unsigned int hw_hdr_len;
+ int rc;
/* re-use the L2 header area for the HW header: */
+ hw_hdr_len = skb_is_gso(skb) ? sizeof(struct qeth_hdr_tso) :
+ sizeof(struct qeth_hdr);
rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
if (rc)
return rc;
skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
skb_pull(skb, ETH_HLEN);
- frame_len = skb->len;
qeth_l3_fixup_headers(skb);
- push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
- &elements);
- if (push_len < 0)
- return push_len;
- if (is_tso || !push_len) {
- /* HW header needs its own buffer element. */
- hd_len = hw_hdr_len + proto_len;
- data_offset = push_len + proto_len;
- }
- memset(hdr, 0, hw_hdr_len);
-
- if (skb->protocol == htons(ETH_P_AF_IUCV)) {
- qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len);
- } else {
- qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
- if (is_tso)
- qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
- frame_len - proto_len, skb,
- proto_len);
- }
-
- is_sg = skb_is_nonlinear(skb);
- if (IS_IQD(card)) {
- rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
- hd_len);
- } else {
- /* TODO: drop skb_orphan() once TX completion is fast enough */
- skb_orphan(skb);
- rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
- hd_len, elements);
- }
-
- if (!rc) {
- if (card->options.performance_stats) {
- card->perf_stats.buf_elements_sent += elements;
- if (is_sg)
- card->perf_stats.sg_skbs_sent++;
- if (is_tso) {
- card->perf_stats.large_send_bytes += frame_len;
- card->perf_stats.large_send_cnt++;
- }
- }
- } else {
- if (!push_len)
- kmem_cache_free(qeth_core_header_cache, hdr);
- if (rc == -EBUSY) {
- /* roll back to ETH header */
- skb_pull(skb, push_len);
- skb_push(skb, ETH_HLEN);
- skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
- }
+ rc = qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
+ if (rc == -EBUSY) {
+ /* roll back to ETH header */
+ skb_push(skb, ETH_HLEN);
+ skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
}
return rc;
}
@@ -2136,6 +2063,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
int tx_bytes = skb->len;
int rc;
+ queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+
if (IS_IQD(card)) {
if (card->options.sniffer)
goto tx_drop;
@@ -2145,20 +2074,9 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
goto tx_drop;
}
- if (card->state != CARD_STATE_UP) {
- card->stats.tx_carrier_errors++;
- goto tx_drop;
- }
-
if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
goto tx_drop;
- queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
-
- if (card->options.performance_stats) {
- card->perf_stats.outbound_cnt++;
- card->perf_stats.outbound_start_time = qeth_get_micros();
- }
netif_stop_queue(dev);
if (ipv == 4 || IS_IQD(card))
@@ -2168,11 +2086,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
qeth_l3_fill_header);
if (!rc) {
- card->stats.tx_packets++;
- card->stats.tx_bytes += tx_bytes;
- if (card->options.performance_stats)
- card->perf_stats.outbound_time += qeth_get_micros() -
- card->perf_stats.outbound_start_time;
+ QETH_TXQ_STAT_INC(queue, tx_packets);
+ QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
netif_wake_queue(dev);
return NETDEV_TX_OK;
} else if (rc == -EBUSY) {
@@ -2180,72 +2095,12 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
} /* else fall through */
tx_drop:
- card->stats.tx_dropped++;
- card->stats.tx_errors++;
- dev_kfree_skb_any(skb);
+ QETH_TXQ_STAT_INC(queue, tx_dropped);
+ kfree_skb(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
-static int __qeth_l3_open(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
- int rc = 0;
-
- QETH_CARD_TEXT(card, 4, "qethopen");
- if (card->state == CARD_STATE_UP)
- return rc;
- if (card->state != CARD_STATE_SOFTSETUP)
- return -ENODEV;
- card->data.state = CH_STATE_UP;
- card->state = CARD_STATE_UP;
- netif_start_queue(dev);
-
- if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
- napi_enable(&card->napi);
- local_bh_disable();
- napi_schedule(&card->napi);
- /* kick-start the NAPI softirq: */
- local_bh_enable();
- } else
- rc = -EIO;
- return rc;
-}
-
-static int qeth_l3_open(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 5, "qethope_");
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "openREC");
- return -ERESTARTSYS;
- }
- return __qeth_l3_open(dev);
-}
-
-static int qeth_l3_stop(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 4, "qethstop");
- netif_tx_disable(dev);
- if (card->state == CARD_STATE_UP) {
- card->state = CARD_STATE_SOFTSETUP;
- napi_disable(&card->napi);
- }
- return 0;
-}
-
-static const struct ethtool_ops qeth_l3_ethtool_ops = {
- .get_link = ethtool_op_get_link,
- .get_strings = qeth_core_get_strings,
- .get_ethtool_stats = qeth_core_get_ethtool_stats,
- .get_sset_count = qeth_core_get_sset_count,
- .get_drvinfo = qeth_core_get_drvinfo,
- .get_link_ksettings = qeth_core_ethtool_get_link_ksettings,
-};
-
/*
* we need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting
* NOARP on the netdevice is no option because it also turns off neighbor
@@ -2280,9 +2135,9 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
}
static const struct net_device_ops qeth_l3_netdev_ops = {
- .ndo_open = qeth_l3_open,
- .ndo_stop = qeth_l3_stop,
- .ndo_get_stats = qeth_get_stats,
+ .ndo_open = qeth_open,
+ .ndo_stop = qeth_stop,
+ .ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
@@ -2295,9 +2150,9 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
};
static const struct net_device_ops qeth_l3_osa_netdev_ops = {
- .ndo_open = qeth_l3_open,
- .ndo_stop = qeth_l3_stop,
- .ndo_get_stats = qeth_get_stats,
+ .ndo_open = qeth_open,
+ .ndo_stop = qeth_stop,
+ .ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_features_check = qeth_l3_osa_features_check,
.ndo_validate_addr = eth_validate_addr,
@@ -2316,9 +2171,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
unsigned int headroom;
int rc;
- if (qeth_netdev_is_registered(card->dev))
- return 0;
-
if (card->info.type == QETH_CARD_TYPE_OSD ||
card->info.type == QETH_CARD_TYPE_OSX) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -2366,14 +2218,10 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
rc = qeth_l3_iqd_read_initial_mac(card);
if (rc)
goto out;
-
- if (card->options.hsuid[0])
- memcpy(card->dev->perm_addr, card->options.hsuid, 9);
} else
return -ENODEV;
card->dev->needed_headroom = headroom;
- card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -2404,14 +2252,15 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
+ hash_init(card->ip_htable);
+
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l3_create_device_attributes(&gdev->dev);
if (rc)
return rc;
}
- hash_init(card->ip_htable);
+
hash_init(card->ip_mc_htable);
- card->info.hwtrap = 0;
return 0;
}
@@ -2428,17 +2277,18 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l3_set_offline(cgdev);
+ cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
}
-static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
+static int qeth_l3_set_online(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ struct net_device *dev = card->dev;
int rc = 0;
- enum qeth_card_states recover_flag;
bool carrier_ok;
mutex_lock(&card->discipline_mutex);
@@ -2446,7 +2296,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- recover_flag = card->state;
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
@@ -2454,10 +2303,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
- rc = qeth_l3_setup_netdev(card, carrier_ok);
- if (rc)
- goto out_remove;
-
if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
if (card->info.hwtrap &&
qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
@@ -2487,7 +2332,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
}
- netif_tx_disable(card->dev);
rc = qeth_init_qdio_queues(card);
if (rc) {
@@ -2500,14 +2344,23 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
qeth_set_allowed_threads(card, 0xffffffff, 0);
qeth_l3_recover_ip(card);
- qeth_enable_hw_features(card->dev);
- if (recover_flag == CARD_STATE_RECOVER) {
+ if (!qeth_netdev_is_registered(dev)) {
+ rc = qeth_l3_setup_netdev(card, carrier_ok);
+ if (rc)
+ goto out_remove;
+ } else {
rtnl_lock();
- if (recovery_mode) {
- __qeth_l3_open(card->dev);
- qeth_l3_set_rx_mode(card->dev);
- } else {
- dev_open(card->dev);
+ if (carrier_ok)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ netif_device_attach(dev);
+ qeth_enable_hw_features(dev);
+
+ if (card->info.open_when_online) {
+ card->info.open_when_online = 0;
+ dev_open(dev, NULL);
}
rtnl_unlock();
}
@@ -2518,45 +2371,43 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
mutex_unlock(&card->discipline_mutex);
return 0;
out_remove:
- qeth_l3_stop_card(card, 0);
+ qeth_l3_stop_card(card);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
qdio_free(CARD_DDEV(card));
- if (recover_flag == CARD_STATE_RECOVER)
- card->state = CARD_STATE_RECOVER;
- else
- card->state = CARD_STATE_DOWN;
+ card->state = CARD_STATE_DOWN;
+
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
return rc;
}
-static int qeth_l3_set_online(struct ccwgroup_device *gdev)
-{
- return __qeth_l3_set_online(gdev, 0);
-}
-
static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
int recovery_mode)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
int rc = 0, rc2 = 0, rc3 = 0;
- enum qeth_card_states recover_flag;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
- netif_carrier_off(card->dev);
- recover_flag = card->state;
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
card->info.hwtrap = 1;
}
- qeth_l3_stop_card(card, recovery_mode);
- if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
+
+ rtnl_lock();
+ card->info.open_when_online = card->dev->flags & IFF_UP;
+ dev_close(card->dev);
+ netif_device_detach(card->dev);
+ netif_carrier_off(card->dev);
+ rtnl_unlock();
+
+ qeth_l3_stop_card(card);
+ if (card->options.cq == QETH_CQ_ENABLED) {
rtnl_lock();
call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
rtnl_unlock();
@@ -2569,8 +2420,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
qdio_free(CARD_DDEV(card));
- if (recover_flag == CARD_STATE_UP)
- card->state = CARD_STATE_RECOVER;
+
/* let user_space know that device is offline */
kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
@@ -2596,18 +2446,16 @@ static int qeth_l3_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_recovery_task(card);
__qeth_l3_set_offline(card->gdev, 1);
- rc = __qeth_l3_set_online(card->gdev, 1);
+ rc = qeth_l3_set_online(card->gdev);
if (!rc)
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
else {
- qeth_close_dev(card);
+ ccwgroup_set_offline(card->gdev);
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
- qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;
@@ -2617,37 +2465,23 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- netif_device_detach(card->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_OFFLINE)
return 0;
- if (card->state == CARD_STATE_UP) {
- if (card->info.hwtrap)
- qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
- __qeth_l3_set_offline(card->gdev, 1);
- } else
- __qeth_l3_set_offline(card->gdev, 0);
+
+ qeth_l3_set_offline(gdev);
return 0;
}
static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- int rc = 0;
+ int rc;
- if (card->state == CARD_STATE_RECOVER) {
- rc = __qeth_l3_set_online(card->gdev, 1);
- if (rc) {
- rtnl_lock();
- dev_close(card->dev);
- rtnl_unlock();
- }
- } else
- rc = __qeth_l3_set_online(card->gdev, 0);
+ rc = qeth_l3_set_online(gdev);
qeth_set_allowed_threads(card, 0xffffffff, 0);
- netif_device_attach(card->dev);
if (rc)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 45ac6d8705c6..cff518b0f904 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -167,8 +167,7 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
@@ -213,8 +212,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
return -EPERM;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
@@ -280,8 +278,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
if (card->info.type != QETH_CARD_TYPE_IQD)
return -EPERM;
- if (card->state != CARD_STATE_DOWN &&
- card->state != CARD_STATE_RECOVER)
+ if (card->state != CARD_STATE_DOWN)
return -EPERM;
if (card->options.sniffer)
return -EPERM;
@@ -356,8 +353,7 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 94f4d8fe85e0..e390f8c6d5f3 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -4,7 +4,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2017
*/
/*
@@ -124,6 +124,9 @@ static int __init zfcp_module_init(void)
{
int retval = -ENOMEM;
+ if (zfcp_experimental_dix)
+ pr_warn("DIX is enabled. It is experimental and might cause problems\n");
+
zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
sizeof(struct fsf_qtcb));
if (!zfcp_fsf_qtcb_cache)
@@ -248,43 +251,36 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
{
- if (adapter->pool.erp_req)
- mempool_destroy(adapter->pool.erp_req);
- if (adapter->pool.scsi_req)
- mempool_destroy(adapter->pool.scsi_req);
- if (adapter->pool.scsi_abort)
- mempool_destroy(adapter->pool.scsi_abort);
- if (adapter->pool.qtcb_pool)
- mempool_destroy(adapter->pool.qtcb_pool);
- if (adapter->pool.status_read_req)
- mempool_destroy(adapter->pool.status_read_req);
- if (adapter->pool.sr_data)
- mempool_destroy(adapter->pool.sr_data);
- if (adapter->pool.gid_pn)
- mempool_destroy(adapter->pool.gid_pn);
+ mempool_destroy(adapter->pool.erp_req);
+ mempool_destroy(adapter->pool.scsi_req);
+ mempool_destroy(adapter->pool.scsi_abort);
+ mempool_destroy(adapter->pool.qtcb_pool);
+ mempool_destroy(adapter->pool.status_read_req);
+ mempool_destroy(adapter->pool.sr_data);
+ mempool_destroy(adapter->pool.gid_pn);
}
/**
* zfcp_status_read_refill - refill the long running status_read_requests
* @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
*
- * Returns: 0 on success, 1 otherwise
- *
- * if there are 16 or more status_read requests missing an adapter_reopen
- * is triggered
+ * Return:
+ * * 0 on success meaning at least one status read is pending
+ * * 1 if posting failed and not a single status read buffer is pending,
+ * also triggers adapter reopen recovery
*/
int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
- while (atomic_read(&adapter->stat_miss) > 0)
+ while (atomic_add_unless(&adapter->stat_miss, -1, 0))
if (zfcp_fsf_status_read(adapter->qdio)) {
+ atomic_inc(&adapter->stat_miss); /* undo add -1 */
if (atomic_read(&adapter->stat_miss) >=
adapter->stat_read_buf_num) {
zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
return 1;
}
break;
- } else
- atomic_dec(&adapter->stat_miss);
+ }
return 0;
}
@@ -407,7 +403,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
goto failed;
/* report size limit per scatter-gather segment */
- adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
@@ -542,45 +537,3 @@ err_out:
zfcp_ccw_adapter_put(adapter);
return ERR_PTR(retval);
}
-
-/**
- * zfcp_sg_free_table - free memory used by scatterlists
- * @sg: pointer to scatterlist
- * @count: number of scatterlist which are to be free'ed
- * the scatterlist are expected to reference pages always
- */
-void zfcp_sg_free_table(struct scatterlist *sg, int count)
-{
- int i;
-
- for (i = 0; i < count; i++, sg++)
- if (sg)
- free_page((unsigned long) sg_virt(sg));
- else
- break;
-}
-
-/**
- * zfcp_sg_setup_table - init scatterlist and allocate, assign buffers
- * @sg: pointer to struct scatterlist
- * @count: number of scatterlists which should be assigned with buffers
- * of size page
- *
- * Returns: 0 on success, -ENOMEM otherwise
- */
-int zfcp_sg_setup_table(struct scatterlist *sg, int count)
-{
- void *addr;
- int i;
-
- sg_init_table(sg, count);
- for (i = 0; i < count; i++, sg++) {
- addr = (void *) get_zeroed_page(GFP_KERNEL);
- if (!addr) {
- zfcp_sg_free_table(sg, i);
- return -ENOMEM;
- }
- sg_set_buf(sg, addr, PAGE_SIZE);
- }
- return 0;
-}
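
The reworked zfcp_status_read_refill() consumes a stat_miss credit up front with atomic_add_unless(), undoes the decrement when posting a status read fails, and reports failure (triggering adapter reopen) only once not a single buffer is pending, as the updated kernel-doc spells out. A userspace sketch of that pattern using C11 atomics; post_status_read() and the threshold parameter are illustrative stand-ins:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* add delta to *v unless *v equals "unless"; true if applied */
    static bool add_unless(atomic_int *v, int delta, int unless)
    {
        int old = atomic_load(v);

        while (old != unless) {
            if (atomic_compare_exchange_weak(v, &old, old + delta))
                return true;
        }
        return false;
    }

    static int refill(atomic_int *stat_miss, int threshold,
                      int (*post_status_read)(void))
    {
        while (add_unless(stat_miss, -1, 0)) {
            if (post_status_read()) {
                atomic_fetch_add(stat_miss, 1); /* undo the -1 */
                if (atomic_load(stat_miss) >= threshold)
                    return 1;                   /* trigger reopen */
                break;
            }
        }
        return 0;                               /* at least one pending */
    }
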
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 3b368fcf13f4..dccdb41bed8c 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -63,7 +63,8 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
/**
* zfcp_dbf_hba_fsf_res - trace event for fsf responses
- * @tag: tag indicating which kind of unsolicited status has been received
+ * @tag: tag indicating which kind of FSF response has been received
+ * @level: trace level to be used for event
* @req: request for which a response was received
*/
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
@@ -81,8 +82,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
rec->id = ZFCP_DBF_HBA_RES;
rec->fsf_req_id = req->req_id;
rec->fsf_req_status = req->status;
- rec->fsf_cmd = req->fsf_command;
- rec->fsf_seq_no = req->seq_no;
+ rec->fsf_cmd = q_head->fsf_command;
+ rec->fsf_seq_no = q_pref->req_seq_no;
rec->u.res.req_issued = req->issued;
rec->u.res.prot_status = q_pref->prot_status;
rec->u.res.fsf_status = q_head->fsf_status;
@@ -94,7 +95,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
FSF_STATUS_QUALIFIER_SIZE);
- if (req->fsf_command != FSF_QTCB_FCP_CMND) {
+ if (q_head->fsf_command != FSF_QTCB_FCP_CMND) {
rec->pl_len = q_head->log_length;
zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
rec->pl_len, "fsf_res", req->req_id);
@@ -127,7 +128,7 @@ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
rec->id = ZFCP_DBF_HBA_USS;
rec->fsf_req_id = req->req_id;
rec->fsf_req_status = req->status;
- rec->fsf_cmd = req->fsf_command;
+ rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
if (!srb)
goto log;
@@ -153,7 +154,7 @@ log:
/**
* zfcp_dbf_hba_bit_err - trace event for bit error conditions
- * @tag: tag indicating which kind of unsolicited status has been received
+ * @tag: tag indicating which kind of bit error unsolicited status was received
* @req: request which caused the bit_error condition
*/
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
@@ -174,7 +175,7 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
rec->id = ZFCP_DBF_HBA_BIT;
rec->fsf_req_id = req->req_id;
rec->fsf_req_status = req->status;
- rec->fsf_cmd = req->fsf_command;
+ rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
memcpy(&rec->u.be, &sr_buf->payload.bit_error,
sizeof(struct fsf_bit_error_payload));
@@ -224,6 +225,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
/**
* zfcp_dbf_hba_basic - trace event for basic adapter events
+ * @tag: identifier for event
* @adapter: pointer to struct zfcp_adapter
*/
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
@@ -357,7 +359,7 @@ void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
rec->u.run.fsf_req_id = erp->fsf_req_id;
rec->u.run.rec_status = erp->status;
rec->u.run.rec_step = erp->step;
- rec->u.run.rec_action = erp->action;
+ rec->u.run.rec_action = erp->type;
if (erp->sdev)
rec->u.run.rec_count =
@@ -478,7 +480,8 @@ out:
/**
* zfcp_dbf_san_req - trace event for issued SAN request
* @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing issued CT or ELS data
+ * @d_id: N_Port_ID where SAN request is sent to
* d_id: destination ID
*/
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
@@ -560,7 +563,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
/**
* zfcp_dbf_san_res - trace event for received SAN request
* @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing received CT or ELS data
*/
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
@@ -580,7 +583,7 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
/**
* zfcp_dbf_san_in_els - trace event for incoming ELS
* @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing received ELS data
*/
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index d116c07ed77a..900c779cc39b 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -42,7 +42,8 @@ struct zfcp_dbf_rec_trigger {
* @fsf_req_id: request id for fsf requests
* @rec_status: status of the fsf request
* @rec_step: current step of the recovery action
- * rec_count: recovery counter
+ * @rec_action: ERP action type
+ * @rec_count: recoveries including retries for particular @rec_action
*/
struct zfcp_dbf_rec_running {
u64 fsf_req_id;
@@ -72,6 +73,7 @@ enum zfcp_dbf_rec_id {
* @adapter_status: current status of the adapter
* @port_status: current status of the port
* @lun_status: current status of the lun
+ * @u: record type specific data
* @u.trig: structure zfcp_dbf_rec_trigger
* @u.run: structure zfcp_dbf_rec_running
*/
@@ -126,6 +128,8 @@ struct zfcp_dbf_san {
* @prot_status_qual: protocol status qualifier
* @fsf_status: fsf status
* @fsf_status_qual: fsf status qualifier
+ * @port_handle: handle for port
+ * @lun_handle: handle for LUN
*/
struct zfcp_dbf_hba_res {
u64 req_issued;
@@ -158,6 +162,7 @@ struct zfcp_dbf_hba_uss {
* @ZFCP_DBF_HBA_RES: response trace record
* @ZFCP_DBF_HBA_USS: unsolicited status trace record
* @ZFCP_DBF_HBA_BIT: bit error trace record
+ * @ZFCP_DBF_HBA_BASIC: basic adapter event, only trace tag, no other data
*/
enum zfcp_dbf_hba_id {
ZFCP_DBF_HBA_RES = 1,
@@ -176,6 +181,9 @@ enum zfcp_dbf_hba_id {
* @fsf_seq_no: fsf sequence number
* @pl_len: length of payload stored as zfcp_dbf_pay
* @u: record type specific data
+ * @u.res: data for fsf responses
+ * @u.uss: data for unsolicited status buffer
+ * @u.be: data for bit error unsolicited status buffer
*/
struct zfcp_dbf_hba {
u8 id;
@@ -339,8 +347,8 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
zfcp_dbf_hba_fsf_resp_suppress(req)
? 5 : 1, req);
- } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
- (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
+ } else if ((qtcb->header.fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
+ (qtcb->header.fsf_command == FSF_QTCB_OPEN_LUN)) {
zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
} else if (qtcb->header.log_length) {
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 3396a47721a7..87d2f47a6990 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -4,7 +4,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2017
*/
#ifndef ZFCP_DEF_H
@@ -41,24 +41,16 @@
#include "zfcp_fc.h"
#include "zfcp_qdio.h"
-struct zfcp_reqlist;
-
-/********************* SCSI SPECIFIC DEFINES *********************************/
-#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
-
/********************* FSF SPECIFIC DEFINES *********************************/
/* ATTENTION: value must not be used by hardware */
#define FSF_QTCB_UNSOLICITED_STATUS 0x6305
-/* timeout value for "default timer" for fsf requests */
-#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
-
/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
/*
- * Note, the leftmost status byte is common among adapter, port
- * and unit
+ * Note, the leftmost 12 status bits (3 nibbles) are common among adapter, port
+ * and unit. This is a mask for bitwise 'and' with status values.
*/
#define ZFCP_COMMON_FLAGS 0xfff00000
@@ -97,49 +89,60 @@ struct zfcp_reqlist;
/************************* STRUCTURE DEFINITIONS *****************************/
-struct zfcp_fsf_req;
+/**
+ * enum zfcp_erp_act_type - Type of ERP action object.
+ * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
+ *
+ * Values must fit into u8 because of code dependencies:
+ * zfcp_dbf_rec_trig(), &zfcp_dbf_rec_trigger.want, &zfcp_dbf_rec_trigger.need;
+ * zfcp_dbf_rec_run_lvl(), zfcp_dbf_rec_run(), &zfcp_dbf_rec_running.rec_action.
+ */
+enum zfcp_erp_act_type {
+ ZFCP_ERP_ACTION_REOPEN_LUN = 1,
+ ZFCP_ERP_ACTION_REOPEN_PORT = 2,
+ ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
+ ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
+};
-/* holds various memory pools of an adapter */
-struct zfcp_adapter_mempool {
- mempool_t *erp_req;
- mempool_t *gid_pn_req;
- mempool_t *scsi_req;
- mempool_t *scsi_abort;
- mempool_t *status_read_req;
- mempool_t *sr_data;
- mempool_t *gid_pn;
- mempool_t *qtcb_pool;
+/*
+ * Values must fit into u16 because of code dependencies:
+ * zfcp_dbf_rec_run_lvl(), zfcp_dbf_rec_run(), zfcp_dbf_rec_run_wka(),
+ * &zfcp_dbf_rec_running.rec_step.
+ */
+enum zfcp_erp_steps {
+ ZFCP_ERP_STEP_UNINITIALIZED = 0x0000,
+ ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
+ ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
+ ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
+ ZFCP_ERP_STEP_LUN_CLOSING = 0x1000,
+ ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
};
struct zfcp_erp_action {
struct list_head list;
- int action; /* requested action code */
+ enum zfcp_erp_act_type type; /* requested action code */
struct zfcp_adapter *adapter; /* device which should be recovered */
struct zfcp_port *port;
struct scsi_device *sdev;
u32 status; /* recovery status */
- u32 step; /* active step of this erp action */
+ enum zfcp_erp_steps step; /* active step of this erp action */
unsigned long fsf_req_id;
struct timer_list timer;
};
-struct fsf_latency_record {
- u32 min;
- u32 max;
- u64 sum;
-};
-
-struct latency_cont {
- struct fsf_latency_record channel;
- struct fsf_latency_record fabric;
- u64 counter;
-};
-
-struct zfcp_latencies {
- struct latency_cont read;
- struct latency_cont write;
- struct latency_cont cmd;
- spinlock_t lock;
+/* holds various memory pools of an adapter */
+struct zfcp_adapter_mempool {
+ mempool_t *erp_req;
+ mempool_t *gid_pn_req;
+ mempool_t *scsi_req;
+ mempool_t *scsi_abort;
+ mempool_t *status_read_req;
+ mempool_t *sr_data;
+ mempool_t *gid_pn;
+ mempool_t *qtcb_pool;
};
struct zfcp_adapter {
@@ -220,6 +223,25 @@ struct zfcp_port {
unsigned int starget_id;
};
+struct zfcp_latency_record {
+ u32 min;
+ u32 max;
+ u64 sum;
+};
+
+struct zfcp_latency_cont {
+ struct zfcp_latency_record channel;
+ struct zfcp_latency_record fabric;
+ u64 counter;
+};
+
+struct zfcp_latencies {
+ struct zfcp_latency_cont read;
+ struct zfcp_latency_cont write;
+ struct zfcp_latency_cont cmd;
+ spinlock_t lock;
+};
+
/**
* struct zfcp_unit - LUN configured via zfcp sysfs
* @dev: struct device for sysfs representation and reference counting
@@ -287,9 +309,7 @@ static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
* @qdio_req: qdio queue related values
* @completion: used to signal the completion of the request
* @status: status of the request
- * @fsf_command: FSF command issued
* @qtcb: associated QTCB
- * @seq_no: sequence number of this request
* @data: private data
* @timer: timer data of this request
* @erp_action: reference to erp action if request issued on behalf of ERP
@@ -304,9 +324,7 @@ struct zfcp_fsf_req {
struct zfcp_qdio_req qdio_req;
struct completion completion;
u32 status;
- u32 fsf_command;
struct fsf_qtcb *qtcb;
- u32 seq_no;
void *data;
struct timer_list timer;
struct zfcp_erp_action *erp_action;
@@ -321,4 +339,9 @@ int zfcp_adapter_multi_buffer_active(struct zfcp_adapter *adapter)
return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT;
}
+static inline bool zfcp_fsf_req_is_status_read_buffer(struct zfcp_fsf_req *req)
+{
+ return req->qtcb == NULL;
+}
+
#endif /* ZFCP_DEF_H */
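The struct zfcp_fsf_req changes above remove the cached fsf_command and seq_no members; the command code now lives only in the QTCB, and the new zfcp_fsf_req_is_status_read_buffer() helper identifies unsolicited status read requests simply by the absence of a QTCB. A hedged user-space sketch of that single-source-of-truth refactor (all names below are hypothetical stand-ins, not zfcp APIs):

/* Hypothetical sketch: instead of caching the command code in the request,
 * read it from the owning control block; a request without a control block
 * is, by convention, an unsolicited status read. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define CMD_UNSOLICITED_STATUS 0x6305
#define CMD_FCP_CMND           0x0001

struct qtcb {
	unsigned int fsf_command;
};

struct fsf_req {
	struct qtcb *qtcb;	/* NULL for status read buffers */
};

static bool req_is_status_read_buffer(const struct fsf_req *req)
{
	return req->qtcb == NULL;
}

static unsigned int req_command(const struct fsf_req *req)
{
	if (req_is_status_read_buffer(req))
		return CMD_UNSOLICITED_STATUS;
	return req->qtcb->fsf_command;
}

int main(void)
{
	struct qtcb qtcb = { .fsf_command = CMD_FCP_CMND };
	struct fsf_req io_req = { .qtcb = &qtcb };
	struct fsf_req sr_req = { .qtcb = NULL };

	assert(req_command(&io_req) == CMD_FCP_CMND);
	assert(req_command(&sr_req) == CMD_UNSOLICITED_STATUS);
	printf("io: 0x%04x, status read: 0x%04x\n",
	       req_command(&io_req), req_command(&sr_req));
	return 0;
}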
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e7e6b63905e2..e8fc28dba8df 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -4,7 +4,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -24,38 +24,18 @@ enum zfcp_erp_act_flags {
ZFCP_STATUS_ERP_NO_REF = 0x00800000,
};
-enum zfcp_erp_steps {
- ZFCP_ERP_STEP_UNINITIALIZED = 0x0000,
- ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
- ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
- ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
- ZFCP_ERP_STEP_LUN_CLOSING = 0x1000,
- ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
-};
-
-/**
- * enum zfcp_erp_act_type - Type of ERP action object.
- * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
- * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
- * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
- * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
- * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
- * either of the first four enum values.
- * Used to indicate that an ERP action could not be
- * set up despite a detected need for some recovery.
- * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
- * either of the first four enum values.
- * Used to indicate that ERP not needed because
- * the object has ZFCP_STATUS_COMMON_ERP_FAILED.
+/*
+ * Eyecatcher pseudo flag to bitwise or-combine with enum zfcp_erp_act_type.
+ * Used to indicate that an ERP action could not be set up despite a detected
+ * need for some recovery.
*/
-enum zfcp_erp_act_type {
- ZFCP_ERP_ACTION_REOPEN_LUN = 1,
- ZFCP_ERP_ACTION_REOPEN_PORT = 2,
- ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
- ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
- ZFCP_ERP_ACTION_NONE = 0xc0,
- ZFCP_ERP_ACTION_FAILED = 0xe0,
-};
+#define ZFCP_ERP_ACTION_NONE 0xc0
+/*
+ * Eyecatcher pseudo flag to bitwise or-combine with enum zfcp_erp_act_type.
+ * Used to indicate that ERP not needed because the object has
+ * ZFCP_STATUS_COMMON_ERP_FAILED.
+ */
+#define ZFCP_ERP_ACTION_FAILED 0xe0
enum zfcp_erp_act_result {
ZFCP_ERP_SUCCEEDED = 0,
@@ -136,11 +116,11 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
}
}
-static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
- struct zfcp_port *port,
- struct scsi_device *sdev)
+static enum zfcp_erp_act_type zfcp_erp_handle_failed(
+ enum zfcp_erp_act_type want, struct zfcp_adapter *adapter,
+ struct zfcp_port *port, struct scsi_device *sdev)
{
- int need = want;
+ enum zfcp_erp_act_type need = want;
struct zfcp_scsi_dev *zsdev;
switch (want) {
@@ -171,19 +151,17 @@ static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
- default:
- need = 0;
- break;
}
return need;
}
-static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
+static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
+ struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
{
- int need = want;
+ enum zfcp_erp_act_type need = want;
int l_status, p_status, a_status;
struct zfcp_scsi_dev *zfcp_sdev;
@@ -230,7 +208,8 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
return need;
}
-static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
+static struct zfcp_erp_action *zfcp_erp_setup_act(enum zfcp_erp_act_type need,
+ u32 act_status,
struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
@@ -278,9 +257,6 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
-
- default:
- return NULL;
}
WARN_ON_ONCE(erp_action->adapter != adapter);
@@ -288,18 +264,19 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
memset(&erp_action->timer, 0, sizeof(erp_action->timer));
erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
erp_action->fsf_req_id = 0;
- erp_action->action = need;
+ erp_action->type = need;
erp_action->status = act_status;
return erp_action;
}
-static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
+static void zfcp_erp_action_enqueue(enum zfcp_erp_act_type want,
+ struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev,
- char *id, u32 act_status)
+ char *dbftag, u32 act_status)
{
- int need;
+ enum zfcp_erp_act_type need;
struct zfcp_erp_action *act;
need = zfcp_erp_handle_failed(want, adapter, port, sdev);
@@ -327,10 +304,11 @@ static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
list_add_tail(&act->list, &adapter->erp_ready_head);
wake_up(&adapter->erp_ready_wq);
out:
- zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
+ zfcp_dbf_rec_trig(dbftag, adapter, port, sdev, want, need);
}
-void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
+void zfcp_erp_port_forced_no_port_dbf(char *dbftag,
+ struct zfcp_adapter *adapter,
u64 port_name, u32 port_id)
{
unsigned long flags;
@@ -344,29 +322,30 @@ void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
atomic_set(&tmpport.status, -1); /* unknown */
tmpport.wwpn = port_name;
tmpport.d_id = port_id;
- zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
+ zfcp_dbf_rec_trig(dbftag, adapter, &tmpport, NULL,
ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
ZFCP_ERP_ACTION_NONE);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
static void _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
- int clear_mask, char *id)
+ int clear_mask, char *dbftag)
{
zfcp_erp_adapter_block(adapter, clear_mask);
zfcp_scsi_schedule_rports_block(adapter);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
- adapter, NULL, NULL, id, 0);
+ adapter, NULL, NULL, dbftag, 0);
}
/**
* zfcp_erp_adapter_reopen - Reopen adapter.
* @adapter: Adapter to reopen.
* @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
*/
-void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
+void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
+ char *dbftag)
{
unsigned long flags;
@@ -375,7 +354,7 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
write_lock_irqsave(&adapter->erp_lock, flags);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
- NULL, NULL, id, 0);
+ NULL, NULL, dbftag, 0);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
@@ -383,25 +362,25 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
* zfcp_erp_adapter_shutdown - Shutdown adapter.
* @adapter: Adapter to shut down.
* @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
*/
void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
- char *id)
+ char *dbftag)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
- zfcp_erp_adapter_reopen(adapter, clear | flags, id);
+ zfcp_erp_adapter_reopen(adapter, clear | flags, dbftag);
}
/**
* zfcp_erp_port_shutdown - Shutdown port
* @port: Port to shut down.
* @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
*/
-void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *dbftag)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
- zfcp_erp_port_reopen(port, clear | flags, id);
+ zfcp_erp_port_reopen(port, clear | flags, dbftag);
}
static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
@@ -411,53 +390,55 @@ static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
}
static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
- char *id)
+ char *dbftag)
{
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
- port->adapter, port, NULL, id, 0);
+ port->adapter, port, NULL, dbftag, 0);
}
/**
* zfcp_erp_port_forced_reopen - Forced close of port and open again
* @port: Port to force close and to reopen.
* @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
*/
-void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
+ char *dbftag)
{
unsigned long flags;
struct zfcp_adapter *adapter = port->adapter;
write_lock_irqsave(&adapter->erp_lock, flags);
- _zfcp_erp_port_forced_reopen(port, clear, id);
+ _zfcp_erp_port_forced_reopen(port, clear, dbftag);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
-static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
+ char *dbftag)
{
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
- port->adapter, port, NULL, id, 0);
+ port->adapter, port, NULL, dbftag, 0);
}
/**
* zfcp_erp_port_reopen - trigger remote port recovery
* @port: port to recover
- * @clear_mask: flags in port status to be cleared
- * @id: Id for debug trace event.
+ * @clear: flags in port status to be cleared
+ * @dbftag: Tag for debug trace event.
*/
-void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *dbftag)
{
unsigned long flags;
struct zfcp_adapter *adapter = port->adapter;
write_lock_irqsave(&adapter->erp_lock, flags);
- _zfcp_erp_port_reopen(port, clear, id);
+ _zfcp_erp_port_reopen(port, clear, dbftag);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
@@ -467,8 +448,8 @@ static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
}
-static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
- u32 act_status)
+static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear,
+ char *dbftag, u32 act_status)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
@@ -476,18 +457,18 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
zfcp_erp_lun_block(sdev, clear);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
- zfcp_sdev->port, sdev, id, act_status);
+ zfcp_sdev->port, sdev, dbftag, act_status);
}
/**
* zfcp_erp_lun_reopen - initiate reopen of a LUN
* @sdev: SCSI device / LUN to be reopened
- * @clear_mask: specifies flags in LUN status to be cleared
- * @id: Id for debug trace event.
+ * @clear: specifies flags in LUN status to be cleared
+ * @dbftag: Tag for debug trace event.
*
* Return: 0 on success, < 0 on error
*/
-void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
+void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *dbftag)
{
unsigned long flags;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -495,7 +476,7 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
struct zfcp_adapter *adapter = port->adapter;
write_lock_irqsave(&adapter->erp_lock, flags);
- _zfcp_erp_lun_reopen(sdev, clear, id, 0);
+ _zfcp_erp_lun_reopen(sdev, clear, dbftag, 0);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
@@ -503,25 +484,25 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
* zfcp_erp_lun_shutdown - Shutdown LUN
* @sdev: SCSI device / LUN to shut down.
* @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
*/
-void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id)
+void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *dbftag)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
- zfcp_erp_lun_reopen(sdev, clear | flags, id);
+ zfcp_erp_lun_reopen(sdev, clear | flags, dbftag);
}
/**
* zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
* @sdev: SCSI device / LUN to shut down.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
*
* Do not acquire a reference for the LUN when creating the ERP
* action. It is safe, because this function waits for the ERP to
* complete first. This allows to shutdown the LUN, even when the SCSI
* device is in the state SDEV_DEL when scsi_device_get will fail.
*/
-void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
+void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *dbftag)
{
unsigned long flags;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -530,7 +511,7 @@ void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
write_lock_irqsave(&adapter->erp_lock, flags);
- _zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
+ _zfcp_erp_lun_reopen(sdev, clear, dbftag, ZFCP_STATUS_ERP_NO_REF);
write_unlock_irqrestore(&adapter->erp_lock, flags);
zfcp_erp_wait(adapter);
@@ -619,7 +600,7 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
/**
* zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
- * @data: ERP action (from timer data)
+ * @t: timer list entry embedded in zfcp FSF request
*/
void zfcp_erp_timeout_handler(struct timer_list *t)
{
@@ -643,32 +624,46 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
add_timer(&erp_action->timer);
}
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+ int clear, char *dbftag)
+{
+ unsigned long flags;
+ struct zfcp_port *port;
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ read_lock(&adapter->port_list_lock);
+ list_for_each_entry(port, &adapter->port_list, list)
+ _zfcp_erp_port_forced_reopen(port, clear, dbftag);
+ read_unlock(&adapter->port_list_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
- int clear, char *id)
+ int clear, char *dbftag)
{
struct zfcp_port *port;
read_lock(&adapter->port_list_lock);
list_for_each_entry(port, &adapter->port_list, list)
- _zfcp_erp_port_reopen(port, clear, id);
+ _zfcp_erp_port_reopen(port, clear, dbftag);
read_unlock(&adapter->port_list_lock);
}
static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
- char *id)
+ char *dbftag)
{
struct scsi_device *sdev;
spin_lock(port->adapter->scsi_host->host_lock);
__shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
- _zfcp_erp_lun_reopen(sdev, clear, id, 0);
+ _zfcp_erp_lun_reopen(sdev, clear, dbftag, 0);
spin_unlock(port->adapter->scsi_host->host_lock);
}
static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
{
- switch (act->action) {
+ switch (act->type) {
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
break;
@@ -686,7 +681,7 @@ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
{
- switch (act->action) {
+ switch (act->type) {
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
break;
@@ -696,6 +691,9 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
case ZFCP_ERP_ACTION_REOPEN_PORT:
_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
break;
+ case ZFCP_ERP_ACTION_REOPEN_LUN:
+ /* NOP */
+ break;
}
}
@@ -723,7 +721,8 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
_zfcp_erp_port_reopen(port, 0, "ereptp1");
}
-static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
+ struct zfcp_erp_action *erp_action)
{
int retries;
int sleep = 1;
@@ -768,7 +767,8 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_SUCCEEDED;
}
-static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
+ struct zfcp_erp_action *act)
{
int ret;
struct zfcp_adapter *adapter = act->adapter;
@@ -793,7 +793,8 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
return ZFCP_ERP_SUCCEEDED;
}
-static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
+ struct zfcp_erp_action *act)
{
if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
@@ -832,7 +833,8 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
}
-static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open(
+ struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
@@ -853,7 +855,8 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
return ZFCP_ERP_SUCCEEDED;
}
-static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy(
+ struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
@@ -871,7 +874,8 @@ static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
return ZFCP_ERP_SUCCEEDED;
}
-static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_port_forced_strategy_close(
+ struct zfcp_erp_action *act)
{
int retval;
@@ -885,7 +889,8 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
return ZFCP_ERP_CONTINUES;
}
-static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_forced_strategy(
+ struct zfcp_erp_action *erp_action)
{
struct zfcp_port *port = erp_action->port;
int status = atomic_read(&port->status);
@@ -901,11 +906,19 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN))
return ZFCP_ERP_SUCCEEDED;
+ break;
+ case ZFCP_ERP_STEP_PORT_CLOSING:
+ case ZFCP_ERP_STEP_PORT_OPENING:
+ case ZFCP_ERP_STEP_LUN_CLOSING:
+ case ZFCP_ERP_STEP_LUN_OPENING:
+ /* NOP */
+ break;
}
return ZFCP_ERP_FAILED;
}
-static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy_close(
+ struct zfcp_erp_action *erp_action)
{
int retval;
@@ -918,7 +931,8 @@ static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_CONTINUES;
}
-static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy_open_port(
+ struct zfcp_erp_action *erp_action)
{
int retval;
@@ -944,7 +958,8 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
return zfcp_erp_port_strategy_open_port(act);
}
-static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy_open_common(
+ struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
@@ -975,12 +990,18 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
port->d_id = 0;
return ZFCP_ERP_FAILED;
}
- /* fall through otherwise */
+ /* no early return otherwise, continue after switch case */
+ break;
+ case ZFCP_ERP_STEP_LUN_CLOSING:
+ case ZFCP_ERP_STEP_LUN_OPENING:
+ /* NOP */
+ break;
}
return ZFCP_ERP_FAILED;
}
-static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy(
+ struct zfcp_erp_action *erp_action)
{
struct zfcp_port *port = erp_action->port;
int p_status = atomic_read(&port->status);
@@ -999,6 +1020,12 @@ static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
if (p_status & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_FAILED;
break;
+ case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+ case ZFCP_ERP_STEP_PORT_OPENING:
+ case ZFCP_ERP_STEP_LUN_CLOSING:
+ case ZFCP_ERP_STEP_LUN_OPENING:
+ /* NOP */
+ break;
}
close_init_done:
@@ -1016,7 +1043,8 @@ static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
&zfcp_sdev->status);
}
-static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_lun_strategy_close(
+ struct zfcp_erp_action *erp_action)
{
int retval = zfcp_fsf_close_lun(erp_action);
if (retval == -ENOMEM)
@@ -1027,7 +1055,8 @@ static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_CONTINUES;
}
-static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_lun_strategy_open(
+ struct zfcp_erp_action *erp_action)
{
int retval = zfcp_fsf_open_lun(erp_action);
if (retval == -ENOMEM)
@@ -1038,7 +1067,8 @@ static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_CONTINUES;
}
-static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_lun_strategy(
+ struct zfcp_erp_action *erp_action)
{
struct scsi_device *sdev = erp_action->sdev;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -1048,7 +1078,8 @@ static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
zfcp_erp_lun_strategy_clearstati(sdev);
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return zfcp_erp_lun_strategy_close(erp_action);
- /* already closed, fall through */
+ /* already closed */
+ /* fall through */
case ZFCP_ERP_STEP_LUN_CLOSING:
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_FAILED;
@@ -1059,11 +1090,18 @@ static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
case ZFCP_ERP_STEP_LUN_OPENING:
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_SUCCEEDED;
+ break;
+ case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+ case ZFCP_ERP_STEP_PORT_CLOSING:
+ case ZFCP_ERP_STEP_PORT_OPENING:
+ /* NOP */
+ break;
}
return ZFCP_ERP_FAILED;
}
-static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_lun(
+ struct scsi_device *sdev, enum zfcp_erp_act_result result)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -1084,6 +1122,12 @@ static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
+ case ZFCP_ERP_CONTINUES:
+ case ZFCP_ERP_EXIT:
+ case ZFCP_ERP_DISMISSED:
+ case ZFCP_ERP_NOMEM:
+ /* NOP */
+ break;
}
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1093,7 +1137,8 @@ static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
return result;
}
-static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_port(
+ struct zfcp_port *port, enum zfcp_erp_act_result result)
{
switch (result) {
case ZFCP_ERP_SUCCEEDED :
@@ -1115,6 +1160,12 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
+ case ZFCP_ERP_CONTINUES:
+ case ZFCP_ERP_EXIT:
+ case ZFCP_ERP_DISMISSED:
+ case ZFCP_ERP_NOMEM:
+ /* NOP */
+ break;
}
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1124,8 +1175,8 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
return result;
}
-static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
- int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_adapter(
+ struct zfcp_adapter *adapter, enum zfcp_erp_act_result result)
{
switch (result) {
case ZFCP_ERP_SUCCEEDED :
@@ -1143,6 +1194,12 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
+ case ZFCP_ERP_CONTINUES:
+ case ZFCP_ERP_EXIT:
+ case ZFCP_ERP_DISMISSED:
+ case ZFCP_ERP_NOMEM:
+ /* NOP */
+ break;
}
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1152,14 +1209,14 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
return result;
}
-static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
- int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_target(
+ struct zfcp_erp_action *erp_action, enum zfcp_erp_act_result result)
{
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_port *port = erp_action->port;
struct scsi_device *sdev = erp_action->sdev;
- switch (erp_action->action) {
+ switch (erp_action->type) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
result = zfcp_erp_strategy_check_lun(sdev, result);
@@ -1192,16 +1249,17 @@ static int zfcp_erp_strat_change_det(atomic_t *target_status, u32 erp_status)
return 0;
}
-static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
+static enum zfcp_erp_act_result zfcp_erp_strategy_statechange(
+ struct zfcp_erp_action *act, enum zfcp_erp_act_result result)
{
- int action = act->action;
+ enum zfcp_erp_act_type type = act->type;
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
struct scsi_device *sdev = act->sdev;
struct zfcp_scsi_dev *zfcp_sdev;
u32 erp_status = act->status;
- switch (action) {
+ switch (type) {
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
_zfcp_erp_adapter_reopen(adapter,
@@ -1231,7 +1289,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
}
break;
}
- return ret;
+ return result;
}
static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
@@ -1248,7 +1306,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
list_del(&erp_action->list);
zfcp_dbf_rec_run("eractd1", erp_action);
- switch (erp_action->action) {
+ switch (erp_action->type) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
@@ -1297,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
int lun_status;
+ if (sdev->sdev_state == SDEV_DEL ||
+ sdev->sdev_state == SDEV_CANCEL)
+ continue;
if (zsdev->port != port)
continue;
/* LUN under port of interest */
@@ -1324,13 +1385,14 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
-static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
+static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act,
+ enum zfcp_erp_act_result result)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
struct scsi_device *sdev = act->sdev;
- switch (act->action) {
+ switch (act->type) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
scsi_device_put(sdev);
@@ -1364,9 +1426,10 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
}
}
-static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_strategy_do_action(
+ struct zfcp_erp_action *erp_action)
{
- switch (erp_action->action) {
+ switch (erp_action->type) {
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
return zfcp_erp_adapter_strategy(erp_action);
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1379,9 +1442,10 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_FAILED;
}
-static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_strategy(
+ struct zfcp_erp_action *erp_action)
{
- int retval;
+ enum zfcp_erp_act_result result;
unsigned long flags;
struct zfcp_adapter *adapter = erp_action->adapter;
@@ -1392,12 +1456,12 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
zfcp_erp_action_dequeue(erp_action);
- retval = ZFCP_ERP_DISMISSED;
+ result = ZFCP_ERP_DISMISSED;
goto unlock;
}
if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
- retval = ZFCP_ERP_FAILED;
+ result = ZFCP_ERP_FAILED;
goto check_target;
}
@@ -1405,13 +1469,13 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
/* no lock to allow for blocking operations */
write_unlock_irqrestore(&adapter->erp_lock, flags);
- retval = zfcp_erp_strategy_do_action(erp_action);
+ result = zfcp_erp_strategy_do_action(erp_action);
write_lock_irqsave(&adapter->erp_lock, flags);
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
- retval = ZFCP_ERP_CONTINUES;
+ result = ZFCP_ERP_CONTINUES;
- switch (retval) {
+ switch (result) {
case ZFCP_ERP_NOMEM:
if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
++adapter->erp_low_mem_count;
@@ -1421,7 +1485,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
else {
zfcp_erp_strategy_memwait(erp_action);
- retval = ZFCP_ERP_CONTINUES;
+ result = ZFCP_ERP_CONTINUES;
}
goto unlock;
@@ -1431,27 +1495,33 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
}
goto unlock;
+ case ZFCP_ERP_SUCCEEDED:
+ case ZFCP_ERP_FAILED:
+ case ZFCP_ERP_EXIT:
+ case ZFCP_ERP_DISMISSED:
+ /* NOP */
+ break;
}
check_target:
- retval = zfcp_erp_strategy_check_target(erp_action, retval);
+ result = zfcp_erp_strategy_check_target(erp_action, result);
zfcp_erp_action_dequeue(erp_action);
- retval = zfcp_erp_strategy_statechange(erp_action, retval);
- if (retval == ZFCP_ERP_EXIT)
+ result = zfcp_erp_strategy_statechange(erp_action, result);
+ if (result == ZFCP_ERP_EXIT)
goto unlock;
- if (retval == ZFCP_ERP_SUCCEEDED)
+ if (result == ZFCP_ERP_SUCCEEDED)
zfcp_erp_strategy_followup_success(erp_action);
- if (retval == ZFCP_ERP_FAILED)
+ if (result == ZFCP_ERP_FAILED)
zfcp_erp_strategy_followup_failed(erp_action);
unlock:
write_unlock_irqrestore(&adapter->erp_lock, flags);
- if (retval != ZFCP_ERP_CONTINUES)
- zfcp_erp_action_cleanup(erp_action, retval);
+ if (result != ZFCP_ERP_CONTINUES)
+ zfcp_erp_action_cleanup(erp_action, result);
kref_put(&adapter->ref, zfcp_adapter_release);
- return retval;
+ return result;
}
static int zfcp_erp_thread(void *data)
@@ -1489,7 +1559,7 @@ static int zfcp_erp_thread(void *data)
* zfcp_erp_thread_setup - Start ERP thread for adapter
* @adapter: Adapter to start the ERP thread for
*
- * Returns 0 on success or error code from kernel_thread()
+ * Return: 0 on success, or error code from kthread_run().
*/
int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
{
@@ -1694,11 +1764,11 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
/**
* zfcp_erp_adapter_reset_sync() - Really reopen adapter and wait.
* @adapter: Pointer to zfcp_adapter to reopen.
- * @id: Trace tag string of length %ZFCP_DBF_TAG_LEN.
+ * @dbftag: Trace tag string of length %ZFCP_DBF_TAG_LEN.
*/
-void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id)
+void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *dbftag)
{
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
- zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, id);
+ zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
zfcp_erp_wait(adapter);
}
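Several hunks above convert int action/step variables to the new enum types and replace default labels with explicit case lists, padding out unused values with /* NOP */ cases. A common reason for this style, assumed to apply here as well, is that -Wswitch (part of -Wall in gcc and clang) warns when a switch over an enum misses an enumerator, but only if the switch has no default label. A small stand-alone illustration:

/* Compile with: cc -Wall -c switch_demo.c
 * Adding a new value to enum act_type without extending handle() makes the
 * compiler emit "enumeration value ... not handled in switch", because the
 * switch deliberately has no default label. */
enum act_type {
	ACT_REOPEN_LUN = 1,
	ACT_REOPEN_PORT = 2,
	ACT_REOPEN_ADAPTER = 3,
};

int handle(enum act_type type)
{
	switch (type) {
	case ACT_REOPEN_LUN:
		return 10;
	case ACT_REOPEN_PORT:
		return 20;
	case ACT_REOPEN_ADAPTER:
		/* NOP */
		break;
	}
	return 0;
}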
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index bd0c5a9f04cb..c6acca521ffe 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -59,16 +59,19 @@ extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
/* zfcp_erp.c */
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
-extern void zfcp_erp_port_forced_no_port_dbf(char *id,
+extern void zfcp_erp_port_forced_no_port_dbf(char *dbftag,
struct zfcp_adapter *adapter,
u64 port_name, u32 port_id);
extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
-extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id);
+extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
+ char *dbftag);
extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+ int clear, char *dbftag);
extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
@@ -79,7 +82,8 @@ extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
extern void zfcp_erp_wait(struct zfcp_adapter *);
extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
extern void zfcp_erp_timeout_handler(struct timer_list *t);
-extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id);
+extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter,
+ char *dbftag);
/* zfcp_fc.c */
extern struct kmem_cache *zfcp_fc_req_cache;
@@ -144,6 +148,7 @@ extern void zfcp_qdio_close(struct zfcp_qdio *);
extern void zfcp_qdio_siosl(struct zfcp_adapter *);
/* zfcp_scsi.c */
+extern bool zfcp_experimental_dix;
extern struct scsi_transport_template *zfcp_scsi_transport_template;
extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index f6c415d6ef48..33eddb02ee30 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
list_for_each_entry(port, &adapter->port_list, list) {
if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
zfcp_fc_test_link(port);
- if (!port->d_id)
- zfcp_erp_port_reopen(port,
- ZFCP_STATUS_COMMON_ERP_FAILED,
- "fcrscn1");
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
struct fc_els_rscn *head;
struct fc_els_rscn_page *page;
u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
no_entries = be16_to_cpu(head->rscn_plen) /
sizeof(struct fc_els_rscn_page);
+ if (no_entries > 1) {
+ /* handle failed ports */
+ unsigned long flags;
+ struct zfcp_port *port;
+
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list) {
+ if (port->d_id)
+ continue;
+ zfcp_erp_port_reopen(port,
+ ZFCP_STATUS_COMMON_ERP_FAILED,
+ "fcrscn1");
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ }
+
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
page++;
@@ -312,7 +325,7 @@ static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
/**
* zfcp_fc_incoming_els - handle incoming ELS
- * @fsf_req - request which contains incoming ELS
+ * @fsf_req: request which contains incoming ELS
*/
void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
{
@@ -597,6 +610,48 @@ void zfcp_fc_test_link(struct zfcp_port *port)
put_device(&port->dev);
}
+/**
+ * zfcp_fc_sg_free_table - free memory used by scatterlists
+ * @sg: pointer to scatterlist
+ * @count: number of scatterlist entries which are to be freed
+ *	   the scatterlist entries are expected to always reference pages
+ */
+static void zfcp_fc_sg_free_table(struct scatterlist *sg, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++, sg++)
+ if (sg)
+ free_page((unsigned long) sg_virt(sg));
+ else
+ break;
+}
+
+/**
+ * zfcp_fc_sg_setup_table - init scatterlist and allocate, assign buffers
+ * @sg: pointer to struct scatterlist
+ * @count: number of scatterlist entries which should be assigned buffers
+ *	   of page size
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
+ */
+static int zfcp_fc_sg_setup_table(struct scatterlist *sg, int count)
+{
+ void *addr;
+ int i;
+
+ sg_init_table(sg, count);
+ for (i = 0; i < count; i++, sg++) {
+ addr = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!addr) {
+ zfcp_fc_sg_free_table(sg, i);
+ return -ENOMEM;
+ }
+ sg_set_buf(sg, addr, PAGE_SIZE);
+ }
+ return 0;
+}
+
static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
{
struct zfcp_fc_req *fc_req;
@@ -605,7 +660,7 @@ static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
if (!fc_req)
return NULL;
- if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
+ if (zfcp_fc_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
kmem_cache_free(zfcp_fc_req_cache, fc_req);
return NULL;
}
@@ -763,7 +818,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
break;
}
}
- zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
+ zfcp_fc_sg_free_table(&fc_req->sg_rsp, buf_num);
kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
zfcp_fc_wka_port_put(&adapter->gs->ds);
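The zfcp_fc_sg_setup_table()/zfcp_fc_sg_free_table() pair moved into this file follows the usual partial-cleanup idiom: when allocating the i-th page fails, exactly the i buffers already allocated are freed and -ENOMEM is returned. A hypothetical user-space analogue of the same shape (calloc() stands in for get_zeroed_page(), and all names are illustrative only):

/* Allocate count fixed-size zeroed buffers into tbl[]; on failure free only
 * the entries already filled and return -1. */
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 4096

static void table_free(void **tbl, int count)
{
	int i;

	for (i = 0; i < count; i++)
		free(tbl[i]);
}

static int table_setup(void **tbl, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		tbl[i] = calloc(1, BUF_SIZE);	/* zeroed, like get_zeroed_page */
		if (!tbl[i]) {
			table_free(tbl, i);	/* free only what we got so far */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	void *tbl[4];

	if (table_setup(tbl, 4))
		return 1;
	printf("allocated %d buffers\n", 4);
	table_free(tbl, 4);
	return 0;
}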
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 3cd74729cfb9..6902ae1f8e4f 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -121,9 +121,24 @@ struct zfcp_fc_rspn_req {
/**
* struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
* @ct_els: data required for issuing fsf command
- * @sg_req: scatterlist entry for request data
- * @sg_rsp: scatterlist entry for response data
- * @u: request specific data
+ * @sg_req: scatterlist entry for request data, refers to embedded @u submember
+ * @sg_rsp: scatterlist entry for response data, refers to embedded @u submember
+ * @u: request and response specific data
+ * @u.adisc: ADISC specific data
+ * @u.adisc.req: ADISC request
+ * @u.adisc.rsp: ADISC response
+ * @u.gid_pn: GID_PN specific data
+ * @u.gid_pn.req: GID_PN request
+ * @u.gid_pn.rsp: GID_PN response
+ * @u.gpn_ft: GPN_FT specific data
+ * @u.gpn_ft.sg_rsp2: GPN_FT response, not embedded here, allocated elsewhere
+ * @u.gpn_ft.req: GPN_FT request
+ * @u.gspn: GSPN specific data
+ * @u.gspn.req: GSPN request
+ * @u.gspn.rsp: GSPN response
+ * @u.rspn: RSPN specific data
+ * @u.rspn.req: RSPN request
+ * @u.rspn.rsp: RSPN response
*/
struct zfcp_fc_req {
struct zfcp_fsf_ct_els ct_els;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 3c86e27f094d..d94496ee6883 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -19,6 +19,11 @@
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"
+/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
+#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
+/* timeout for: exchange config/port data outside ERP, or open/close WKA port */
+#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
+
struct kmem_cache *zfcp_fsf_qtcb_cache;
static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
@@ -74,18 +79,18 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
/**
* zfcp_fsf_req_free - free memory used by fsf request
- * @fsf_req: pointer to struct zfcp_fsf_req
+ * @req: pointer to struct zfcp_fsf_req
*/
void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
{
if (likely(req->pool)) {
- if (likely(req->qtcb))
+ if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
mempool_free(req, req->pool);
return;
}
- if (likely(req->qtcb))
+ if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
kfree(req);
}
@@ -379,7 +384,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
/**
* zfcp_fsf_req_complete - process completion of a FSF request
- * @fsf_req: The FSF request that has been completed.
+ * @req: The FSF request that has been completed.
*
* When a request has been completed either from the FCP adapter,
* or it has been dismissed due to a queue shutdown, this function
@@ -388,7 +393,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
*/
static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
{
- if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
+ if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
zfcp_fsf_status_read_handler(req);
return;
}
@@ -705,7 +710,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
init_completion(&req->completion);
req->adapter = adapter;
- req->fsf_command = fsf_cmd;
req->req_id = adapter->req_no;
if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
@@ -720,14 +724,13 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
return ERR_PTR(-ENOMEM);
}
- req->seq_no = adapter->fsf_req_seq_no;
req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
req->qtcb->prefix.req_id = req->req_id;
req->qtcb->prefix.ulp_info = 26;
- req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
+ req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
req->qtcb->header.req_handle = req->req_id;
- req->qtcb->header.fsf_command = req->fsf_command;
+ req->qtcb->header.fsf_command = fsf_cmd;
}
zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
@@ -740,7 +743,6 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
- int with_qtcb = (req->qtcb != NULL);
int req_id = req->req_id;
zfcp_reqlist_add(adapter->req_list, req);
@@ -756,7 +758,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
}
/* Don't increase for unsolicited status */
- if (with_qtcb)
+ if (!zfcp_fsf_req_is_status_read_buffer(req))
adapter->fsf_req_seq_no++;
adapter->req_no++;
@@ -765,8 +767,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
/**
* zfcp_fsf_status_read - send status read request
- * @adapter: pointer to struct zfcp_adapter
- * @req_flags: request flags
+ * @qdio: pointer to struct zfcp_qdio
* Returns: 0 on success, ERROR otherwise
*/
int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
@@ -912,7 +913,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
req->qtcb->header.port_handle = zfcp_sdev->port->handle;
req->qtcb->bottom.support.req_handle = (u64) old_req_id;
- zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+ zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))
goto out;
@@ -1057,8 +1058,10 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
/**
* zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
+ * @wka_port: pointer to zfcp WKA port to send CT/GS to
* @ct: pointer to struct zfcp_send_ct with data for request
* @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
+ * @timeout: timeout that hardware should use, and a later software timeout
*/
int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
struct zfcp_fsf_ct_els *ct, mempool_t *pool,
@@ -1151,7 +1154,10 @@ skip_fsfstatus:
/**
* zfcp_fsf_send_els - initiate an ELS command (FC-FS)
+ * @adapter: pointer to zfcp adapter
+ * @d_id: N_Port_ID to send ELS to
* @els: pointer to struct zfcp_send_els with data for the command
+ * @timeout: timeout that hardware should use, and a later software timeout
*/
int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
struct zfcp_fsf_ct_els *els, unsigned int timeout)
@@ -1809,7 +1815,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
case FSF_LUN_SHARING_VIOLATION:
if (qual->word[0])
dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
- "LUN 0x%Lx on port 0x%Lx is already in "
+ "LUN 0x%016Lx on port 0x%016Lx is already in "
"use by CSS%d, MIF Image ID %x\n",
zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn,
@@ -1986,7 +1992,7 @@ out:
return retval;
}
-static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
+static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
{
lat_rec->sum += lat;
lat_rec->min = min(lat_rec->min, lat);
@@ -1996,7 +2002,7 @@ static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
struct fsf_qual_latency_info *lat_in;
- struct latency_cont *lat = NULL;
+ struct zfcp_latency_cont *lat = NULL;
struct zfcp_scsi_dev *zfcp_sdev;
struct zfcp_blk_drv_data blktrc;
int ticks = req->adapter->timer_ticks;
@@ -2088,11 +2094,8 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
break;
case FSF_CMND_LENGTH_NOT_VALID:
dev_err(&req->adapter->ccw_device->dev,
- "Incorrect CDB length %d, LUN 0x%016Lx on "
- "port 0x%016Lx closed\n",
- req->qtcb->bottom.io.fcp_cmnd_length,
- (unsigned long long)zfcp_scsi_dev_lun(sdev),
- (unsigned long long)zfcp_sdev->port->wwpn);
+ "Incorrect FCP_CMND length %d, FCP device closed\n",
+ req->qtcb->bottom.io.fcp_cmnd_length);
zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -2369,7 +2372,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
- zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+ zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))
goto out;
@@ -2382,7 +2385,7 @@ out:
/**
* zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
* @sbal_idx: response queue index of SBAL to be processed
*/
void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 535628b92f0a..2c658b66318c 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -438,8 +438,8 @@ struct zfcp_blk_drv_data {
/**
* struct zfcp_fsf_ct_els - zfcp data for ct or els request
- * @req: scatter-gather list for request
- * @resp: scatter-gather list for response
+ * @req: scatter-gather list for request, points to &zfcp_fc_req.sg_req or BSG
+ * @resp: scatter-gather list for response, points to &zfcp_fc_req.sg_rsp or BSG
* @handler: handler function (called for response to the request)
* @handler_data: data passed to handler function
* @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 4ab02e8d36f3..661436a92f8e 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -4,7 +4,7 @@
*
* Setup and helper functions to access QDIO.
*
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -19,7 +19,7 @@ static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
-static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
+static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
unsigned int qdio_err)
{
struct zfcp_adapter *adapter = qdio->adapter;
@@ -28,12 +28,12 @@ static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
if (qdio_err & QDIO_ERROR_SLSB_STATE) {
zfcp_qdio_siosl(adapter);
- zfcp_erp_adapter_shutdown(adapter, 0, id);
+ zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
return;
}
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
- ZFCP_STATUS_COMMON_ERP_FAILED, id);
+ ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
}
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
@@ -180,7 +180,6 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
* @sg: scatter-gather list
- * @max_sbals: upper bound for number of SBALs to be used
* Returns: zero or -EINVAL on error
*/
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
@@ -295,15 +294,15 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
id->input_handler = zfcp_qdio_int_resp;
id->output_handler = zfcp_qdio_int_req;
id->int_parm = (unsigned long) qdio;
- id->input_sbal_addr_array = (void **) (qdio->res_q);
- id->output_sbal_addr_array = (void **) (qdio->req_q);
+ id->input_sbal_addr_array = qdio->res_q;
+ id->output_sbal_addr_array = qdio->req_q;
id->scan_threshold =
QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
}
/**
* zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
* Returns: -ENOMEM on memory allocation error or return value from
* qdio_allocate
*/
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 886c662cc154..2a816a37b3c0 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -30,6 +30,8 @@
* @req_q_full: queue full incidents
* @req_q_wq: used to wait for SBAL availability
* @adapter: adapter used in conjunction with this qdio structure
+ * @max_sbale_per_sbal: qdio limit on the number of SBALEs per SBAL
+ * @max_sbale_per_req: qdio limit on the number of SBALEs per request
*/
struct zfcp_qdio {
struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q];
@@ -70,7 +72,7 @@ struct zfcp_qdio_req {
/**
* zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
* @qdio: pointer to struct zfcp_qdio
- * @q_rec: pointer to struct zfcp_qdio_req
+ * @q_req: pointer to struct zfcp_qdio_req
* Returns: pointer to qdio_buffer_element (sbale) structure
*/
static inline struct qdio_buffer_element *
@@ -82,7 +84,7 @@ zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
/**
* zfcp_qdio_sbale_curr - return current sbale on req_q for a request
* @qdio: pointer to struct zfcp_qdio
- * @fsf_req: pointer to struct zfcp_fsf_req
+ * @q_req: pointer to struct zfcp_qdio_req
* Returns: pointer to qdio_buffer_element (sbale) structure
*/
static inline struct qdio_buffer_element *
@@ -135,6 +137,8 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
* zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_queue_req
+ * @data: pointer to data
+ * @len: length of data
*
* This is only required for single sbal requests, calling it when
* wrapping around to the next sbal is a bug.
@@ -182,6 +186,7 @@ int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
/**
* zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
+ * @qdio: pointer to struct zfcp_qdio
* @q_req: The current zfcp_qdio_req
*/
static inline
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
index 59a943c0d51d..9b8ff249e31c 100644
--- a/drivers/s390/scsi/zfcp_reqlist.h
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -17,7 +17,7 @@
/**
* struct zfcp_reqlist - Container for request list (reqlist)
* @lock: Spinlock for protecting the hash list
- * @list: Array of hashbuckets, each is a list of requests in this bucket
+ * @buckets: Array of hashbuckets, each is a list of requests in this bucket
*/
struct zfcp_reqlist {
spinlock_t lock;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index a8efcb330bc1..221d0dfb8493 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -27,7 +27,11 @@ MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
static bool enable_dif;
module_param_named(dif, enable_dif, bool, 0400);
-MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
+MODULE_PARM_DESC(dif, "Enable DIF data integrity support (default off)");
+
+bool zfcp_experimental_dix;
+module_param_named(dix, zfcp_experimental_dix, bool, 0400);
+MODULE_PARM_DESC(dix, "Enable experimental DIX (data integrity extension) support which implies DIF support (default off)");
static bool allow_lun_scan = true;
module_param(allow_lun_scan, bool, 0600);
@@ -226,7 +230,9 @@ static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
(struct zfcp_scsi_req_filter *)data;
/* already aborted - prevent side-effects - or not a SCSI command */
- if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
+ if (old_req->data == NULL ||
+ zfcp_fsf_req_is_status_read_buffer(old_req) ||
+ old_req->qtcb->header.fsf_command != FSF_QTCB_FCP_CMND)
return;
/* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
@@ -362,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
int ret = SUCCESS, fc_ret;
+ if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+ zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+ zfcp_erp_wait(adapter);
+ }
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
zfcp_erp_wait(adapter);
fc_ret = fc_block_scsi_eh(scpnt);
@@ -422,8 +432,9 @@ static struct scsi_host_template zfcp_scsi_host_template = {
.max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
* ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
/* GCD, adjusted later */
+ /* report size limit per scatter-gather segment */
+ .max_segment_size = ZFCP_QDIO_SBALE_LEN,
.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
- .use_clustering = 1,
.shost_attrs = zfcp_sysfs_shost_attrs,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
.track_queue_depth = 1,
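
The two limits above work together: dma_boundary = ZFCP_QDIO_SBALE_LEN - 1 keeps every scatter-gather segment inside one SBALE-aligned address window, and max_segment_size caps its length, so each segment fits into a single SBAL entry. A small sketch of that boundary check, with an assumed stand-in value for ZFCP_QDIO_SBALE_LEN:

	#include <stdbool.h>
	#include <stdio.h>

	#define SBALE_LEN 4096UL	/* stand-in for ZFCP_QDIO_SBALE_LEN, value assumed for illustration */

	/*
	 * A segment is usable when it does not exceed the per-segment size limit
	 * (max_segment_size) and does not cross an SBALE_LEN-aligned address
	 * boundary (dma_boundary = SBALE_LEN - 1).
	 */
	static bool segment_fits_one_sbale(unsigned long addr, unsigned long len)
	{
		if (len == 0 || len > SBALE_LEN)
			return false;
		return addr / SBALE_LEN == (addr + len - 1) / SBALE_LEN;
	}

	int main(void)
	{
		printf("%d %d\n",
		       segment_fits_one_sbale(0x1000, 4096),	/* 1: aligned, exactly one SBALE */
		       segment_fits_one_sbale(0x1800, 4096));	/* 0: crosses a 4 KiB boundary */
		return 0;
	}
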
@@ -788,11 +799,11 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
data_div = atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;
- if (enable_dif &&
+ if ((enable_dif || zfcp_experimental_dix) &&
adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
mask |= SHOST_DIF_TYPE1_PROTECTION;
- if (enable_dif && data_div &&
+ if (zfcp_experimental_dix && data_div &&
adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
mask |= SHOST_DIX_TYPE1_PROTECTION;
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
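
A minimal sketch of how the two module parameters now combine into the SCSI host protection mask: dif alone enables DIF type 1, while the experimental dix parameter implies DIF and additionally requires data division for DIX type 1. The constants below are made-up stand-ins, not the real SHOST_*/FSF_FEATURE_* values:

	#include <stdbool.h>
	#include <stdio.h>

	#define DIF_TYPE1 0x1	/* stand-in for SHOST_DIF_TYPE1_PROTECTION */
	#define DIX_TYPE1 0x2	/* stand-in for SHOST_DIX_TYPE1_PROTECTION */
	#define FEAT_DIF  0x10	/* stand-in for FSF_FEATURE_DIF_PROT_TYPE1 */
	#define FEAT_DIX  0x20	/* stand-in for FSF_FEATURE_DIX_PROT_TCPIP */

	static unsigned int prot_mask(bool dif, bool experimental_dix, bool data_div,
				      unsigned int adapter_features)
	{
		unsigned int mask = 0;

		/* DIX implies DIF, so either parameter is enough for DIF type 1 */
		if ((dif || experimental_dix) && (adapter_features & FEAT_DIF))
			mask |= DIF_TYPE1;

		/* DIX type 1 additionally needs data division and the DIX feature bit */
		if (experimental_dix && data_div && (adapter_features & FEAT_DIX))
			mask |= DIX_TYPE1;

		return mask;
	}

	int main(void)
	{
		unsigned int features = FEAT_DIF | FEAT_DIX;

		printf("dif=1 dix=0 -> %#x\n", prot_mask(true, false, true, features));	/* 0x1 */
		printf("dif=0 dix=1 -> %#x\n", prot_mask(false, true, true, features));	/* 0x3 */
		return 0;
	}
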
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index c9c57b4a0b71..74c328321889 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -272,6 +272,8 @@ static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
struct virtio_ccw_vq_info *info;
+ if (!vcdev->airq_info)
+ return;
list_for_each_entry(info, &vcdev->virtqueues, node)
drop_airq_indicator(info->vq, vcdev->airq_info);
}
@@ -413,7 +415,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
if (ret)
return ret;
- return vcdev->config_block->num;
+ return vcdev->config_block->num ?: -ENOENT;
}
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
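
The ?: in the return just above is the GNU "elvis" extension: it yields the left operand when it is non-zero and the right operand otherwise, so a device that reports a queue size of 0 now produces -ENOENT instead of a bogus size of 0. A one-function sketch of the same pattern (needs GCC or Clang for the extension):

	#include <errno.h>
	#include <stdio.h>

	/* return the reported queue size, or -ENOENT if the device reports none */
	static int vq_size_or_enoent(int reported_num)
	{
		return reported_num ?: -ENOENT;	/* same as: reported_num ? reported_num : -ENOENT */
	}

	int main(void)
	{
		printf("%d %d\n", vq_size_or_enoent(128), vq_size_or_enoent(0));	/* 128 -2 */
		return 0;
	}
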
@@ -635,7 +637,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
unsigned long *indicatorp = NULL;
- int ret, i;
+ int ret, i, queue_idx = 0;
struct ccw1 *ccw;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
@@ -643,8 +645,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
return -ENOMEM;
for (i = 0; i < nvqs; ++i) {
- vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
- ctx ? ctx[i] : false, ccw);
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
+ names[i], ctx ? ctx[i] : false,
+ ccw);
if (IS_ERR(vqs[i])) {
ret = PTR_ERR(vqs[i]);
vqs[i] = NULL;
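
The reworked loop above tolerates holes in the names[] array: a NULL name means the driver does not want that virtqueue, so its slot stays NULL while the device-side queue index only advances for queues that are actually created. A user-space sketch of that indexing pattern; the names and the setup stub are hypothetical:

	#include <stdio.h>

	#define MAX_VQS 4

	/* stand-in for virtio_ccw_setup_vq(): just report which device queue index is used */
	static int setup_vq(int queue_idx, const char *name)
	{
		printf("device queue %d -> %s\n", queue_idx, name);
		return queue_idx + 1;		/* fake handle; 0 means "not set up" */
	}

	int main(void)
	{
		const char *names[MAX_VQS] = { "rx", NULL, "tx", "ctrl" };
		int handles[MAX_VQS] = { 0 };
		int i, queue_idx = 0;

		for (i = 0; i < MAX_VQS; i++) {
			if (!names[i])
				continue;	/* keep the hole, do not consume a device queue index */
			handles[i] = setup_vq(queue_idx++, names[i]);
		}
		/* handles[1] stays 0; device queues 0, 1, 2 back rx, tx, ctrl */
		printf("handles: %d %d %d %d\n", handles[0], handles[1], handles[2], handles[3]);
		return 0;
	}
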
@@ -769,6 +777,17 @@ out_free:
return rc;
}
+static void ccw_transport_features(struct virtio_device *vdev)
+{
+ /*
+	 * Packed ring isn't enabled on virtio_ccw for now, because
+	 * virtio_ccw still uses some legacy accessors, e.g.
+	 * virtqueue_get_avail() and virtqueue_get_used(), which aren't
+	 * available with packed rings yet.
+ */
+ __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
+}
+
static int virtio_ccw_finalize_features(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
@@ -795,6 +814,9 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
+ /* Give virtio_ccw a chance to accept features. */
+ ccw_transport_features(vdev);
+
features->index = 0;
features->features = cpu_to_le32((u32)vdev->features);
/* Write the first half of the feature bits to the host. */
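
The call order in this hunk is what makes the new hook effective: the generic ring layer filters the negotiated bits first, then ccw_transport_features() vetoes what the CCW transport cannot carry (currently only VIRTIO_F_RING_PACKED), and only the result is written back to the host. A sketch of that filtering order; the bit positions are illustrative, not asserted:

	#include <stdint.h>
	#include <stdio.h>

	#define F_VERSION_1   (1ULL << 32)	/* stand-in bit, not asserting the real virtio number */
	#define F_RING_PACKED (1ULL << 34)	/* stand-in for VIRTIO_F_RING_PACKED */

	static uint64_t vring_filter(uint64_t f)	/* generic ring layer gets the first look */
	{
		return f;
	}

	static uint64_t ccw_filter(uint64_t f)		/* transport drops what it cannot support */
	{
		return f & ~F_RING_PACKED;
	}

	int main(void)
	{
		uint64_t features = F_VERSION_1 | F_RING_PACKED;

		features = vring_filter(features);
		features = ccw_filter(features);
		printf("features written to host: %#llx\n", (unsigned long long)features);
		return 0;
	}
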
@@ -953,6 +975,13 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
kfree(ccw);
}
+static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+
+ return dev_name(&vcdev->cdev->dev);
+}
+
static const struct virtio_config_ops virtio_ccw_config_ops = {
.get_features = virtio_ccw_get_features,
.finalize_features = virtio_ccw_finalize_features,
@@ -963,6 +992,7 @@ static const struct virtio_config_ops virtio_ccw_config_ops = {
.reset = virtio_ccw_reset,
.find_vqs = virtio_ccw_find_vqs,
.del_vqs = virtio_ccw_del_vqs,
+ .bus_name = virtio_ccw_bus_name,
};