Diffstat (limited to 'drivers/s390')
-rw-r--r-- drivers/s390/block/dasd.c | 386
-rw-r--r-- drivers/s390/block/dasd_3990_erp.c | 2
-rw-r--r-- drivers/s390/block/dasd_devmap.c | 11
-rw-r--r-- drivers/s390/block/dasd_diag.c | 2
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 8
-rw-r--r-- drivers/s390/block/dasd_eckd.h | 2
-rw-r--r-- drivers/s390/block/dasd_erp.c | 2
-rw-r--r-- drivers/s390/block/dasd_fba.c | 202
-rw-r--r-- drivers/s390/block/dasd_int.h | 38
-rw-r--r-- drivers/s390/block/dasd_proc.c | 2
-rw-r--r-- drivers/s390/block/dcssblk.c | 4
-rw-r--r-- drivers/s390/block/scm_blk.c | 13
-rw-r--r-- drivers/s390/block/xpram.c | 2
-rw-r--r-- drivers/s390/char/Kconfig | 11
-rw-r--r-- drivers/s390/char/raw3270.c | 2
-rw-r--r-- drivers/s390/char/sclp_cmd.c | 1
-rw-r--r-- drivers/s390/char/sclp_config.c | 2
-rw-r--r-- drivers/s390/char/sclp_early.c | 6
-rw-r--r-- drivers/s390/char/sclp_ocf.c | 2
-rw-r--r-- drivers/s390/char/tape_core.c | 2
-rw-r--r-- drivers/s390/char/vmcp.c | 112
-rw-r--r-- drivers/s390/char/vmcp.h | 30
-rw-r--r-- drivers/s390/cio/chp.c | 4
-rw-r--r-- drivers/s390/cio/device.c | 4
-rw-r--r-- drivers/s390/cio/vfio_ccw_cp.c | 2
-rw-r--r-- drivers/s390/crypto/ap_asm.h | 9
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 49
-rw-r--r-- drivers/s390/crypto/ap_bus.h | 47
-rw-r--r-- drivers/s390/crypto/ap_queue.c | 26
-rw-r--r-- drivers/s390/crypto/zcrypt_card.c | 2
-rw-r--r-- drivers/s390/crypto/zcrypt_msgtype6.c | 2
-rw-r--r-- drivers/s390/crypto/zcrypt_queue.c | 2
-rw-r--r-- drivers/s390/net/ctcm_main.c | 2
-rw-r--r-- drivers/s390/net/lcs.c | 28
-rw-r--r-- drivers/s390/net/netiucv.c | 4
-rw-r--r-- drivers/s390/net/qeth_core.h | 17
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 205
-rw-r--r-- drivers/s390/net/qeth_core_sys.c | 2
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 343
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 71
-rw-r--r-- drivers/s390/net/qeth_l3_sys.c | 25
-rw-r--r-- drivers/s390/scsi/zfcp_aux.c | 1
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 95
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.h | 25
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 5
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 1
-rw-r--r-- drivers/s390/scsi/zfcp_fc.c | 52
-rw-r--r-- drivers/s390/scsi/zfcp_fc.h | 25
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 35
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.h | 12
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.c | 2
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.h | 17
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 18
53 files changed, 1200 insertions, 774 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 670ac0a4ef49..ea19b4ff87a2 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -62,7 +62,6 @@ MODULE_LICENSE("GPL");
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
-static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
@@ -158,7 +157,6 @@ struct dasd_block *dasd_alloc_block(void)
/* open_count = 0 means device online but not in use */
atomic_set(&block->open_count, -1);
- spin_lock_init(&block->request_queue_lock);
atomic_set(&block->tasklet_scheduled, 0);
tasklet_init(&block->tasklet,
(void (*)(unsigned long)) dasd_block_tasklet,
@@ -391,7 +389,6 @@ static int dasd_state_ready_to_basic(struct dasd_device *device)
device->state = DASD_STATE_READY;
return rc;
}
- dasd_flush_request_queue(block);
dasd_destroy_partitions(block);
block->blocks = 0;
block->bp_block = 0;
@@ -801,11 +798,12 @@ static void dasd_profile_end(struct dasd_block *block,
struct dasd_ccw_req *cqr,
struct request *req)
{
- long strtime, irqtime, endtime, tottime; /* in microseconds */
- long tottimeps, sectors;
+ unsigned long strtime, irqtime, endtime, tottime;
+ unsigned long tottimeps, sectors;
struct dasd_device *device;
int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
int irqtime_ind, irqtimeps_ind, endtime_ind;
+ struct dasd_profile_info *data;
device = cqr->startdev;
if (!(dasd_global_profile_level ||
@@ -835,6 +833,11 @@ static void dasd_profile_end(struct dasd_block *block,
spin_lock(&dasd_global_profile.lock);
if (dasd_global_profile.data) {
+ data = dasd_global_profile.data;
+ data->dasd_sum_times += tottime;
+ data->dasd_sum_time_str += strtime;
+ data->dasd_sum_time_irq += irqtime;
+ data->dasd_sum_time_end += endtime;
dasd_profile_end_add_data(dasd_global_profile.data,
cqr->startdev != block->base,
cqr->cpmode == 1,
@@ -847,7 +850,12 @@ static void dasd_profile_end(struct dasd_block *block,
spin_unlock(&dasd_global_profile.lock);
spin_lock(&block->profile.lock);
- if (block->profile.data)
+ if (block->profile.data) {
+ data = block->profile.data;
+ data->dasd_sum_times += tottime;
+ data->dasd_sum_time_str += strtime;
+ data->dasd_sum_time_irq += irqtime;
+ data->dasd_sum_time_end += endtime;
dasd_profile_end_add_data(block->profile.data,
cqr->startdev != block->base,
cqr->cpmode == 1,
@@ -856,10 +864,16 @@ static void dasd_profile_end(struct dasd_block *block,
tottimeps_ind, strtime_ind,
irqtime_ind, irqtimeps_ind,
endtime_ind);
+ }
spin_unlock(&block->profile.lock);
spin_lock(&device->profile.lock);
- if (device->profile.data)
+ if (device->profile.data) {
+ data = device->profile.data;
+ data->dasd_sum_times += tottime;
+ data->dasd_sum_time_str += strtime;
+ data->dasd_sum_time_irq += irqtime;
+ data->dasd_sum_time_end += endtime;
dasd_profile_end_add_data(device->profile.data,
cqr->startdev != block->base,
cqr->cpmode == 1,
@@ -868,6 +882,7 @@ static void dasd_profile_end(struct dasd_block *block,
tottimeps_ind, strtime_ind,
irqtime_ind, irqtimeps_ind,
endtime_ind);
+ }
spin_unlock(&device->profile.lock);
}
@@ -989,6 +1004,14 @@ static void dasd_stats_seq_print(struct seq_file *m,
seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
+ seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
+ data->dasd_sum_times / data->dasd_io_reqs : 0UL);
+ seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
+ data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
+ seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
+ data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
+ seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
+ data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
seq_puts(m, "histogram_sectors ");
dasd_stats_array(m, data->dasd_io_secs);
seq_puts(m, "histogram_io_times ");
@@ -1619,8 +1642,10 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
dasd_schedule_device_bh(device);
- if (device->block)
+ if (device->block) {
dasd_schedule_block_bh(device->block);
+ blk_mq_run_hw_queues(device->block->request_queue, true);
+ }
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
@@ -1639,7 +1664,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
{
struct dasd_ccw_req *cqr, *next;
struct dasd_device *device;
- unsigned long long now;
+ unsigned long now;
int nrf_suppressed = 0;
int fp_suppressed = 0;
u8 *sense = NULL;
@@ -2612,6 +2637,7 @@ static void dasd_block_timeout(unsigned long ptr)
dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
dasd_schedule_block_bh(block);
+ blk_mq_run_hw_queues(block->request_queue, true);
}
/*
@@ -2651,115 +2677,11 @@ static void __dasd_process_erp(struct dasd_device *device,
erp_fn(cqr);
}
-/*
- * Fetch requests from the block device queue.
- */
-static void __dasd_process_request_queue(struct dasd_block *block)
-{
- struct request_queue *queue;
- struct request *req;
- struct dasd_ccw_req *cqr;
- struct dasd_device *basedev;
- unsigned long flags;
- queue = block->request_queue;
- basedev = block->base;
- /* No queue ? Then there is nothing to do. */
- if (queue == NULL)
- return;
-
- /*
- * We requeue request from the block device queue to the ccw
- * queue only in two states. In state DASD_STATE_READY the
- * partition detection is done and we need to requeue requests
- * for that. State DASD_STATE_ONLINE is normal block device
- * operation.
- */
- if (basedev->state < DASD_STATE_READY) {
- while ((req = blk_fetch_request(block->request_queue)))
- __blk_end_request_all(req, BLK_STS_IOERR);
- return;
- }
-
- /*
- * if device is stopped do not fetch new requests
- * except failfast is active which will let requests fail
- * immediately in __dasd_block_start_head()
- */
- if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST))
- return;
-
- /* Now we try to fetch requests from the request queue */
- while ((req = blk_peek_request(queue))) {
- if (basedev->features & DASD_FEATURE_READONLY &&
- rq_data_dir(req) == WRITE) {
- DBF_DEV_EVENT(DBF_ERR, basedev,
- "Rejecting write request %p",
- req);
- blk_start_request(req);
- __blk_end_request_all(req, BLK_STS_IOERR);
- continue;
- }
- if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
- (basedev->features & DASD_FEATURE_FAILFAST ||
- blk_noretry_request(req))) {
- DBF_DEV_EVENT(DBF_ERR, basedev,
- "Rejecting failfast request %p",
- req);
- blk_start_request(req);
- __blk_end_request_all(req, BLK_STS_TIMEOUT);
- continue;
- }
- cqr = basedev->discipline->build_cp(basedev, block, req);
- if (IS_ERR(cqr)) {
- if (PTR_ERR(cqr) == -EBUSY)
- break; /* normal end condition */
- if (PTR_ERR(cqr) == -ENOMEM)
- break; /* terminate request queue loop */
- if (PTR_ERR(cqr) == -EAGAIN) {
- /*
- * The current request cannot be build right
- * now, we have to try later. If this request
- * is the head-of-queue we stop the device
- * for 1/2 second.
- */
- if (!list_empty(&block->ccw_queue))
- break;
- spin_lock_irqsave(
- get_ccwdev_lock(basedev->cdev), flags);
- dasd_device_set_stop_bits(basedev,
- DASD_STOPPED_PENDING);
- spin_unlock_irqrestore(
- get_ccwdev_lock(basedev->cdev), flags);
- dasd_block_set_timer(block, HZ/2);
- break;
- }
- DBF_DEV_EVENT(DBF_ERR, basedev,
- "CCW creation failed (rc=%ld) "
- "on request %p",
- PTR_ERR(cqr), req);
- blk_start_request(req);
- __blk_end_request_all(req, BLK_STS_IOERR);
- continue;
- }
- /*
- * Note: callback is set to dasd_return_cqr_cb in
- * __dasd_block_start_head to cover erp requests as well
- */
- cqr->callback_data = (void *) req;
- cqr->status = DASD_CQR_FILLED;
- req->completion_data = cqr;
- blk_start_request(req);
- list_add_tail(&cqr->blocklist, &block->ccw_queue);
- INIT_LIST_HEAD(&cqr->devlist);
- dasd_profile_start(block, cqr, req);
- }
-}
-
static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
struct request *req;
- int status;
blk_status_t error = BLK_STS_OK;
+ int status;
req = (struct request *) cqr->callback_data;
dasd_profile_end(cqr->block, cqr, req);
@@ -2783,7 +2705,19 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
break;
}
}
- __blk_end_request_all(req, error);
+
+ /*
+ * We need to take care for ETIMEDOUT errors here since the
+ * complete callback does not get called in this case.
+ * Take care of all errors here and avoid additional code to
+ * transfer the error value to the complete callback.
+ */
+ if (error) {
+ blk_mq_end_request(req, error);
+ blk_mq_run_hw_queues(req->q, true);
+ } else {
+ blk_mq_complete_request(req);
+ }
}
/*
@@ -2912,27 +2846,30 @@ static void dasd_block_tasklet(struct dasd_block *block)
struct list_head final_queue;
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
+ struct dasd_queue *dq;
atomic_set(&block->tasklet_scheduled, 0);
INIT_LIST_HEAD(&final_queue);
- spin_lock(&block->queue_lock);
+ spin_lock_irq(&block->queue_lock);
/* Finish off requests on ccw queue */
__dasd_process_block_ccw_queue(block, &final_queue);
- spin_unlock(&block->queue_lock);
+ spin_unlock_irq(&block->queue_lock);
+
/* Now call the callback function of requests with final status */
- spin_lock_irq(&block->request_queue_lock);
list_for_each_safe(l, n, &final_queue) {
cqr = list_entry(l, struct dasd_ccw_req, blocklist);
+ dq = cqr->dq;
+ spin_lock_irq(&dq->lock);
list_del_init(&cqr->blocklist);
__dasd_cleanup_cqr(cqr);
+ spin_unlock_irq(&dq->lock);
}
- spin_lock(&block->queue_lock);
- /* Get new request from the block device request queue */
- __dasd_process_request_queue(block);
+
+ spin_lock_irq(&block->queue_lock);
/* Now check if the head of the ccw queue needs to be started. */
__dasd_block_start_head(block);
- spin_unlock(&block->queue_lock);
- spin_unlock_irq(&block->request_queue_lock);
+ spin_unlock_irq(&block->queue_lock);
+
if (waitqueue_active(&shutdown_waitq))
wake_up(&shutdown_waitq);
dasd_put_device(block->base);
@@ -2951,14 +2888,13 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
{
struct dasd_block *block = cqr->block;
struct request *req;
- unsigned long flags;
if (!block)
return -EINVAL;
- spin_lock_irqsave(&block->request_queue_lock, flags);
+ spin_lock_irq(&cqr->dq->lock);
req = (struct request *) cqr->callback_data;
- blk_requeue_request(block->request_queue, req);
- spin_unlock_irqrestore(&block->request_queue_lock, flags);
+ blk_mq_requeue_request(req, false);
+ spin_unlock_irq(&cqr->dq->lock);
return 0;
}
@@ -2973,6 +2909,7 @@ static int dasd_flush_block_queue(struct dasd_block *block)
struct dasd_ccw_req *cqr, *n;
int rc, i;
struct list_head flush_queue;
+ unsigned long flags;
INIT_LIST_HEAD(&flush_queue);
spin_lock_bh(&block->queue_lock);
@@ -3011,11 +2948,11 @@ restart_cb:
goto restart_cb;
}
/* call the callback function */
- spin_lock_irq(&block->request_queue_lock);
+ spin_lock_irqsave(&cqr->dq->lock, flags);
cqr->endclk = get_tod_clock();
list_del_init(&cqr->blocklist);
__dasd_cleanup_cqr(cqr);
- spin_unlock_irq(&block->request_queue_lock);
+ spin_unlock_irqrestore(&cqr->dq->lock, flags);
}
return rc;
}
@@ -3043,42 +2980,114 @@ EXPORT_SYMBOL(dasd_schedule_block_bh);
/*
* Dasd request queue function. Called from ll_rw_blk.c
*/
-static void do_dasd_request(struct request_queue *queue)
+static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *qd)
{
- struct dasd_block *block;
+ struct dasd_block *block = hctx->queue->queuedata;
+ struct dasd_queue *dq = hctx->driver_data;
+ struct request *req = qd->rq;
+ struct dasd_device *basedev;
+ struct dasd_ccw_req *cqr;
+ blk_status_t rc = BLK_STS_OK;
- block = queue->queuedata;
+ basedev = block->base;
+ spin_lock_irq(&dq->lock);
+ if (basedev->state < DASD_STATE_READY) {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "device not ready for request %p", req);
+ rc = BLK_STS_IOERR;
+ goto out;
+ }
+
+ /*
+ * if device is stopped do not fetch new requests
+ * except failfast is active which will let requests fail
+ * immediately in __dasd_block_start_head()
+ */
+ if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "device stopped request %p", req);
+ rc = BLK_STS_RESOURCE;
+ goto out;
+ }
+
+ if (basedev->features & DASD_FEATURE_READONLY &&
+ rq_data_dir(req) == WRITE) {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "Rejecting write request %p", req);
+ rc = BLK_STS_IOERR;
+ goto out;
+ }
+
+ if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
+ (basedev->features & DASD_FEATURE_FAILFAST ||
+ blk_noretry_request(req))) {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "Rejecting failfast request %p", req);
+ rc = BLK_STS_IOERR;
+ goto out;
+ }
+
+ cqr = basedev->discipline->build_cp(basedev, block, req);
+ if (IS_ERR(cqr)) {
+ if (PTR_ERR(cqr) == -EBUSY ||
+ PTR_ERR(cqr) == -ENOMEM ||
+ PTR_ERR(cqr) == -EAGAIN) {
+ rc = BLK_STS_RESOURCE;
+ goto out;
+ }
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "CCW creation failed (rc=%ld) on request %p",
+ PTR_ERR(cqr), req);
+ rc = BLK_STS_IOERR;
+ goto out;
+ }
+ /*
+ * Note: callback is set to dasd_return_cqr_cb in
+ * __dasd_block_start_head to cover erp requests as well
+ */
+ cqr->callback_data = req;
+ cqr->status = DASD_CQR_FILLED;
+ cqr->dq = dq;
+ req->completion_data = cqr;
+ blk_mq_start_request(req);
spin_lock(&block->queue_lock);
- /* Get new request from the block device request queue */
- __dasd_process_request_queue(block);
- /* Now check if the head of the ccw queue needs to be started. */
- __dasd_block_start_head(block);
+ list_add_tail(&cqr->blocklist, &block->ccw_queue);
+ INIT_LIST_HEAD(&cqr->devlist);
+ dasd_profile_start(block, cqr, req);
+ dasd_schedule_block_bh(block);
spin_unlock(&block->queue_lock);
+
+out:
+ spin_unlock_irq(&dq->lock);
+ return rc;
}
/*
* Block timeout callback, called from the block layer
*
- * request_queue lock is held on entry.
- *
* Return values:
* BLK_EH_RESET_TIMER if the request should be left running
* BLK_EH_NOT_HANDLED if the request is handled or terminated
* by the driver.
*/
-enum blk_eh_timer_return dasd_times_out(struct request *req)
+enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
{
struct dasd_ccw_req *cqr = req->completion_data;
struct dasd_block *block = req->q->queuedata;
struct dasd_device *device;
+ unsigned long flags;
int rc = 0;
if (!cqr)
return BLK_EH_NOT_HANDLED;
+ spin_lock_irqsave(&cqr->dq->lock, flags);
device = cqr->startdev ? cqr->startdev : block->base;
- if (!device->blk_timeout)
+ if (!device->blk_timeout) {
+ spin_unlock_irqrestore(&cqr->dq->lock, flags);
return BLK_EH_RESET_TIMER;
+ }
DBF_DEV_EVENT(DBF_WARNING, device,
" dasd_times_out cqr %p status %x",
cqr, cqr->status);
@@ -3128,19 +3137,64 @@ enum blk_eh_timer_return dasd_times_out(struct request *req)
}
dasd_schedule_block_bh(block);
spin_unlock(&block->queue_lock);
+ spin_unlock_irqrestore(&cqr->dq->lock, flags);
return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
}
+static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int idx)
+{
+ struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
+
+ if (!dq)
+ return -ENOMEM;
+
+ spin_lock_init(&dq->lock);
+ hctx->driver_data = dq;
+
+ return 0;
+}
+
+static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
+{
+ kfree(hctx->driver_data);
+ hctx->driver_data = NULL;
+}
+
+static void dasd_request_done(struct request *req)
+{
+ blk_mq_end_request(req, 0);
+ blk_mq_run_hw_queues(req->q, true);
+}
+
+static struct blk_mq_ops dasd_mq_ops = {
+ .queue_rq = do_dasd_request,
+ .complete = dasd_request_done,
+ .timeout = dasd_times_out,
+ .init_hctx = dasd_init_hctx,
+ .exit_hctx = dasd_exit_hctx,
+};
+
/*
* Allocate and initialize request queue and default I/O scheduler.
*/
static int dasd_alloc_queue(struct dasd_block *block)
{
- block->request_queue = blk_init_queue(do_dasd_request,
- &block->request_queue_lock);
- if (block->request_queue == NULL)
- return -ENOMEM;
+ int rc;
+
+ block->tag_set.ops = &dasd_mq_ops;
+ block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
+ block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
+ block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+
+ rc = blk_mq_alloc_tag_set(&block->tag_set);
+ if (rc)
+ return rc;
+
+ block->request_queue = blk_mq_init_queue(&block->tag_set);
+ if (IS_ERR(block->request_queue))
+ return PTR_ERR(block->request_queue);
block->request_queue->queuedata = block;
@@ -3152,7 +3206,9 @@ static int dasd_alloc_queue(struct dasd_block *block)
*/
static void dasd_setup_queue(struct dasd_block *block)
{
+ unsigned int logical_block_size = block->bp_block;
struct request_queue *q = block->request_queue;
+ unsigned int max_bytes, max_discard_sectors;
int max;
if (block->base->features & DASD_FEATURE_USERAW) {
@@ -3169,7 +3225,7 @@ static void dasd_setup_queue(struct dasd_block *block)
}
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
q->limits.max_dev_sectors = max;
- blk_queue_logical_block_size(q, block->bp_block);
+ blk_queue_logical_block_size(q, logical_block_size);
blk_queue_max_hw_sectors(q, max);
blk_queue_max_segments(q, USHRT_MAX);
/* with page sized segments we can translate each segment into
 * one idaw/tidaw
 */
*/
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+
+ /* Only activate blocklayer discard support for devices that support it */
+ if (block->base->features & DASD_FEATURE_DISCARD) {
+ q->limits.discard_granularity = logical_block_size;
+ q->limits.discard_alignment = PAGE_SIZE;
+
+ /* Calculate max_discard_sectors and make it PAGE aligned */
+ max_bytes = USHRT_MAX * logical_block_size;
+ max_bytes = ALIGN(max_bytes, PAGE_SIZE) - PAGE_SIZE;
+ max_discard_sectors = max_bytes / logical_block_size;
+
+ blk_queue_max_discard_sectors(q, max_discard_sectors);
+ blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ }
}
/*
@@ -3186,26 +3257,11 @@ static void dasd_free_queue(struct dasd_block *block)
{
if (block->request_queue) {
blk_cleanup_queue(block->request_queue);
+ blk_mq_free_tag_set(&block->tag_set);
block->request_queue = NULL;
}
}
-/*
- * Flush request on the request queue.
- */
-static void dasd_flush_request_queue(struct dasd_block *block)
-{
- struct request *req;
-
- if (!block->request_queue)
- return;
-
- spin_lock_irq(&block->request_queue_lock);
- while ((req = blk_fetch_request(block->request_queue)))
- __blk_end_request_all(req, BLK_STS_IOERR);
- spin_unlock_irq(&block->request_queue_lock);
-}
-
static int dasd_open(struct block_device *bdev, fmode_t mode)
{
struct dasd_device *base;
@@ -3701,8 +3757,10 @@ int dasd_generic_path_operational(struct dasd_device *device)
return 1;
}
dasd_schedule_device_bh(device);
- if (device->block)
+ if (device->block) {
dasd_schedule_block_bh(device->block);
+ blk_mq_run_hw_queues(device->block->request_queue, true);
+ }
if (!device->stopped)
wake_up(&generic_waitq);
@@ -3965,8 +4023,10 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
*/
device->stopped |= DASD_UNRESUMED_PM;
- if (device->block)
+ if (device->block) {
dasd_schedule_block_bh(device->block);
+ blk_mq_run_hw_queues(device->block->request_queue, true);
+ }
clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
dasd_put_device(device);
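
Annotation: the dasd.c hunks above are a textbook conversion from the legacy request_fn model (do_dasd_request running under request_queue_lock) to blk-mq: a blk_mq_ops with a queue_rq handler, a per-hctx lock set up in init_hctx, and explicit blk_mq_run_hw_queues() kicks wherever the old code only rescheduled a tasklet. Below is a minimal sketch of the same pattern against the blk-mq API of this kernel generation; the my_* names are illustrative, not part of the DASD driver.

#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_queue {
	spinlock_t lock;		/* per-hctx lock, cf. struct dasd_queue */
};

/* cf. do_dasd_request(): translate one request, do not sleep. */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *qd)
{
	struct request *req = qd->rq;

	blk_mq_start_request(req);
	/* build and queue the driver-internal request here ... */
	return BLK_STS_OK;	/* BLK_STS_RESOURCE asks the core to retry later */
}

static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			unsigned int idx)
{
	struct my_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);

	if (!dq)
		return -ENOMEM;
	spin_lock_init(&dq->lock);
	hctx->driver_data = dq;
	return 0;
}

static void my_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
	kfree(hctx->driver_data);
	hctx->driver_data = NULL;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.init_hctx	= my_init_hctx,
	.exit_hctx	= my_exit_hctx,
};

/* cf. dasd_alloc_queue(): allocate the tag set first, then the queue. */
static int my_alloc_queue(struct blk_mq_tag_set *set, struct request_queue **q)
{
	int rc;

	set->ops = &my_mq_ops;
	set->nr_hw_queues = 64;		/* cf. DASD_NR_HW_QUEUES */
	set->queue_depth = 256 * 4;	/* cf. DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV */
	set->flags = BLK_MQ_F_SHOULD_MERGE;

	rc = blk_mq_alloc_tag_set(set);
	if (rc)
		return rc;
	*q = blk_mq_init_queue(set);
	if (IS_ERR(*q)) {
		blk_mq_free_tag_set(set);
		return PTR_ERR(*q);
	}
	return 0;
}

Note how __dasd_cleanup_cqr() above ends errored requests with blk_mq_end_request() directly, because the ->complete callback is not invoked for them, and routes everything else through blk_mq_complete_request().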
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 107cd3361e29..e448a0fc0c09 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2231,7 +2231,7 @@ static void dasd_3990_erp_account_error(struct dasd_ccw_req *erp)
struct dasd_device *device = erp->startdev;
__u8 lpum = erp->refers->irb.esw.esw1.lpum;
int pos = pathmask_to_pos(lpum);
- unsigned long long clk;
+ unsigned long clk;
if (!device->path_thrhld)
return;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 779dce069cc5..c95a4784c191 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1326,7 +1326,7 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
{
struct dasd_device *device;
struct request_queue *q;
- unsigned long val, flags;
+ unsigned long val;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device) || !device->block)
@@ -1342,16 +1342,10 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
dasd_put_device(device);
return -ENODEV;
}
- spin_lock_irqsave(&device->block->request_queue_lock, flags);
- if (!val)
- blk_queue_rq_timed_out(q, NULL);
- else
- blk_queue_rq_timed_out(q, dasd_times_out);
device->blk_timeout = val;
blk_queue_rq_timeout(q, device->blk_timeout * HZ);
- spin_unlock_irqrestore(&device->block->request_queue_lock, flags);
dasd_put_device(device);
return count;
@@ -1634,7 +1628,7 @@ static struct attribute * dasd_attrs[] = {
NULL,
};
-static struct attribute_group dasd_attr_group = {
+static const struct attribute_group dasd_attr_group = {
.attrs = dasd_attrs,
};
@@ -1676,6 +1670,7 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
spin_unlock(&dasd_devmap_lock);
return 0;
}
+EXPORT_SYMBOL(dasd_set_feature);
int
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 5667146c6a0a..98fb28e49d2c 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -235,7 +235,7 @@ static void dasd_ext_handler(struct ext_code ext_code,
{
struct dasd_ccw_req *cqr, *next;
struct dasd_device *device;
- unsigned long long expires;
+ unsigned long expires;
unsigned long flags;
addr_t ip;
int rc;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index c3e5ad641b0b..8eafcd5fa004 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3254,11 +3254,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
/* 1x prefix + one read/write ccw per track */
cplength = 1 + trkcount;
- /* on 31-bit we need space for two 32 bit addresses per page
- * on 64-bit one 64 bit address
- */
- datasize = sizeof(struct PFX_eckd_data) +
- cidaw * sizeof(unsigned long long);
+ datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
@@ -3856,7 +3852,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
}
size = ALIGN(size, 8);
- datasize = size + cidaw * sizeof(unsigned long long);
+ datasize = size + cidaw * sizeof(unsigned long);
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index fb1f537d986a..34e153a6b19c 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -165,7 +165,7 @@ struct DE_eckd_data {
__u8 ga_extended; /* Global Attributes Extended */
struct ch_t beg_ext;
struct ch_t end_ext;
- unsigned long long ep_sys_time; /* Ext Parameter - System Time Stamp */
+ unsigned long ep_sys_time; /* Ext Parameter - System Time Stamp */
__u8 ep_format; /* Extended Parameter format byte */
__u8 ep_prio; /* Extended Parameter priority I/O byte */
__u8 ep_reserved1; /* Extended Parameter Reserved */
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 9e3419124264..6389feb2fb7a 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -124,7 +124,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
{
int success;
- unsigned long long startclk, stopclk;
+ unsigned long startclk, stopclk;
struct dasd_device *startdev;
BUG_ON(cqr->refers == NULL || cqr->function == NULL);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 462cab5d4302..6168ccdb389c 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -174,6 +174,9 @@ dasd_fba_check_characteristics(struct dasd_device *device)
if (readonly)
set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+ /* FBA supports discard, set the corresponding feature bit */
+ dasd_set_feature(cdev, DASD_FEATURE_DISCARD, 1);
+
dev_info(&device->cdev->dev,
"New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB "
"and %d B/blk%s\n",
@@ -247,9 +250,192 @@ static void dasd_fba_check_for_device_change(struct dasd_device *device,
dasd_generic_handle_state_change(device);
};
-static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
- struct dasd_block *block,
- struct request *req)
+
+/*
+ * Builds a CCW with no data payload
+ */
+static void ccw_write_no_data(struct ccw1 *ccw)
+{
+ ccw->cmd_code = DASD_FBA_CCW_WRITE;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->count = 0;
+}
+
+/*
+ * Builds a CCW that writes only zeroes.
+ */
+static void ccw_write_zero(struct ccw1 *ccw, int count)
+{
+ ccw->cmd_code = DASD_FBA_CCW_WRITE;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->count = count;
+ ccw->cda = (__u32) (addr_t) page_to_phys(ZERO_PAGE(0));
+}
+
+/*
+ * Helper function to count the number of CCWs needed within a given range
+ * with 4k alignment and command chaining in mind.
+ */
+static int count_ccws(sector_t first_rec, sector_t last_rec,
+ unsigned int blocks_per_page)
+{
+ sector_t wz_stop = 0, d_stop = 0;
+ int cur_pos = 0;
+ int count = 0;
+
+ if (first_rec % blocks_per_page != 0) {
+ wz_stop = first_rec + blocks_per_page -
+ (first_rec % blocks_per_page) - 1;
+ if (wz_stop > last_rec)
+ wz_stop = last_rec;
+ cur_pos = wz_stop - first_rec + 1;
+ count++;
+ }
+
+ if (last_rec - (first_rec + cur_pos) + 1 >= blocks_per_page) {
+ if ((last_rec - blocks_per_page + 1) % blocks_per_page != 0)
+ d_stop = last_rec - ((last_rec - blocks_per_page + 1) %
+ blocks_per_page);
+ else
+ d_stop = last_rec;
+
+ cur_pos += d_stop - (first_rec + cur_pos) + 1;
+ count++;
+ }
+
+ if (cur_pos == 0 || first_rec + cur_pos - 1 < last_rec)
+ count++;
+
+ return count;
+}
+
+/*
+ * This function builds a CCW request for block layer discard requests.
+ * Each page in the z/VM hypervisor that represents certain records of an FBA
+ * device will be padded with zeros. This is a special behaviour of the WRITE
+ * command which is triggered when no data payload is added to the CCW.
+ *
+ * Note: Due to issues in some z/VM versions, we can't fully utilise this
+ * special behaviour. We have to keep a 4k (or 8 block) alignment in mind to
+ * work around those issues and write actual zeroes to the unaligned parts in
+ * the request. This workaround might be removed in the future.
+ */
+static struct dasd_ccw_req *dasd_fba_build_cp_discard(
+ struct dasd_device *memdev,
+ struct dasd_block *block,
+ struct request *req)
+{
+ struct LO_fba_data *LO_data;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+
+ sector_t wz_stop = 0, d_stop = 0;
+ sector_t first_rec, last_rec;
+
+ unsigned int blksize = block->bp_block;
+ unsigned int blocks_per_page;
+ int wz_count = 0;
+ int d_count = 0;
+ int cur_pos = 0; /* Current position within the extent */
+ int count = 0;
+ int cplength;
+ int datasize;
+ int nr_ccws;
+
+ first_rec = blk_rq_pos(req) >> block->s2b_shift;
+ last_rec =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+ count = last_rec - first_rec + 1;
+
+ blocks_per_page = BLOCKS_PER_PAGE(blksize);
+ nr_ccws = count_ccws(first_rec, last_rec, blocks_per_page);
+
+ /* define extent + nr_ccws * locate record + nr_ccws * single CCW */
+ cplength = 1 + 2 * nr_ccws;
+ datasize = sizeof(struct DE_fba_data) +
+ nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1));
+
+ cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+ if (IS_ERR(cqr))
+ return cqr;
+
+ ccw = cqr->cpaddr;
+
+ define_extent(ccw++, cqr->data, WRITE, blksize, first_rec, count);
+ LO_data = cqr->data + sizeof(struct DE_fba_data);
+
+ /* First part is not aligned. Calculate range to write zeroes. */
+ if (first_rec % blocks_per_page != 0) {
+ wz_stop = first_rec + blocks_per_page -
+ (first_rec % blocks_per_page) - 1;
+ if (wz_stop > last_rec)
+ wz_stop = last_rec;
+ wz_count = wz_stop - first_rec + 1;
+
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, LO_data++, WRITE, cur_pos, wz_count);
+
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw_write_zero(ccw++, wz_count * blksize);
+
+ cur_pos = wz_count;
+ }
+
+ /* We can do proper discard when we've got at least blocks_per_page blocks. */
+ if (last_rec - (first_rec + cur_pos) + 1 >= blocks_per_page) {
+ /* is last record at page boundary? */
+ if ((last_rec - blocks_per_page + 1) % blocks_per_page != 0)
+ d_stop = last_rec - ((last_rec - blocks_per_page + 1) %
+ blocks_per_page);
+ else
+ d_stop = last_rec;
+
+ d_count = d_stop - (first_rec + cur_pos) + 1;
+
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, LO_data++, WRITE, cur_pos, d_count);
+
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw_write_no_data(ccw++);
+
+ cur_pos += d_count;
+ }
+
+ /* We might still have some bits left which need to be zeroed. */
+ if (cur_pos == 0 || first_rec + cur_pos - 1 < last_rec) {
+ if (d_stop != 0)
+ wz_count = last_rec - d_stop;
+ else if (wz_stop != 0)
+ wz_count = last_rec - wz_stop;
+ else
+ wz_count = count;
+
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, LO_data++, WRITE, cur_pos, wz_count);
+
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw_write_zero(ccw++, wz_count * blksize);
+ }
+
+ if (blk_noretry_request(req) ||
+ block->base->features & DASD_FEATURE_FAILFAST)
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+
+ cqr->startdev = memdev;
+ cqr->memdev = memdev;
+ cqr->block = block;
+ cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */
+ cqr->retries = memdev->default_retries;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ return cqr;
+}
+
+static struct dasd_ccw_req *dasd_fba_build_cp_regular(
+ struct dasd_device *memdev,
+ struct dasd_block *block,
+ struct request *req)
{
struct dasd_fba_private *private = block->base->private;
unsigned long *idaws;
@@ -372,6 +558,16 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
return cqr;
}
+static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device *memdev,
+ struct dasd_block *block,
+ struct request *req)
+{
+ if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_ZEROES)
+ return dasd_fba_build_cp_discard(memdev, block, req);
+ else
+ return dasd_fba_build_cp_regular(memdev, block, req);
+}
+
static int
dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index dca7cb1e6f65..db470bd10175 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -56,6 +56,7 @@
#include <asm/dasd.h>
#include <asm/idals.h>
#include <linux/bitops.h>
+#include <linux/blk-mq.h>
/* DASD discipline magic */
#define DASD_ECKD_MAGIC 0xC5C3D2C4
@@ -167,6 +168,9 @@ do { \
printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
} while(0)
+/* Macro to calculate number of blocks per page */
+#define BLOCKS_PER_PAGE(blksize) (PAGE_SIZE / blksize)
+
struct dasd_ccw_req {
unsigned int magic; /* Eye catcher */
struct list_head devlist; /* for dasd_device request queue */
@@ -182,6 +186,7 @@ struct dasd_ccw_req {
char status; /* status of this request */
short retries; /* A retry counter */
unsigned long flags; /* flags of this request */
+ struct dasd_queue *dq;
/* ... and how */
unsigned long starttime; /* jiffies time of request start */
@@ -196,10 +201,10 @@ struct dasd_ccw_req {
void *function; /* originating ERP action */
/* these are for statistics only */
- unsigned long long buildclk; /* TOD-clock of request generation */
- unsigned long long startclk; /* TOD-clock of request start */
- unsigned long long stopclk; /* TOD-clock of request interrupt */
- unsigned long long endclk; /* TOD-clock of request termination */
+ unsigned long buildclk; /* TOD-clock of request generation */
+ unsigned long startclk; /* TOD-clock of request start */
+ unsigned long stopclk; /* TOD-clock of request interrupt */
+ unsigned long endclk; /* TOD-clock of request termination */
/* Callback that is called after reaching final status. */
void (*callback)(struct dasd_ccw_req *, void *data);
@@ -245,6 +250,16 @@ struct dasd_ccw_req {
#define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */
#define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */
+/*
+ * There is no reliable way to determine the number of available CPUs on
+ * LPAR but there is no big performance difference between 1 and the
+ * maximum CPU number.
+ * 64 is a good trade off performance wise.
+ */
+#define DASD_NR_HW_QUEUES 64
+#define DASD_MAX_LCU_DEV 256
+#define DASD_REQ_PER_DEV 4
+
/* Signature for error recovery functions. */
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
@@ -423,7 +438,7 @@ struct dasd_path {
u8 chpid;
struct dasd_conf_data *conf_data;
atomic_t error_count;
- unsigned long long errorclk;
+ unsigned long errorclk;
};
@@ -454,6 +469,10 @@ struct dasd_profile_info {
unsigned int dasd_read_time2[32]; /* hist. of time from start to irq */
unsigned int dasd_read_time3[32]; /* hist. of time from irq to end */
unsigned int dasd_read_nr_req[32]; /* hist. of # of requests in chanq */
+ unsigned long dasd_sum_times; /* sum of request times */
+ unsigned long dasd_sum_time_str; /* sum of time from build to start */
+ unsigned long dasd_sum_time_irq; /* sum of time from start to irq */
+ unsigned long dasd_sum_time_end; /* sum of time from irq to end */
};
struct dasd_profile {
@@ -532,10 +551,11 @@ struct dasd_block {
struct gendisk *gdp;
struct request_queue *request_queue;
spinlock_t request_queue_lock;
+ struct blk_mq_tag_set tag_set;
struct block_device *bdev;
atomic_t open_count;
- unsigned long long blocks; /* size of volume in blocks */
+ unsigned long blocks; /* size of volume in blocks */
unsigned int bp_block; /* bytes per block */
unsigned int s2b_shift; /* log2 (bp_block/512) */
@@ -556,6 +576,10 @@ struct dasd_attention_data {
__u8 lpum;
};
+struct dasd_queue {
+ spinlock_t lock;
+};
+
/* reasons why device (ccw_device_start) was stopped */
#define DASD_STOPPED_NOT_ACC 1 /* not accessible */
#define DASD_STOPPED_QUIESCE 2 /* Quiesced */
@@ -724,7 +748,7 @@ void dasd_free_device(struct dasd_device *);
struct dasd_block *dasd_alloc_block(void);
void dasd_free_block(struct dasd_block *);
-enum blk_eh_timer_return dasd_times_out(struct request *req);
+enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved);
void dasd_enable_device(struct dasd_device *);
void dasd_set_target_state(struct dasd_device *, int);
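
Annotation: with the defaults introduced above, the tag set sized in dasd_alloc_queue() provides DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV = 256 * 4 = 1024 tags, shared across DASD_NR_HW_QUEUES = 64 hardware queues, i.e. an average of 16 in-flight requests per hctx.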
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 70dc2c4cd3f7..7104d6765773 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -90,7 +90,7 @@ dasd_devices_show(struct seq_file *m, void *v)
seq_printf(m, "n/f ");
else
seq_printf(m,
- "at blocksize: %d, %lld blocks, %lld MB",
+ "at blocksize: %u, %lu blocks, %lu MB",
block->bp_block, block->blocks,
((block->bp_block >> 9) *
block->blocks) >> 11);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 68bae4f6bd88..7abb240847c0 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -856,14 +856,14 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
blk_queue_split(q, &bio);
bytes_done = 0;
- dev_info = bio->bi_bdev->bd_disk->private_data;
+ dev_info = bio->bi_disk->private_data;
if (dev_info == NULL)
goto fail;
if ((bio->bi_iter.bi_sector & 7) != 0 ||
(bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
- if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
+ if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) {
/* Request beyond end of DCSS segment. */
goto fail;
}
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 0071febac9e6..2e7fd966c515 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -249,13 +249,13 @@ static void scm_request_requeue(struct scm_request *scmrq)
static void scm_request_finish(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
+ int *error;
int i;
for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
- if (scmrq->error)
- blk_mq_end_request(scmrq->request[i], scmrq->error);
- else
- blk_mq_complete_request(scmrq->request[i]);
+ error = blk_mq_rq_to_pdu(scmrq->request[i]);
+ *error = scmrq->error;
+ blk_mq_complete_request(scmrq->request[i]);
}
atomic_dec(&bdev->queued_reqs);
@@ -415,7 +415,9 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
static void scm_blk_request_done(struct request *req)
{
- blk_mq_end_request(req, 0);
+ int *error = blk_mq_rq_to_pdu(req);
+
+ blk_mq_end_request(req, *error);
}
static const struct block_device_operations scm_blk_devops = {
@@ -448,6 +450,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
atomic_set(&bdev->queued_reqs, 0);
bdev->tag_set.ops = &scm_mq_ops;
+ bdev->tag_set.cmd_size = sizeof(int);
bdev->tag_set.nr_hw_queues = nr_requests;
bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
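
Annotation: the scm_blk.c change works around blk_mq_complete_request() no longer carrying an error argument in this kernel: the status is stashed in the per-request payload reserved via tag_set.cmd_size, where the ->complete callback picks it up. A minimal sketch of the pattern (my_* names are illustrative):

#include <linux/blk-mq.h>

/* IRQ path: record the status in the payload, defer to ->complete. */
static void my_irq_done(struct request *req, blk_status_t error)
{
	int *error_pdu = blk_mq_rq_to_pdu(req);	/* cmd_size = sizeof(int) */

	*error_pdu = error;
	blk_mq_complete_request(req);
}

/* ->complete callback: finish the request with the recorded status. */
static void my_request_done(struct request *req)
{
	int *error_pdu = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, *error_pdu);
}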
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index a48f0d40c1d2..571a0709e1e5 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -183,7 +183,7 @@ static unsigned long xpram_highest_page_index(void)
*/
static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
{
- xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
+ xpram_device_t *xdev = bio->bi_disk->private_data;
struct bio_vec bvec;
struct bvec_iter iter;
unsigned int index;
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index b3f1c458905f..97c4c9fdd53d 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -169,10 +169,21 @@ config VMCP
def_bool y
prompt "Support for the z/VM CP interface"
depends on S390
+ select CMA
help
Select this option if you want to be able to interact with the control
program on z/VM
+config VMCP_CMA_SIZE
+ int "Memory in MiB reserved for z/VM CP interface"
+ default "4"
+ depends on VMCP
+ help
+ Specify the default amount of memory in MiB reserved for the z/VM CP
+ interface. If needed this memory is used for large contiguous memory
+ allocations. The default can be changed with the kernel command line
+ parameter "vmcp_cma".
+
config MONREADER
def_tristate m
prompt "API for reading z/VM monitor service records"
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 710f2292911d..5d4f053d7c38 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1082,7 +1082,7 @@ static struct attribute * raw3270_attrs[] = {
NULL,
};
-static struct attribute_group raw3270_attr_group = {
+static const struct attribute_group raw3270_attr_group = {
.attrs = raw3270_attrs,
};
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index b9c5522b8a68..dff8b94871f0 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -252,6 +252,7 @@ static int sclp_attach_storage(u8 id)
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
+ sccb->header.function_code = 0x40;
rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
SCLP_QUEUE_INTERVAL);
if (rc)
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 1406fb688a26..7003d52c2191 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -135,7 +135,7 @@ static ssize_t sysfs_ofb_data_write(struct file *filp, struct kobject *kobj,
return rc ?: count;
}
-static struct bin_attribute ofb_bin_attr = {
+static const struct bin_attribute ofb_bin_attr = {
.attr = {
.name = "event_data",
.mode = S_IWUSR,
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index efd84d1d178b..bc1fc00910b0 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -39,7 +39,7 @@ struct read_info_sccb {
u8 fac84; /* 84 */
u8 fac85; /* 85 */
u8 _pad_86[91 - 86]; /* 86-90 */
- u8 flags; /* 91 */
+ u8 fac91; /* 91 */
u8 _pad_92[98 - 92]; /* 92-97 */
u8 fac98; /* 98 */
u8 hamaxpow; /* 99 */
@@ -103,6 +103,8 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
sclp.has_kss = !!(sccb->fac98 & 0x01);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
+ if (sccb->fac91 & 0x40)
+ S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp.rzm <<= 20;
@@ -139,7 +141,7 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
/* Save IPL information */
sclp_ipl_info.is_valid = 1;
- if (sccb->flags & 0x2)
+ if (sccb->fac91 & 0x2)
sclp_ipl_info.has_dump = 1;
memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c
index f59b71776bbd..f9cbb1ab047b 100644
--- a/drivers/s390/char/sclp_ocf.c
+++ b/drivers/s390/char/sclp_ocf.c
@@ -126,7 +126,7 @@ static struct attribute *ocf_attrs[] = {
NULL,
};
-static struct attribute_group ocf_attr_group = {
+static const struct attribute_group ocf_attr_group = {
.attrs = ocf_attrs,
};
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 3c379da2eef8..9dd4534823b3 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -175,7 +175,7 @@ static struct attribute *tape_attrs[] = {
NULL
};
-static struct attribute_group tape_attr_group = {
+static const struct attribute_group tape_attr_group = {
.attrs = tape_attrs,
};
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 98749fa817da..7898bbcc28fc 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -17,15 +17,85 @@
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/cma.h>
+#include <linux/mm.h>
#include <asm/compat.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
-#include <linux/uaccess.h>
-#include "vmcp.h"
+#include <asm/vmcp.h>
+
+struct vmcp_session {
+ char *response;
+ unsigned int bufsize;
+ unsigned int cma_alloc : 1;
+ int resp_size;
+ int resp_code;
+ struct mutex mutex;
+};
static debug_info_t *vmcp_debug;
+static unsigned long vmcp_cma_size __initdata = CONFIG_VMCP_CMA_SIZE * 1024 * 1024;
+static struct cma *vmcp_cma;
+
+static int __init early_parse_vmcp_cma(char *p)
+{
+ vmcp_cma_size = ALIGN(memparse(p, NULL), PAGE_SIZE);
+ return 0;
+}
+early_param("vmcp_cma", early_parse_vmcp_cma);
+
+void __init vmcp_cma_reserve(void)
+{
+ if (!MACHINE_IS_VM)
+ return;
+ cma_declare_contiguous(0, vmcp_cma_size, 0, 0, 0, false, "vmcp", &vmcp_cma);
+}
+
+static void vmcp_response_alloc(struct vmcp_session *session)
+{
+ struct page *page = NULL;
+ int nr_pages, order;
+
+ order = get_order(session->bufsize);
+ nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
+ /*
+ * For anything below order 3 allocations rely on the buddy
+ * allocator. If such low-order allocations can't be handled
+ * anymore the system won't work anyway.
+ */
+ if (order > 2)
+ page = cma_alloc(vmcp_cma, nr_pages, 0, GFP_KERNEL);
+ if (page) {
+ session->response = (char *)page_to_phys(page);
+ session->cma_alloc = 1;
+ return;
+ }
+ session->response = (char *)__get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, order);
+}
+
+static void vmcp_response_free(struct vmcp_session *session)
+{
+ int nr_pages, order;
+ struct page *page;
+
+ if (!session->response)
+ return;
+ order = get_order(session->bufsize);
+ nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
+ if (session->cma_alloc) {
+ page = phys_to_page((unsigned long)session->response);
+ cma_release(vmcp_cma, page, nr_pages);
+ session->cma_alloc = 0;
+ } else {
+ free_pages((unsigned long)session->response, order);
+ }
+ session->response = NULL;
+}
+
static int vmcp_open(struct inode *inode, struct file *file)
{
struct vmcp_session *session;
@@ -51,7 +121,7 @@ static int vmcp_release(struct inode *inode, struct file *file)
session = file->private_data;
file->private_data = NULL;
- free_pages((unsigned long)session->response, get_order(session->bufsize));
+ vmcp_response_free(session);
kfree(session);
return 0;
}
@@ -97,9 +167,7 @@ vmcp_write(struct file *file, const char __user *buff, size_t count,
return -ERESTARTSYS;
}
if (!session->response)
- session->response = (char *)__get_free_pages(GFP_KERNEL
- | __GFP_RETRY_MAYFAIL | GFP_DMA,
- get_order(session->bufsize));
+ vmcp_response_alloc(session);
if (!session->response) {
mutex_unlock(&session->mutex);
kfree(cmd);
@@ -130,8 +198,8 @@ vmcp_write(struct file *file, const char __user *buff, size_t count,
static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct vmcp_session *session;
+ int ret = -ENOTTY;
int __user *argp;
- int temp;
session = file->private_data;
if (is_compat_task())
@@ -142,28 +210,26 @@ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -ERESTARTSYS;
switch (cmd) {
case VMCP_GETCODE:
- temp = session->resp_code;
- mutex_unlock(&session->mutex);
- return put_user(temp, argp);
+ ret = put_user(session->resp_code, argp);
+ break;
case VMCP_SETBUF:
- free_pages((unsigned long)session->response,
- get_order(session->bufsize));
- session->response=NULL;
- temp = get_user(session->bufsize, argp);
- if (get_order(session->bufsize) > 8) {
+ vmcp_response_free(session);
+ ret = get_user(session->bufsize, argp);
+ if (ret)
session->bufsize = PAGE_SIZE;
- temp = -EINVAL;
+ if (!session->bufsize || get_order(session->bufsize) > 8) {
+ session->bufsize = PAGE_SIZE;
+ ret = -EINVAL;
}
- mutex_unlock(&session->mutex);
- return temp;
+ break;
case VMCP_GETSIZE:
- temp = session->resp_size;
- mutex_unlock(&session->mutex);
- return put_user(temp, argp);
+ ret = put_user(session->resp_size, argp);
+ break;
default:
- mutex_unlock(&session->mutex);
- return -ENOIOCTLCMD;
+ break;
}
+ mutex_unlock(&session->mutex);
+ return ret;
}
static const struct file_operations vmcp_fops = {
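
Annotation: the userspace contract of /dev/vmcp is unchanged by the CMA rework; only the buffer allocation behind it differs. See the usage sketch after the deleted header below, which defined the ioctl numbers now living in asm/vmcp.h.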
diff --git a/drivers/s390/char/vmcp.h b/drivers/s390/char/vmcp.h
deleted file mode 100644
index 1e29b0418382..000000000000
--- a/drivers/s390/char/vmcp.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright IBM Corp. 2004, 2005
- * Interface implementation for communication with the z/VM control program
- * Version 1.0
- * Author(s): Christian Borntraeger <cborntra@de.ibm.com>
- *
- *
- * z/VMs CP offers the possibility to issue commands via the diagnose code 8
- * this driver implements a character device that issues these commands and
- * returns the answer of CP.
- *
- * The idea of this driver is based on cpint from Neale Ferguson
- */
-
-#include <linux/ioctl.h>
-#include <linux/mutex.h>
-
-#define VMCP_GETCODE _IOR(0x10, 1, int)
-#define VMCP_SETBUF _IOW(0x10, 2, int)
-#define VMCP_GETSIZE _IOR(0x10, 3, int)
-
-struct vmcp_session {
- unsigned int bufsize;
- char *response;
- int resp_size;
- int resp_code;
- /* As we use copy_from/to_user, which might *
- * sleep and cannot use a spinlock */
- struct mutex mutex;
-};
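
A hedged sketch of a typical /dev/vmcp consumer, using the ioctl numbers from the header just removed (command, buffer size, and error handling deliberately minimal):

#include <fcntl.h>
#include <linux/ioctl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define VMCP_GETCODE _IOR(0x10, 1, int)
#define VMCP_SETBUF  _IOW(0x10, 2, int)
#define VMCP_GETSIZE _IOR(0x10, 3, int)

int main(void)
{
	char buf[4096];
	int fd, rc = 0, size = 0, bufsize = 1 << 20;	/* 1 MiB response buffer */
	ssize_t n;

	fd = open("/dev/vmcp", O_RDWR);
	if (fd < 0)
		return 1;
	/* Size the response buffer before issuing the command. */
	ioctl(fd, VMCP_SETBUF, &bufsize);
	/* write() sends the command to CP via diagnose 8 ... */
	write(fd, "QUERY USERID", strlen("QUERY USERID"));
	ioctl(fd, VMCP_GETCODE, &rc);	/* CP return code */
	ioctl(fd, VMCP_GETSIZE, &size);	/* full response size */
	/* ... and read() returns CP's response. */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("rc=%d size=%d\n%s", rc, size, buf);
	}
	close(fd);
	return 0;
}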
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 432fc40990bd..f4166f80c4d4 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -143,7 +143,7 @@ static ssize_t chp_measurement_chars_read(struct file *filp,
sizeof(chp->cmg_chars));
}
-static struct bin_attribute chp_measurement_chars_attr = {
+static const struct bin_attribute chp_measurement_chars_attr = {
.attr = {
.name = "measurement_chars",
.mode = S_IRUSR,
@@ -197,7 +197,7 @@ static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute chp_measurement_attr = {
+static const struct bin_attribute chp_measurement_attr = {
.attr = {
.name = "measurement",
.mode = S_IRUSR,
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 7be01a58b44f..489b583f263d 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -612,7 +612,7 @@ static struct attribute *io_subchannel_attrs[] = {
NULL,
};
-static struct attribute_group io_subchannel_attr_group = {
+static const struct attribute_group io_subchannel_attr_group = {
.attrs = io_subchannel_attrs,
};
@@ -626,7 +626,7 @@ static struct attribute * ccwdev_attrs[] = {
NULL,
};
-static struct attribute_group ccwdev_attr_group = {
+static const struct attribute_group ccwdev_attr_group = {
.attrs = ccwdev_attrs,
};
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index ba6ac83a6c25..5ccfdc80d0ec 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -481,7 +481,7 @@ static int ccwchain_fetch_tic(struct ccwchain *chain,
ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1);
if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) {
- ccw->cda = (__u32) (addr_t) (iter->ch_ccw +
+ ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
(ccw->cda - ccw_head));
return 0;
}
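
Annotation: the vfio_ccw_cp.c one-liner is a pointer-arithmetic fix. ch_ccw is a struct ccw1 *, so adding the byte offset (ccw->cda - ccw_head) without a cast advances by that many CCWs (8 bytes each) instead of that many bytes. A standalone illustration of the bug class, with hypothetical values:

#include <stdio.h>

struct ccw1 { unsigned char bytes[8]; };	/* a CCW is 8 bytes long */

int main(void)
{
	struct ccw1 chain[32];
	unsigned long byte_off = 16;	/* byte offset of the TIC target */

	/* Scaled arithmetic: advances 16 CCWs, i.e. 128 bytes. */
	struct ccw1 *wrong = chain + byte_off;
	/* The patch's form: cast to char * first, advance 16 bytes. */
	struct ccw1 *right = (struct ccw1 *)((char *)chain + byte_off);

	printf("wrong: +%td bytes, right: +%td bytes\n",
	       (char *)wrong - (char *)chain, (char *)right - (char *)chain);
	return 0;
}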
diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h
index 287b4ad0999e..cd350345b3d2 100644
--- a/drivers/s390/crypto/ap_asm.h
+++ b/drivers/s390/crypto/ap_asm.h
@@ -69,16 +69,19 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
}
/**
- * ap_aqic(): Enable interruption for a specific AP.
+ * ap_aqic(): Control interruption for a specific AP.
* @qid: The AP queue number
+ * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
* @ind: The notification indicator byte
*
* Returns AP queue status.
*/
-static inline struct ap_queue_status ap_aqic(ap_qid_t qid, void *ind)
+static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
+ struct ap_qirq_ctrl qirqctrl,
+ void *ind)
{
register unsigned long reg0 asm ("0") = qid | (3UL << 24);
- register unsigned long reg1_in asm ("1") = (8UL << 44) | AP_ISC;
+ register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl;
register struct ap_queue_status reg1_out asm ("1");
register void *reg2 asm ("2") = ind;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 6dee598979e7..5f0be2040272 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -166,26 +166,51 @@ static int ap_configuration_available(void)
}
/**
+ * ap_apft_available(): Test if AP facilities test (APFT)
+ * facility is available.
+ *
+ * Returns 1 if APFT is available.
+ */
+static int ap_apft_available(void)
+{
+ return test_facility(15);
+}
+
+/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
+ * @tbit: Test facilities bit
* @info: Pointer to queue descriptor
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status
-ap_test_queue(ap_qid_t qid, unsigned long *info)
+struct ap_queue_status ap_test_queue(ap_qid_t qid,
+ int tbit,
+ unsigned long *info)
{
- if (test_facility(15))
- qid |= 1UL << 23; /* set APFT T bit*/
+ if (tbit)
+ qid |= 1UL << 23; /* set T bit*/
return ap_tapq(qid, info);
}
+EXPORT_SYMBOL(ap_test_queue);
-static inline int ap_query_configuration(void)
+/*
+ * ap_query_configuration(): Fetch cryptographic config info
+ *
+ * Returns the ap configuration info fetched via PQAP(QCI).
+ * On success 0 is returned, on failure a negative errno
+ * is returned, e.g. if the PQAP(QCI) instruction is not
+ * available, the return value will be -EOPNOTSUPP.
+ */
+int ap_query_configuration(struct ap_config_info *info)
{
- if (!ap_configuration)
+ if (!ap_configuration_available())
return -EOPNOTSUPP;
- return ap_qci(ap_configuration);
+ if (!info)
+ return -EINVAL;
+ return ap_qci(info);
}
+EXPORT_SYMBOL(ap_query_configuration);
/**
* ap_init_configuration(): Allocate and query configuration array.
@@ -198,7 +223,7 @@ static void ap_init_configuration(void)
ap_configuration = kzalloc(sizeof(*ap_configuration), GFP_KERNEL);
if (!ap_configuration)
return;
- if (ap_query_configuration() != 0) {
+ if (ap_query_configuration(ap_configuration) != 0) {
kfree(ap_configuration);
ap_configuration = NULL;
return;
@@ -261,7 +286,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
if (!ap_test_config_card_id(AP_QID_CARD(qid)))
return -ENODEV;
- status = ap_test_queue(qid, &info);
+ status = ap_test_queue(qid, ap_apft_available(), &info);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
*queue_depth = (int)(info & 0xff);
@@ -940,7 +965,9 @@ static int ap_select_domain(void)
for (j = 0; j < AP_DEVICES; j++) {
if (!ap_test_config_card_id(j))
continue;
- status = ap_test_queue(AP_MKQID(j, i), NULL);
+ status = ap_test_queue(AP_MKQID(j, i),
+ ap_apft_available(),
+ NULL);
if (status.response_code != AP_RESPONSE_NORMAL)
continue;
count++;
@@ -993,7 +1020,7 @@ static void ap_scan_bus(struct work_struct *unused)
AP_DBF(DBF_DEBUG, "ap_scan_bus running\n");
- ap_query_configuration();
+ ap_query_configuration(ap_configuration);
if (ap_select_domain() != 0)
goto out;
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 4dc7c88fb054..754cf2223cfb 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -28,6 +28,7 @@
#include <linux/device.h>
#include <linux/types.h>
+#include <asm/ap.h>
#define AP_DEVICES 64 /* Number of AP devices. */
#define AP_DOMAINS 256 /* Number of AP domains. */
@@ -40,41 +41,6 @@ extern int ap_domain_index;
extern spinlock_t ap_list_lock;
extern struct list_head ap_card_list;
-/**
- * The ap_qid_t identifier of an ap queue. It contains a
- * 6 bit card index and a 4 bit queue index (domain).
- */
-typedef unsigned int ap_qid_t;
-
-#define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255))
-#define AP_QID_CARD(_qid) (((_qid) >> 8) & 63)
-#define AP_QID_QUEUE(_qid) ((_qid) & 255)
-
-/**
- * structy ap_queue_status - Holds the AP queue status.
- * @queue_empty: Shows if queue is empty
- * @replies_waiting: Waiting replies
- * @queue_full: Is 1 if the queue is full
- * @pad: A 4 bit pad
- * @int_enabled: Shows if interrupts are enabled for the AP
- * @response_code: Holds the 8 bit response code
- * @pad2: A 16 bit pad
- *
- * The ap queue status word is returned by all three AP functions
- * (PQAP, NQAP and DQAP). There's a set of flags in the first
- * byte, followed by a 1 byte response code.
- */
-struct ap_queue_status {
- unsigned int queue_empty : 1;
- unsigned int replies_waiting : 1;
- unsigned int queue_full : 1;
- unsigned int pad1 : 4;
- unsigned int int_enabled : 1;
- unsigned int response_code : 8;
- unsigned int pad2 : 16;
-} __packed;
-
-
static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
{
return (*ptr & (0x80000000u >> nr)) != 0;
@@ -238,17 +204,6 @@ struct ap_message {
struct ap_message *);
};
-struct ap_config_info {
- unsigned int special_command:1;
- unsigned int ap_extended:1;
- unsigned char reserved1:6;
- unsigned char reserved2[15];
- unsigned int apm[8]; /* AP ID mask */
- unsigned int aqm[8]; /* AP queue mask */
- unsigned int adm[8]; /* AP domain mask */
- unsigned char reserved4[16];
-} __packed;
-
/**
* ap_init_message() - Initialize ap_message.
* Initialize a message before using. Otherwise this might result in
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 0f1a5d02acb0..56b96edffd5b 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -16,6 +16,25 @@
#include "ap_asm.h"
/**
+ * ap_queue_irq_ctrl(): Control interruption on an AP queue.
+ * @qid: The AP queue number
+ * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
+ * @ind: The notification indicator byte
+ *
+ * Returns AP queue status.
+ *
+ * Control interruption on the given AP queue.
+ * Just a simple wrapper function for the low level PQAP(AQIC)
+ * instruction available for other kernel modules.
+ */
+struct ap_queue_status ap_queue_irq_ctrl(ap_qid_t qid,
+ struct ap_qirq_ctrl qirqctrl,
+ void *ind)
+{
+ return ap_aqic(qid, qirqctrl, ind);
+}
+EXPORT_SYMBOL(ap_queue_irq_ctrl);
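A sketch of how a module might use the new export; example_enable_irq() is hypothetical, and the ap_qirq_ctrl setup mirrors the ap_queue_enable_interruption() conversion below.

static int example_enable_irq(ap_qid_t qid, void *ind)
{
	struct ap_qirq_ctrl qirqctrl = { 0 };
	struct ap_queue_status status;

	qirqctrl.ir = 1;		/* request interruption */
	qirqctrl.isc = AP_ISC;		/* adapter interruption subclass */
	status = ap_queue_irq_ctrl(qid, qirqctrl, ind);
	return (status.response_code == AP_RESPONSE_NORMAL) ? 0 : -EIO;
}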
+
+/**
* ap_queue_enable_interruption(): Enable interruption on an AP queue.
* @qid: The AP queue number
* @ind: the notification indicator byte
@@ -27,8 +46,11 @@
static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
{
struct ap_queue_status status;
+ struct ap_qirq_ctrl qirqctrl = { 0 };
- status = ap_aqic(aq->qid, ind);
+ qirqctrl.ir = 1;
+ qirqctrl.isc = AP_ISC;
+ status = ap_aqic(aq->qid, qirqctrl, ind);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_OTHERWISE_CHANGED:
@@ -362,7 +384,7 @@ static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
/* Get the status with TAPQ */
status = ap_tapq(aq->qid, NULL);
- if (status.int_enabled == 1) {
+ if (status.irq_enabled == 1) {
/* Irqs are now enabled */
aq->interrupt = AP_INTR_ENABLED;
aq->state = (aq->queue_count > 0) ?
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index 53436ea52230..f85dacf1c284 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -98,7 +98,7 @@ static struct attribute *zcrypt_card_attrs[] = {
NULL,
};
-static struct attribute_group zcrypt_card_attr_group = {
+static const struct attribute_group zcrypt_card_attr_group = {
.attrs = zcrypt_card_attrs,
};
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 4fddb4319481..afd20cee7ea0 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -140,7 +140,7 @@ struct function_and_rules_block {
* + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
* - VUD block
*/
-static struct CPRBX static_cprbx = {
+static const struct CPRBX static_cprbx = {
.cprb_len = 0x00DC,
.cprb_ver_id = 0x02,
.func_id = {0x54, 0x32},
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index a303f3b2c328..4742be0eec24 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -89,7 +89,7 @@ static struct attribute *zcrypt_queue_attrs[] = {
NULL,
};
-static struct attribute_group zcrypt_queue_attr_group = {
+static const struct attribute_group zcrypt_queue_attr_group = {
.attrs = zcrypt_queue_attrs,
};
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 2ade6131a89f..26363e0816fe 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -305,7 +305,7 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
* ch The channel, the sense code belongs to.
* sense The sense code to inspect.
*/
-static inline void ccw_unit_check(struct channel *ch, __u8 sense)
+static void ccw_unit_check(struct channel *ch, __u8 sense)
{
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"%s(%s): %02x",
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 619da81dca70..d01b5c2a7760 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -327,8 +327,7 @@ lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
spin_unlock_irqrestore(&card->mask_lock, flags);
wake_up(&card->wait_q);
}
-static inline int
-lcs_threads_running(struct lcs_card *card, unsigned long threads)
+static int lcs_threads_running(struct lcs_card *card, unsigned long threads)
{
unsigned long flags;
int rc = 0;
@@ -346,8 +345,7 @@ lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
lcs_threads_running(card, threads) == 0);
}
-static inline int
-lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
+static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
{
unsigned long flags;
@@ -373,8 +371,7 @@ lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
wake_up(&card->wait_q);
}
-static inline int
-__lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
+static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
{
unsigned long flags;
int rc = 0;
@@ -444,8 +441,7 @@ lcs_setup_card(struct lcs_card *card)
INIT_LIST_HEAD(&card->lancmd_waiters);
}
-static inline void
-lcs_clear_multicast_list(struct lcs_card *card)
+static void lcs_clear_multicast_list(struct lcs_card *card)
{
#ifdef CONFIG_IP_MULTICAST
struct lcs_ipm_list *ipm;
@@ -656,8 +652,7 @@ __lcs_resume_channel(struct lcs_channel *channel)
/**
* Make a buffer ready for processing.
*/
-static inline void
-__lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
+static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
{
int prev, next;
@@ -1169,8 +1164,8 @@ lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
/**
* function called by net device to handle multicast address relevant things
*/
-static inline void
-lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
+static void lcs_remove_mc_addresses(struct lcs_card *card,
+ struct in_device *in4_dev)
{
struct ip_mc_list *im4;
struct list_head *l;
@@ -1196,8 +1191,9 @@ lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
spin_unlock_irqrestore(&card->ipm_lock, flags);
}
-static inline struct lcs_ipm_list *
-lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf)
+static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card,
+ struct ip_mc_list *im4,
+ char *buf)
{
struct lcs_ipm_list *tmp, *ipm = NULL;
struct list_head *l;
@@ -1218,8 +1214,8 @@ lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf)
return ipm;
}
-static inline void
-lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
+static void lcs_set_mc_addresses(struct lcs_card *card,
+ struct in_device *in4_dev)
{
struct ip_mc_list *im4;
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 7e0e6a4019f3..b9c7c1e61da2 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -249,14 +249,14 @@ struct ll_header {
* Compatibility macros for busy handling
* of network devices.
*/
-static inline void netiucv_clear_busy(struct net_device *dev)
+static void netiucv_clear_busy(struct net_device *dev)
{
struct netiucv_priv *priv = netdev_priv(dev);
clear_bit(0, &priv->tbusy);
netif_wake_queue(dev);
}
-static inline int netiucv_test_and_set_busy(struct net_device *dev)
+static int netiucv_test_and_set_busy(struct net_device *dev)
{
struct netiucv_priv *priv = netdev_priv(dev);
netif_stop_queue(dev);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 7a0ffc71b25d..59e09854c4f7 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -857,11 +857,6 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
}
}
-static inline int qeth_get_ip_protocol(struct sk_buff *skb)
-{
- return ip_hdr(skb)->protocol;
-}
-
static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
struct qeth_buffer_pool_entry *entry)
{
@@ -951,10 +946,13 @@ int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
int extra_elems, int data_offset);
int qeth_get_elements_for_frags(struct sk_buff *);
-int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
- struct sk_buff *, struct qeth_hdr *, int, int);
-int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
- struct sk_buff *, struct qeth_hdr *, int);
+int qeth_do_send_packet_fast(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue, struct sk_buff *skb,
+ struct qeth_hdr *hdr, unsigned int offset,
+ unsigned int hd_len);
+int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ unsigned int hd_len, unsigned int offset, int elements);
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int qeth_core_get_sset_count(struct net_device *, int);
void qeth_core_get_ethtool_stats(struct net_device *,
@@ -987,6 +985,7 @@ int qeth_set_features(struct net_device *, netdev_features_t);
int qeth_recover_features(struct net_device *);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
int qeth_vm_request_mac(struct qeth_card *card);
+int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 4792cabb862e..bae7440abc01 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -101,7 +101,7 @@ void qeth_close_dev(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_close_dev);
-static inline const char *qeth_get_cardname(struct qeth_card *card)
+static const char *qeth_get_cardname(struct qeth_card *card)
{
if (card->info.guestlan) {
switch (card->info.type) {
@@ -330,7 +330,7 @@ static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
return q;
}
-static inline int qeth_cq_init(struct qeth_card *card)
+static int qeth_cq_init(struct qeth_card *card)
{
int rc;
@@ -352,7 +352,7 @@ out:
return rc;
}
-static inline int qeth_alloc_cq(struct qeth_card *card)
+static int qeth_alloc_cq(struct qeth_card *card)
{
int rc;
@@ -397,7 +397,7 @@ kmsg_out:
goto out;
}
-static inline void qeth_free_cq(struct qeth_card *card)
+static void qeth_free_cq(struct qeth_card *card)
{
if (card->qdio.c_q) {
--card->qdio.no_in_queues;
@@ -408,8 +408,9 @@ static inline void qeth_free_cq(struct qeth_card *card)
card->qdio.out_bufstates = NULL;
}
-static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
- int delayed) {
+static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
+ int delayed)
+{
enum iucv_tx_notify n;
switch (sbalf15) {
@@ -432,8 +433,8 @@ static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
return n;
}
-static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
- int bidx, int forced_cleanup)
+static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
+ int forced_cleanup)
{
if (q->card->options.cq != QETH_CQ_ENABLED)
return;
@@ -475,8 +476,9 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
}
-static inline void qeth_qdio_handle_aob(struct qeth_card *card,
- unsigned long phys_aob_addr) {
+static void qeth_qdio_handle_aob(struct qeth_card *card,
+ unsigned long phys_aob_addr)
+{
struct qaob *aob;
struct qeth_qdio_out_buffer *buffer;
enum iucv_tx_notify notification;
@@ -2228,7 +2230,7 @@ static int qeth_cm_setup(struct qeth_card *card)
}
-static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
+static int qeth_get_initial_mtu_for_card(struct qeth_card *card)
{
switch (card->info.type) {
case QETH_CARD_TYPE_UNKNOWN:
@@ -2251,7 +2253,7 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
}
}
-static inline int qeth_get_mtu_outof_framesize(int framesize)
+static int qeth_get_mtu_outof_framesize(int framesize)
{
switch (framesize) {
case 0x4000:
@@ -2267,7 +2269,7 @@ static inline int qeth_get_mtu_outof_framesize(int framesize)
}
}
-static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
+static int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
{
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
@@ -2738,8 +2740,8 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card)
}
}
-static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
- struct qeth_card *card)
+static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
+ struct qeth_card *card)
{
struct list_head *plh;
struct qeth_buffer_pool_entry *entry;
@@ -2870,7 +2872,7 @@ int qeth_init_qdio_queues(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
-static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
+static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
{
switch (link_type) {
case QETH_LINK_TYPE_HSTR:
@@ -3888,27 +3890,45 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
}
EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
-static inline void __qeth_fill_buffer(struct sk_buff *skb,
- struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
- int offset)
+/**
+ * qeth_push_hdr() - push a qeth_hdr onto an skb.
+ * @skb: skb that the qeth_hdr should be pushed onto.
+ * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
+ * it contains a valid pointer to a qeth_hdr.
+ * @len: length of the hdr that needs to be pushed on.
+ *
+ * Returns the pushed length. If the header can't be pushed on
+ * (e.g. because it would cross a page boundary), it is allocated from
+ * the cache instead and 0 is returned.
+ * A failure to allocate the hdr is indicated by a return value < 0.
+ */
+int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len)
{
- int length = skb_headlen(skb);
- int length_here;
- int element;
- char *data;
- int first_lap, cnt;
- struct skb_frag_struct *frag;
-
- element = *next_element_to_fill;
- data = skb->data;
- first_lap = (is_tso == 0 ? 1 : 0);
-
- if (offset >= 0) {
- data = skb->data + offset;
- length -= offset;
- first_lap = 0;
+ if (skb_headroom(skb) >= len &&
+ qeth_get_elements_for_range((addr_t)skb->data - len,
+ (addr_t)skb->data) == 1) {
+ *hdr = skb_push(skb, len);
+ return len;
}
+ /* fall back */
+ *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+ if (!*hdr)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_push_hdr);
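The calling convention is easiest to see in isolation; example_build_hdr() is hypothetical and condenses the pattern used by qeth_l2_xmit_osa() later in this patch.

static int example_build_hdr(struct sk_buff *skb, struct qeth_hdr **hdr,
			     unsigned int *hd_len)
{
	int push_len = qeth_push_hdr(skb, hdr, sizeof(**hdr));

	if (push_len < 0)		/* cache allocation failed */
		return push_len;
	/* 0 means the hdr is cache-allocated: it needs a dedicated header
	 * element, and a kmem_cache_free() on error paths */
	*hd_len = push_len ? 0 : sizeof(**hdr);
	return 0;
}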
+
+static void __qeth_fill_buffer(struct sk_buff *skb,
+ struct qeth_qdio_out_buffer *buf,
+ bool is_first_elem, unsigned int offset)
+{
+ struct qdio_buffer *buffer = buf->buffer;
+ int element = buf->next_element_to_fill;
+ int length = skb_headlen(skb) - offset;
+ char *data = skb->data + offset;
+ int length_here, cnt;
+ /* map linear part into buffer element(s) */
while (length > 0) {
/* length_here is the remaining amount of data in this page */
length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
@@ -3918,34 +3938,28 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
buffer->element[element].addr = data;
buffer->element[element].length = length_here;
length -= length_here;
- if (!length) {
- if (first_lap)
- if (skb_shinfo(skb)->nr_frags)
- buffer->element[element].eflags =
- SBAL_EFLAGS_FIRST_FRAG;
- else
- buffer->element[element].eflags = 0;
- else
+ if (is_first_elem) {
+ is_first_elem = false;
+ if (length || skb_is_nonlinear(skb))
+ /* skb needs additional elements */
buffer->element[element].eflags =
- SBAL_EFLAGS_MIDDLE_FRAG;
- } else {
- if (first_lap)
- buffer->element[element].eflags =
- SBAL_EFLAGS_FIRST_FRAG;
+ SBAL_EFLAGS_FIRST_FRAG;
else
- buffer->element[element].eflags =
- SBAL_EFLAGS_MIDDLE_FRAG;
+ buffer->element[element].eflags = 0;
+ } else {
+ buffer->element[element].eflags =
+ SBAL_EFLAGS_MIDDLE_FRAG;
}
data += length_here;
element++;
- first_lap = 0;
}
+ /* map page frags into buffer element(s) */
for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
- frag = &skb_shinfo(skb)->frags[cnt];
- data = (char *)page_to_phys(skb_frag_page(frag)) +
- frag->page_offset;
- length = frag->size;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
+
+ data = skb_frag_address(frag);
+ length = skb_frag_size(frag);
while (length > 0) {
length_here = PAGE_SIZE -
((unsigned long) data % PAGE_SIZE);
@@ -3964,48 +3978,45 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
if (buffer->element[element - 1].eflags)
buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
- *next_element_to_fill = element;
+ buf->next_element_to_fill = element;
}
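A worked example of the page-boundary arithmetic above, assuming 4 KiB pages:

/*
 * length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE) caps each
 * element at the next page boundary. E.g. a 64-byte linear part starting
 * at offset 0xff0 within a page is mapped as two elements:
 *
 *   element n:   16 bytes (0xff0 .. 0xfff)
 *   element n+1: 48 bytes (start of the next page)
 */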
-static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf, struct sk_buff *skb,
- struct qeth_hdr *hdr, int offset, int hd_len)
+/**
+ * qeth_fill_buffer() - map skb into an output buffer
+ * @queue: QDIO queue to submit the buffer on
+ * @buf: buffer to transport the skb
+ * @skb: skb to map into the buffer
+ * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
+ * from qeth_core_header_cache.
+ * @offset: when mapping the skb, start at skb->data + offset
+ * @hd_len: if > 0, build a dedicated header element of this size
+ */
+static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ unsigned int offset, unsigned int hd_len)
{
- struct qdio_buffer *buffer;
- int flush_cnt = 0, hdr_len, large_send = 0;
+ struct qdio_buffer *buffer = buf->buffer;
+ bool is_first_elem = true;
+ int flush_cnt = 0;
- buffer = buf->buffer;
refcount_inc(&skb->users);
skb_queue_tail(&buf->skb_list, skb);
- /*check first on TSO ....*/
- if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) {
+ /* build dedicated header element */
+ if (hd_len) {
int element = buf->next_element_to_fill;
+ is_first_elem = false;
- hdr_len = sizeof(struct qeth_hdr_tso) +
- ((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len;
- /*fill first buffer entry only with header information */
- buffer->element[element].addr = skb->data;
- buffer->element[element].length = hdr_len;
- buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
- buf->next_element_to_fill++;
- skb->data += hdr_len;
- skb->len -= hdr_len;
- large_send = 1;
- }
-
- if (offset >= 0) {
- int element = buf->next_element_to_fill;
buffer->element[element].addr = hdr;
- buffer->element[element].length = sizeof(struct qeth_hdr) +
- hd_len;
+ buffer->element[element].length = hd_len;
buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
- buf->is_header[element] = 1;
+ /* remember to free cache-allocated qeth_hdr: */
+ buf->is_header[element] = ((void *)hdr != skb->data);
buf->next_element_to_fill++;
}
- __qeth_fill_buffer(skb, buffer, large_send,
- (int *)&buf->next_element_to_fill, offset);
+ __qeth_fill_buffer(skb, buf, is_first_elem, offset);
if (!queue->do_pack) {
QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
@@ -4030,8 +4041,9 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
}
int qeth_do_send_packet_fast(struct qeth_card *card,
- struct qeth_qdio_out_q *queue, struct sk_buff *skb,
- struct qeth_hdr *hdr, int offset, int hd_len)
+ struct qeth_qdio_out_q *queue, struct sk_buff *skb,
+ struct qeth_hdr *hdr, unsigned int offset,
+ unsigned int hd_len)
{
struct qeth_qdio_out_buffer *buffer;
int index;
@@ -4061,8 +4073,9 @@ out:
EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
- struct sk_buff *skb, struct qeth_hdr *hdr,
- int elements_needed)
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ unsigned int offset, unsigned int hd_len,
+ int elements_needed)
{
struct qeth_qdio_out_buffer *buffer;
int start_index;
@@ -4111,7 +4124,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
}
}
- tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
+ tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
QDIO_MAX_BUFFERS_PER_Q;
flush_count += tmp;
@@ -4834,7 +4847,7 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
-static inline int qeth_get_qdio_q_format(struct qeth_card *card)
+static int qeth_get_qdio_q_format(struct qeth_card *card)
{
if (card->info.type == QETH_CARD_TYPE_IQD)
return QDIO_IQDIO_QFMT;
@@ -4899,9 +4912,12 @@ out:
return;
}
-static inline void qeth_qdio_establish_cq(struct qeth_card *card,
- struct qdio_buffer **in_sbal_ptrs,
- void (**queue_start_poll) (struct ccw_device *, int, unsigned long)) {
+static void qeth_qdio_establish_cq(struct qeth_card *card,
+ struct qdio_buffer **in_sbal_ptrs,
+ void (**queue_start_poll)
+ (struct ccw_device *, int,
+ unsigned long))
+{
int i;
if (card->options.cq == QETH_CQ_ENABLED) {
@@ -5193,9 +5209,10 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
-static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer,
- struct qdio_buffer_element *element,
- struct sk_buff **pskb, int offset, int *pfrag, int data_len)
+static int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer,
+ struct qdio_buffer_element *element,
+ struct sk_buff **pskb, int offset, int *pfrag,
+ int data_len)
{
struct page *page = virt_to_page(element->addr);
if (*pskb == NULL) {
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 6d255c22656d..d1ee9e30c68b 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -78,7 +78,7 @@ static ssize_t qeth_dev_card_type_show(struct device *dev,
static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
-static inline const char *qeth_get_bufsize_str(struct qeth_card *card)
+static const char *qeth_get_bufsize_str(struct qeth_card *card)
{
if (card->qdio.in_buf_size == 16384)
return "16k";
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index ad110abfdd47..760b023eae95 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -231,13 +231,7 @@ static void qeth_l2_del_all_macs(struct qeth_card *card)
spin_unlock_bh(&card->mclock);
}
-static inline u32 qeth_l2_mac_hash(const u8 *addr)
-{
- return get_unaligned((u32 *)(&addr[2]));
-}
-
-static inline int qeth_l2_get_cast_type(struct qeth_card *card,
- struct sk_buff *skb)
+static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
{
if (card->info.type == QETH_CARD_TYPE_OSN)
return RTN_UNSPEC;
@@ -248,8 +242,8 @@ static inline int qeth_l2_get_cast_type(struct qeth_card *card,
return RTN_UNSPEC;
}
-static inline void qeth_l2_hdr_csum(struct qeth_card *card,
- struct qeth_hdr *hdr, struct sk_buff *skb)
+static void qeth_l2_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr,
+ struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
@@ -265,13 +259,14 @@ static inline void qeth_l2_hdr_csum(struct qeth_card *card,
card->perf_stats.tx_csum++;
}
-static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb, int cast_type)
+static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
+ int cast_type, unsigned int data_len)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
memset(hdr, 0, sizeof(struct qeth_hdr));
hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
+ hdr->hdr.l2.pkt_length = data_len;
/* set byte 3 to casting flags */
if (cast_type == RTN_MULTICAST)
@@ -281,7 +276,6 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
else
hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
- hdr->hdr.l2.pkt_length = skb->len - sizeof(struct qeth_hdr);
/* VSWITCH relies on the VLAN
* information to be present in
* the QDIO header */
@@ -519,15 +513,6 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
/* fall back to alternative mechanism: */
}
- if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
- rc = qeth_query_setadapterparms(card);
- if (rc) {
- QETH_DBF_MESSAGE(2, "could not query adapter "
- "parameters on device %s: x%x\n",
- CARD_BUS_ID(card), rc);
- }
- }
-
if (card->info.type == QETH_CARD_TYPE_IQD ||
card->info.type == QETH_CARD_TYPE_OSM ||
card->info.type == QETH_CARD_TYPE_OSX ||
@@ -615,13 +600,13 @@ static void qeth_promisc_to_bridge(struct qeth_card *card)
* only if it is not in the hash table storage already
*
*/
-static void
-qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc)
+static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha,
+ u8 is_uc)
{
+ u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
struct qeth_mac *mac;
- hash_for_each_possible(card->mac_htable, mac, hnode,
- qeth_l2_mac_hash(ha->addr)) {
+ hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) {
if (is_uc == mac->is_uc &&
!memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) {
mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
@@ -638,9 +623,7 @@ qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc)
mac->is_uc = is_uc;
mac->disp_flag = QETH_DISP_ADDR_ADD;
- hash_add(card->mac_htable, &mac->hnode,
- qeth_l2_mac_hash(mac->mac_addr));
-
+ hash_add(card->mac_htable, &mac->hnode, mac_hash);
}
static void qeth_l2_set_rx_mode(struct net_device *dev)
@@ -693,21 +676,127 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
qeth_promisc_to_bridge(card);
}
-static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_qdio_out_q *queue, int cast_type)
{
+ unsigned int data_offset = ETH_HLEN;
+ struct qeth_hdr *hdr;
int rc;
+
+ hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+ if (!hdr)
+ return -ENOMEM;
+ qeth_l2_fill_header(hdr, skb, cast_type, skb->len);
+ skb_copy_from_linear_data(skb, ((char *)hdr) + sizeof(*hdr),
+ data_offset);
+
+ if (!qeth_get_elements_no(card, skb, 1, data_offset)) {
+ rc = -E2BIG;
+ goto out;
+ }
+ rc = qeth_do_send_packet_fast(card, queue, skb, hdr, data_offset,
+ sizeof(*hdr) + data_offset);
+out:
+ if (rc)
+ kmem_cache_free(qeth_core_header_cache, hdr);
+ return rc;
+}
+
+static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_qdio_out_q *queue, int cast_type)
+{
+ int push_len = sizeof(struct qeth_hdr);
+ unsigned int elements, nr_frags;
+ unsigned int hdr_elements = 0;
struct qeth_hdr *hdr = NULL;
- int elements = 0;
+ unsigned int hd_len = 0;
+ int rc;
+
+ /* fix hardware limitation: as long as we do not have sbal
+ * chaining we cannot send long frag lists
+ */
+ if (!qeth_get_elements_no(card, skb, 0, 0)) {
+ rc = skb_linearize(skb);
+
+ if (card->options.performance_stats) {
+ if (rc)
+ card->perf_stats.tx_linfail++;
+ else
+ card->perf_stats.tx_lin++;
+ }
+ if (rc)
+ return rc;
+ }
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ rc = skb_cow_head(skb, push_len);
+ if (rc)
+ return rc;
+ push_len = qeth_push_hdr(skb, &hdr, push_len);
+ if (push_len < 0)
+ return push_len;
+ if (!push_len) {
+ /* hdr was allocated from cache */
+ hd_len = sizeof(*hdr);
+ hdr_elements = 1;
+ }
+ qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ qeth_l2_hdr_csum(card, hdr, skb);
+
+ elements = qeth_get_elements_no(card, skb, hdr_elements, 0);
+ if (!elements) {
+ rc = -E2BIG;
+ goto out;
+ }
+ elements += hdr_elements;
+
+ /* TODO: remove the skb_orphan() once TX completion is fast enough */
+ skb_orphan(skb);
+ rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements);
+out:
+ if (!rc) {
+ if (card->options.performance_stats && nr_frags) {
+ card->perf_stats.sg_skbs_sent++;
+ /* nr_frags + skb->data */
+ card->perf_stats.sg_frags_sent += nr_frags + 1;
+ }
+ } else {
+ if (hd_len)
+ kmem_cache_free(qeth_core_header_cache, hdr);
+ if (rc == -EBUSY)
+ /* roll back to ETH header */
+ skb_pull(skb, push_len);
+ }
+ return rc;
+}
+
+static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_qdio_out_q *queue)
+{
+ unsigned int elements;
+ struct qeth_hdr *hdr;
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+
+ hdr = (struct qeth_hdr *)skb->data;
+ elements = qeth_get_elements_no(card, skb, 0, 0);
+ if (!elements)
+ return -E2BIG;
+ if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr)))
+ return -EINVAL;
+ return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements);
+}
+
+static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
struct qeth_card *card = dev->ml_priv;
- struct sk_buff *new_skb = skb;
int cast_type = qeth_l2_get_cast_type(card, skb);
struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
- int data_offset = -1;
- int elements_needed = 0;
- int hd_len = 0;
- int nr_frags;
+ int rc;
if (card->qdio.do_prio_queueing || (cast_type &&
card->info.is_multicast_different))
@@ -721,118 +810,38 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
goto tx_drop;
}
- if ((card->info.type == QETH_CARD_TYPE_OSN) &&
- (skb->protocol == htons(ETH_P_IPV6)))
- goto tx_drop;
-
if (card->options.performance_stats) {
card->perf_stats.outbound_cnt++;
card->perf_stats.outbound_start_time = qeth_get_micros();
}
netif_stop_queue(dev);
- /* fix hardware limitation: as long as we do not have sbal
- * chaining we can not send long frag lists
- */
- if ((card->info.type != QETH_CARD_TYPE_IQD) &&
- !qeth_get_elements_no(card, new_skb, 0, 0)) {
- int lin_rc = skb_linearize(new_skb);
-
- if (card->options.performance_stats) {
- if (lin_rc)
- card->perf_stats.tx_linfail++;
- else
- card->perf_stats.tx_lin++;
- }
- if (lin_rc)
- goto tx_drop;
- }
-
- if (card->info.type == QETH_CARD_TYPE_OSN)
- hdr = (struct qeth_hdr *)skb->data;
- else {
- if (card->info.type == QETH_CARD_TYPE_IQD) {
- new_skb = skb;
- data_offset = ETH_HLEN;
- hd_len = ETH_HLEN;
- hdr = kmem_cache_alloc(qeth_core_header_cache,
- GFP_ATOMIC);
- if (!hdr)
- goto tx_drop;
- elements_needed++;
- skb_reset_mac_header(new_skb);
- qeth_l2_fill_header(card, hdr, new_skb, cast_type);
- hdr->hdr.l2.pkt_length = new_skb->len;
- memcpy(((char *)hdr) + sizeof(struct qeth_hdr),
- skb_mac_header(new_skb), ETH_HLEN);
- } else {
- /* create a clone with writeable headroom */
- new_skb = skb_realloc_headroom(skb,
- sizeof(struct qeth_hdr));
- if (!new_skb)
- goto tx_drop;
- hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
- skb_set_mac_header(new_skb, sizeof(struct qeth_hdr));
- qeth_l2_fill_header(card, hdr, new_skb, cast_type);
- if (new_skb->ip_summed == CHECKSUM_PARTIAL)
- qeth_l2_hdr_csum(card, hdr, new_skb);
- }
- }
-
- elements = qeth_get_elements_no(card, new_skb, elements_needed,
- (data_offset > 0) ? data_offset : 0);
- if (!elements) {
- if (data_offset >= 0)
- kmem_cache_free(qeth_core_header_cache, hdr);
- goto tx_drop;
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_OSN:
+ rc = qeth_l2_xmit_osn(card, skb, queue);
+ break;
+ case QETH_CARD_TYPE_IQD:
+ rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type);
+ break;
+ default:
+ rc = qeth_l2_xmit_osa(card, skb, queue, cast_type);
}
- if (card->info.type != QETH_CARD_TYPE_IQD) {
- if (qeth_hdr_chk_and_bounce(new_skb, &hdr,
- sizeof(struct qeth_hdr_layer2)))
- goto tx_drop;
- rc = qeth_do_send_packet(card, queue, new_skb, hdr,
- elements);
- } else
- rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
- data_offset, hd_len);
if (!rc) {
card->stats.tx_packets++;
card->stats.tx_bytes += tx_bytes;
- if (card->options.performance_stats) {
- nr_frags = skb_shinfo(new_skb)->nr_frags;
- if (nr_frags) {
- card->perf_stats.sg_skbs_sent++;
- /* nr_frags + skb->data */
- card->perf_stats.sg_frags_sent += nr_frags + 1;
- }
- }
- if (new_skb != skb)
- dev_kfree_skb_any(skb);
- rc = NETDEV_TX_OK;
- } else {
- if (data_offset >= 0)
- kmem_cache_free(qeth_core_header_cache, hdr);
-
- if (rc == -EBUSY) {
- if (new_skb != skb)
- dev_kfree_skb_any(new_skb);
- return NETDEV_TX_BUSY;
- } else
- goto tx_drop;
- }
-
- netif_wake_queue(dev);
- if (card->options.performance_stats)
- card->perf_stats.outbound_time += qeth_get_micros() -
- card->perf_stats.outbound_start_time;
- return rc;
+ if (card->options.performance_stats)
+ card->perf_stats.outbound_time += qeth_get_micros() -
+ card->perf_stats.outbound_start_time;
+ netif_wake_queue(dev);
+ return NETDEV_TX_OK;
+ } else if (rc == -EBUSY) {
+ return NETDEV_TX_BUSY;
+ } /* else fall through */
tx_drop:
card->stats.tx_dropped++;
card->stats.tx_errors++;
- if ((new_skb != skb) && new_skb)
- dev_kfree_skb_any(new_skb);
dev_kfree_skb_any(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
@@ -1010,6 +1019,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->vlan_features |= NETIF_F_RXCSUM;
}
}
+ if (card->info.type != QETH_CARD_TYPE_OSN &&
+ card->info.type != QETH_CARD_TYPE_IQD) {
+ card->dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ card->dev->needed_headroom = sizeof(struct qeth_hdr);
+ }
+
card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card);
card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
@@ -1744,11 +1759,26 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
return rc;
}
-static inline int ipa_cmd_sbp(struct qeth_card *card)
+static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card,
+ enum qeth_ipa_sbp_cmd sbp_cmd,
+ unsigned int cmd_length)
{
- return (card->info.type == QETH_CARD_TYPE_IQD) ?
- IPA_CMD_SETBRIDGEPORT_IQD :
- IPA_CMD_SETBRIDGEPORT_OSA;
+ enum qeth_ipa_cmds ipa_cmd = (card->info.type == QETH_CARD_TYPE_IQD) ?
+ IPA_CMD_SETBRIDGEPORT_IQD :
+ IPA_CMD_SETBRIDGEPORT_OSA;
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+
+ iob = qeth_get_ipacmd_buffer(card, ipa_cmd, 0);
+ if (!iob)
+ return iob;
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.sbp.hdr.cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
+ cmd_length;
+ cmd->data.sbp.hdr.command_code = sbp_cmd;
+ cmd->data.sbp.hdr.used_total = 1;
+ cmd->data.sbp.hdr.seq_no = 1;
+ return iob;
}
static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
@@ -1778,21 +1808,13 @@ static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
static void qeth_bridgeport_query_support(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
struct _qeth_sbp_cbctl cbctl;
QETH_CARD_TEXT(card, 2, "brqsuppo");
- iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
+ iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED,
+ sizeof(struct qeth_sbp_query_cmds_supp));
if (!iob)
return;
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.sbp.hdr.cmdlength =
- sizeof(struct qeth_ipacmd_sbp_hdr) +
- sizeof(struct qeth_sbp_query_cmds_supp);
- cmd->data.sbp.hdr.command_code =
- IPA_SBP_QUERY_COMMANDS_SUPPORTED;
- cmd->data.sbp.hdr.used_total = 1;
- cmd->data.sbp.hdr.seq_no = 1;
if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb,
(void *)&cbctl) ||
qeth_bridgeport_makerc(card, &cbctl,
@@ -1846,7 +1868,6 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
{
int rc = 0;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
struct _qeth_sbp_cbctl cbctl = {
.data = {
.qports = {
@@ -1859,16 +1880,9 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "brqports");
if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
return -EOPNOTSUPP;
- iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
+ iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0);
if (!iob)
return -ENOMEM;
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.sbp.hdr.cmdlength =
- sizeof(struct qeth_ipacmd_sbp_hdr);
- cmd->data.sbp.hdr.command_code =
- IPA_SBP_QUERY_BRIDGE_PORTS;
- cmd->data.sbp.hdr.used_total = 1;
- cmd->data.sbp.hdr.seq_no = 1;
rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
(void *)&cbctl);
if (rc < 0)
@@ -1900,7 +1914,6 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
int rc = 0;
int cmdlength;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
struct _qeth_sbp_cbctl cbctl;
enum qeth_ipa_sbp_cmd setcmd;
@@ -1908,32 +1921,24 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
switch (role) {
case QETH_SBP_ROLE_NONE:
setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE;
- cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
- sizeof(struct qeth_sbp_reset_role);
+ cmdlength = sizeof(struct qeth_sbp_reset_role);
break;
case QETH_SBP_ROLE_PRIMARY:
setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT;
- cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
- sizeof(struct qeth_sbp_set_primary);
+ cmdlength = sizeof(struct qeth_sbp_set_primary);
break;
case QETH_SBP_ROLE_SECONDARY:
setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT;
- cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
- sizeof(struct qeth_sbp_set_secondary);
+ cmdlength = sizeof(struct qeth_sbp_set_secondary);
break;
default:
return -EINVAL;
}
if (!(card->options.sbp.supported_funcs & setcmd))
return -EOPNOTSUPP;
- iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
+ iob = qeth_sbp_build_cmd(card, setcmd, cmdlength);
if (!iob)
return -ENOMEM;
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.sbp.hdr.cmdlength = cmdlength;
- cmd->data.sbp.hdr.command_code = setcmd;
- cmd->data.sbp.hdr.used_total = 1;
- cmd->data.sbp.hdr.seq_no = 1;
rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb,
(void *)&cbctl);
if (rc < 0)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8975cd321390..ab661a431f7c 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -247,7 +247,8 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
return -ENOENT;
addr->ref_counter--;
- if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
+ if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL ||
+ addr->type == QETH_IP_TYPE_RXIP))
return rc;
if (addr->in_progress)
return -EINPROGRESS;
@@ -329,8 +330,9 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
kfree(addr);
}
} else {
- if (addr->type == QETH_IP_TYPE_NORMAL)
- addr->ref_counter++;
+ if (addr->type == QETH_IP_TYPE_NORMAL ||
+ addr->type == QETH_IP_TYPE_RXIP)
+ addr->ref_counter++;
}
return rc;
@@ -784,11 +786,11 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
- QETH_CARD_TEXT(card, 2, "addrxip4");
+ QETH_CARD_TEXT(card, 2, "delrxip4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
- QETH_CARD_TEXT(card, 2, "addrxip6");
+ QETH_CARD_TEXT(card, 2, "delrxip6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@@ -867,7 +869,7 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
return rc;
}
-static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
+static u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
{
if (cast_type == RTN_MULTICAST)
return QETH_CAST_MULTICAST;
@@ -876,7 +878,7 @@ static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
return QETH_CAST_UNICAST;
}
-static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
+static u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
{
u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
if (cast_type == RTN_MULTICAST)
@@ -890,22 +892,10 @@ static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
static int qeth_l3_setadapter_parms(struct qeth_card *card)
{
- int rc;
+ int rc = 0;
QETH_DBF_TEXT(SETUP, 2, "setadprm");
- if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
- dev_info(&card->gdev->dev,
- "set adapter parameters not supported.\n");
- QETH_DBF_TEXT(SETUP, 2, " notsupp");
- return 0;
- }
- rc = qeth_query_setadapterparms(card);
- if (rc) {
- QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: "
- "0x%x\n", dev_name(&card->gdev->dev), rc);
- return rc;
- }
if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
rc = qeth_setadpparms_change_macaddr(card);
if (rc)
@@ -1656,9 +1646,8 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
return 0;
}
-static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
- struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned short *vlan_id)
+static int qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr *hdr, unsigned short *vlan_id)
{
__u16 prot;
struct iphdr *ip_hdr;
@@ -2408,7 +2397,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return rc;
}
-inline int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
{
int cast_type = RTN_UNSPEC;
struct neighbour *n = NULL;
@@ -2512,7 +2501,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct rtable *rt = (struct rtable *) dst;
__be32 *pkey = &ip_hdr(skb)->daddr;
- if (rt->rt_gateway)
+ if (rt && rt->rt_gateway)
pkey = &rt->rt_gateway;
/* IPv4 */
@@ -2523,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct rt6_info *rt = (struct rt6_info *) dst;
struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
- if (!ipv6_addr_any(&rt->rt6i_gateway))
+ if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
pkey = &rt->rt6i_gateway;
/* IPv6 */
@@ -2546,8 +2535,8 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
rcu_read_unlock();
}
-static inline void qeth_l3_hdr_csum(struct qeth_card *card,
- struct qeth_hdr *hdr, struct sk_buff *skb)
+static void qeth_l3_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr,
+ struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
@@ -2582,7 +2571,7 @@ static void qeth_tso_fill_header(struct qeth_card *card,
hdr->ext.hdr_len = 28;
/*insert non-fix values */
hdr->ext.mss = skb_shinfo(skb)->gso_size;
- hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
+ hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb));
hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
sizeof(struct qeth_hdr_tso));
tcph->check = 0;
@@ -2648,9 +2637,10 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
qeth_get_priority_queue(card, skb, ipv, cast_type) :
card->qdio.default_out_queue];
int tx_bytes = skb->len;
+ unsigned int hd_len = 0;
bool use_tso;
int data_offset = -1;
- int nr_frags;
+ unsigned int nr_frags;
if (((card->info.type == QETH_CARD_TYPE_IQD) &&
(((card->options.cq != QETH_CQ_ENABLED) && !ipv) ||
@@ -2675,11 +2665,12 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
/* Ignore segment size from skb_is_gso(), 1 page is always used. */
use_tso = skb_is_gso(skb) &&
- (qeth_get_ip_protocol(skb) == IPPROTO_TCP) && (ipv == 4);
+ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);
if (card->info.type == QETH_CARD_TYPE_IQD) {
new_skb = skb;
data_offset = ETH_HLEN;
+ hd_len = sizeof(*hdr);
hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
if (!hdr)
goto tx_drop;
@@ -2727,6 +2718,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (lin_rc)
goto tx_drop;
}
+ nr_frags = skb_shinfo(new_skb)->nr_frags;
if (use_tso) {
hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
@@ -2766,19 +2758,21 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (card->info.type != QETH_CARD_TYPE_IQD) {
int len;
- if (use_tso)
- len = ((unsigned long)tcp_hdr(new_skb) +
- tcp_hdrlen(new_skb)) -
- (unsigned long)new_skb->data;
- else
+ if (use_tso) {
+ hd_len = sizeof(struct qeth_hdr_tso) +
+ ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
+ len = hd_len;
+ } else {
len = sizeof(struct qeth_hdr_layer3);
+ }
if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len))
goto tx_drop;
- rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements);
+ rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len,
+ hd_len, elements);
} else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
- data_offset, 0);
+ data_offset, hd_len);
if (!rc) {
card->stats.tx_packets++;
@@ -2786,7 +2780,6 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (new_skb != skb)
dev_kfree_skb_any(skb);
if (card->options.performance_stats) {
- nr_frags = skb_shinfo(new_skb)->nr_frags;
if (use_tso) {
card->perf_stats.large_send_bytes += tx_bytes;
card->perf_stats.large_send_cnt++;
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index f2f94f59e0fa..e8bcc314cc5f 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -350,7 +350,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
NULL,
};
-static struct attribute_group qeth_l3_device_attr_group = {
+static const struct attribute_group qeth_l3_device_attr_group = {
.attrs = qeth_l3_device_attrs,
};
@@ -680,7 +680,7 @@ static struct attribute *qeth_ipato_device_attrs[] = {
NULL,
};
-static struct attribute_group qeth_device_ipato_group = {
+static const struct attribute_group qeth_device_ipato_group = {
.name = "ipa_takeover",
.attrs = qeth_ipato_device_attrs,
};
@@ -843,7 +843,7 @@ static struct attribute *qeth_vipa_device_attrs[] = {
NULL,
};
-static struct attribute_group qeth_device_vipa_group = {
+static const struct attribute_group qeth_device_vipa_group = {
.name = "vipa",
.attrs = qeth_vipa_device_attrs,
};
@@ -895,9 +895,26 @@ static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
u8 *addr)
{
+ __be32 ipv4_addr;
+ struct in6_addr ipv6_addr;
+
if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
return -EINVAL;
}
+ if (proto == QETH_PROT_IPV4) {
+ memcpy(&ipv4_addr, addr, sizeof(ipv4_addr));
+ if (ipv4_is_multicast(ipv4_addr)) {
+ QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n");
+ return -EINVAL;
+ }
+ } else if (proto == QETH_PROT_IPV6) {
+ memcpy(&ipv6_addr, addr, sizeof(ipv6_addr));
+ if (ipv6_addr_is_multicast(&ipv6_addr)) {
+ QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
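Illustrative inputs for the new check (hypothetical values, not from the patch):

/*
 * Writing "224.0.0.1" (IPv4 multicast) or "ff02::1" (IPv6 multicast) to
 * the rxip sysfs attributes now fails with -EINVAL; unicast addresses
 * such as "192.0.2.1" continue to parse successfully.
 */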
@@ -1006,7 +1023,7 @@ static struct attribute *qeth_rxip_device_attrs[] = {
NULL,
};
-static struct attribute_group qeth_device_rxip_group = {
+static const struct attribute_group qeth_device_rxip_group = {
.name = "rxip",
.attrs = qeth_rxip_device_attrs,
};
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index bcc8f3dfd4c4..82ac331d9125 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -29,7 +29,6 @@
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/miscdevice.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index d5bf36ec8a75..8227076c9cbb 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -113,8 +113,12 @@ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
struct zfcp_dbf *dbf = req->adapter->dbf;
struct fsf_status_read_buffer *srb = req->data;
struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+ static int const level = 2;
unsigned long flags;
+ if (unlikely(!debug_level_enabled(dbf->hba, level)))
+ return;
+
spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
@@ -142,7 +146,7 @@ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
"fsf_uss", req->req_id);
log:
- debug_event(dbf->hba, 2, rec, sizeof(*rec));
+ debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
@@ -156,8 +160,12 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
struct zfcp_dbf *dbf = req->adapter->dbf;
struct zfcp_dbf_hba *rec = &dbf->hba_buf;
struct fsf_status_read_buffer *sr_buf = req->data;
+ static int const level = 1;
unsigned long flags;
+ if (unlikely(!debug_level_enabled(dbf->hba, level)))
+ return;
+
spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
@@ -169,7 +177,7 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
memcpy(&rec->u.be, &sr_buf->payload.bit_error,
sizeof(struct fsf_bit_error_payload));
- debug_event(dbf->hba, 1, rec, sizeof(*rec));
+ debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
@@ -186,8 +194,12 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_pay *payload = &dbf->pay_buf;
unsigned long flags;
+ static int const level = 1;
u16 length;
+ if (unlikely(!debug_level_enabled(dbf->pay, level)))
+ return;
+
if (!pl)
return;
@@ -202,7 +214,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
while (payload->counter < scount && (char *)pl[payload->counter]) {
memcpy(payload->data, (char *)pl[payload->counter], length);
- debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
+ debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
payload->counter++;
}
@@ -217,15 +229,19 @@ void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+ static int const level = 1;
unsigned long flags;
+ if (unlikely(!debug_level_enabled(dbf->hba, level)))
+ return;
+
spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_HBA_BASIC;
- debug_event(dbf->hba, 1, rec, sizeof(*rec));
+ debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
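The guard pattern added throughout this file, shown in isolation; example_trace() is illustrative only, while debug_level_enabled() and debug_event() are the existing s390 debug feature API.

static void example_trace(struct zfcp_dbf *dbf, void *rec, size_t len)
{
	static const int level = 1;

	/* skip the memset/memcpy record assembly when the trace area
	 * would discard the event anyway */
	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;
	debug_event(dbf->hba, level, rec, len);
}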
@@ -264,9 +280,13 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
{
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+ static int const level = 1;
struct list_head *entry;
unsigned long flags;
+ if (unlikely(!debug_level_enabled(dbf->rec, level)))
+ return;
+
spin_lock_irqsave(&dbf->rec_lock, flags);
memset(rec, 0, sizeof(*rec));
@@ -283,7 +303,7 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
rec->u.trig.want = want;
rec->u.trig.need = need;
- debug_event(dbf->rec, 1, rec, sizeof(*rec));
+ debug_event(dbf->rec, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
@@ -300,6 +320,9 @@ void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
struct zfcp_dbf_rec *rec = &dbf->rec_buf;
unsigned long flags;
+ if (!debug_level_enabled(dbf->rec, level))
+ return;
+
spin_lock_irqsave(&dbf->rec_lock, flags);
memset(rec, 0, sizeof(*rec));
@@ -345,8 +368,12 @@ void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
{
struct zfcp_dbf *dbf = wka_port->adapter->dbf;
struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+ static int const level = 1;
unsigned long flags;
+ if (unlikely(!debug_level_enabled(dbf->rec, level)))
+ return;
+
spin_lock_irqsave(&dbf->rec_lock, flags);
memset(rec, 0, sizeof(*rec));
@@ -362,10 +389,12 @@ void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
rec->u.run.rec_action = ~0;
rec->u.run.rec_count = ~0;
- debug_event(dbf->rec, 1, rec, sizeof(*rec));
+ debug_event(dbf->rec, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
+#define ZFCP_DBF_SAN_LEVEL 1
+
static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
char *paytag, struct scatterlist *sg, u8 id, u16 len,
@@ -408,7 +437,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
(u16)(sg->length - offset));
/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
memcpy(payload->data, sg_virt(sg) + offset, pay_len);
- debug_event(dbf->pay, 1, payload,
+ debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
zfcp_dbf_plen(pay_len));
payload->counter++;
offset += pay_len;
@@ -418,7 +447,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
spin_unlock(&dbf->pay_lock);
out:
- debug_event(dbf->san, 1, rec, sizeof(*rec));
+ debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->san_lock, flags);
}
@@ -434,6 +463,9 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;
+ if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
+ return;
+
length = (u16)zfcp_qdio_real_bytes(ct_els->req);
zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
length, fsf->req_id, d_id, length);
@@ -447,6 +479,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
struct scatterlist *resp_entry = ct_els->resp;
+ struct fc_ct_hdr *resph;
struct fc_gpn_ft_resp *acc;
int max_entries, x, last = 0;
@@ -460,7 +493,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
&& reqh->ct_fs_subtype == FC_NS_SUBTYPE
&& reqh->ct_options == 0
&& reqh->_ct_resvd1 == 0
- && reqh->ct_cmd == FC_NS_GPN_FT
+ && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
/* reqh->ct_mr_size can vary so do not match but read below */
&& reqh->_ct_resvd2 == 0
&& reqh->ct_reason == 0
@@ -473,7 +506,15 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
return len; /* not GPN_FT response so do not cap */
acc = sg_virt(resp_entry);
- max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+
+ /* cap all but accept CT responses to at least the CT header */
+ resph = (struct fc_ct_hdr *)acc;
+ if ((ct_els->status) ||
+ (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
+ return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);
+
+ max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
+ sizeof(struct fc_gpn_ft_resp))
+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
* to account for header as 1st pseudo "entry" */;
@@ -503,6 +544,9 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;
+ if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
+ return;
+
length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
length, fsf->req_id, ct_els->d_id,
@@ -522,6 +566,9 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
u16 length;
struct scatterlist sg;
+ if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
+ return;
+
length = (u16)(srb->length -
offsetof(struct fsf_status_read_buffer, payload));
sg_init_one(&sg, srb->payload.data, length);
@@ -555,8 +602,8 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
rec->scsi_retries = sc->retries;
rec->scsi_allowed = sc->allowed;
rec->scsi_id = sc->device->id;
- /* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
rec->scsi_lun = (u32)sc->device->lun;
+ rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
rec->host_scribble = (unsigned long)sc->host_scribble;
memcpy(rec->scsi_opcode, sc->cmnd,
@@ -564,19 +611,31 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
if (fsf) {
rec->fsf_req_id = fsf->req_id;
- fcp_rsp = (struct fcp_resp_with_ext *)
- &(fsf->qtcb->bottom.io.fcp_rsp);
+ rec->pl_len = FCP_RESP_WITH_EXT;
+ fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
+ /* mandatory parts of FCP_RSP IU in this SCSI record */
memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
+ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
}
if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
- rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
- (u16)ZFCP_DBF_PAY_MAX_REC);
- zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
- "fcp_sns", fsf->req_id);
+ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
}
+ /* complete FCP_RSP IU in associated PAYload record
+ * but only if there are optional parts
+ */
+ if (fcp_rsp->resp.fr_flags != 0)
+ zfcp_dbf_pl_write(
+ dbf, fcp_rsp,
+ /* at least one full PAY record
+ * but not beyond hardware response field
+ */
+ min_t(u16, max_t(u16, rec->pl_len,
+ ZFCP_DBF_PAY_MAX_REC),
+ FSF_FCP_RSP_SIZE),
+ "fcp_riu", fsf->req_id);
}
debug_event(dbf->scsi, level, rec, sizeof(*rec));
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index db186d44cfaf..3508c00458f4 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2016
+ * Copyright IBM Corp. 2008, 2017
*/
#ifndef ZFCP_DBF_H
@@ -204,16 +204,17 @@ enum zfcp_dbf_scsi_id {
* @id: unique number of recovery record type
* @tag: identifier string specifying the location of initiation
* @scsi_id: scsi device id
- * @scsi_lun: scsi device logical unit number
+ * @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit
* @scsi_result: scsi result
* @scsi_retries: current retry number of scsi request
* @scsi_allowed: allowed retries
- * @fcp_rsp_info: FCP response info
+ * @fcp_rsp_info: FCP response info code
* @scsi_opcode: scsi opcode
* @fsf_req_id: request id of fsf request
* @host_scribble: LLD specific data attached to SCSI request
- * @pl_len: length of paload stored as zfcp_dbf_pay
- * @fsf_rsp: response for fsf request
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @fcp_rsp: response for FCP request
+ * @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit
*/
struct zfcp_dbf_scsi {
u8 id;
@@ -230,6 +231,7 @@ struct zfcp_dbf_scsi {
u64 host_scribble;
u16 pl_len;
struct fcp_resp_with_ext fcp_rsp;
+ u32 scsi_lun_64_hi;
} __packed;
/**
@@ -299,7 +301,7 @@ bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
return false; /* not an FCP response */
- fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp;
+ fcp_rsp = &qtcb->bottom.io.fcp_rsp.iu.resp;
rsp_flags = fcp_rsp->fr_flags;
fr_status = fcp_rsp->fr_status;
return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
@@ -323,7 +325,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
{
struct fsf_qtcb *qtcb = req->qtcb;
- if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
+ ZFCP_STATUS_FSFREQ_ERROR))) {
+ zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
+
+ } else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
(qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
@@ -401,7 +407,8 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
* @flag: indicates type of reset (Target Reset, Logical Unit Reset)
*/
static inline
-void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
+ struct zfcp_fsf_req *fsf_req)
{
char tmp_tag[ZFCP_DBF_TAG_LEN];
@@ -411,7 +418,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
memcpy(tmp_tag, "lr_", 3);
memcpy(&tmp_tag[3], tag, 4);
- _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
+ _zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
}
/**
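
[Note: the scsi_lun/scsi_lun_64_hi pair splits the 64-bit LUN across two u32 fields so the existing record layout stays compatible; a trace reader reassembles the full value. A minimal sketch of the inverse step, helper name hypothetical:

    static u64 zfcp_dbf_scsi_lun(const struct zfcp_dbf_scsi *rec)
    {
            /* inverse of the split done when the record is written:
             *   rec->scsi_lun       = (u32)sc->device->lun;
             *   rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
             */
            return ((u64)rec->scsi_lun_64_hi << 32) | rec->scsi_lun;
    }
]
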
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 7ccfce559034..37408f5f81ce 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -572,9 +572,8 @@ static void zfcp_erp_memwait_handler(unsigned long data)
static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
{
- init_timer(&erp_action->timer);
- erp_action->timer.function = zfcp_erp_memwait_handler;
- erp_action->timer.data = (unsigned long) erp_action;
+ setup_timer(&erp_action->timer, zfcp_erp_memwait_handler,
+ (unsigned long) erp_action);
erp_action->timer.expires = jiffies + HZ;
add_timer(&erp_action->timer);
}
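
[Note: setup_timer() collapses the removed three-line init_timer()/function/data sequence into one call; expiry and arming are unchanged. A sketch against the pre-4.15 timer API used here, function name hypothetical:

    static void arm_memwait_timer(struct timer_list *timer,
                                  void (*handler)(unsigned long),
                                  unsigned long data)
    {
            setup_timer(timer, handler, data);      /* init + function + data */
            timer->expires = jiffies + HZ;          /* fire in about a second */
            add_timer(timer);
    }
]
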
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 9afdbc32b23f..a9e968717dd9 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -41,7 +41,6 @@ extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 7331eea67435..8210645c2111 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,7 +3,7 @@
*
* Fibre Channel related functions for the zfcp device driver.
*
- * Copyright IBM Corp. 2008, 2010
+ * Copyright IBM Corp. 2008, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -29,7 +29,7 @@ static u32 zfcp_fc_rscn_range_mask[] = {
};
static bool no_auto_port_rescan;
-module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
+module_param(no_auto_port_rescan, bool, 0600);
MODULE_PARM_DESC(no_auto_port_rescan,
"no automatic port_rescan (default off)");
@@ -260,7 +260,8 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
page = (struct fc_els_rscn_page *) head;
/* see FC-FS */
- no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);
+ no_entries = be16_to_cpu(head->rscn_plen) /
+ sizeof(struct fc_els_rscn_page);
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
@@ -296,7 +297,7 @@ static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
status_buffer = (struct fsf_status_read_buffer *) req->data;
plogi = (struct fc_els_flogi *) status_buffer->payload.data;
- zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
+ zfcp_fc_incoming_wwpn(req, be64_to_cpu(plogi->fl_wwpn));
}
static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
@@ -306,7 +307,7 @@ static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
struct fc_els_logo *logo =
(struct fc_els_logo *) status_buffer->payload.data;
- zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
+ zfcp_fc_incoming_wwpn(req, be64_to_cpu(logo->fl_n_port_wwn));
}
/**
@@ -335,7 +336,7 @@ static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
if (ct_els->status)
return;
- if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
+ if (gid_pn_rsp->ct_hdr.ct_cmd != cpu_to_be16(FC_FS_ACC))
return;
/* looks like a valid d_id */
@@ -352,8 +353,8 @@ static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
ct_hdr->ct_rev = FC_CT_REV;
ct_hdr->ct_fs_type = FC_FST_DIR;
ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
- ct_hdr->ct_cmd = cmd;
- ct_hdr->ct_mr_size = mr_size / 4;
+ ct_hdr->ct_cmd = cpu_to_be16(cmd);
+ ct_hdr->ct_mr_size = cpu_to_be16(mr_size / 4);
}
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
@@ -376,7 +377,7 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
- gid_pn_req->gid_pn.fn_wwpn = port->wwpn;
+ gid_pn_req->gid_pn.fn_wwpn = cpu_to_be64(port->wwpn);
ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
adapter->pool.gid_pn_req,
@@ -460,26 +461,26 @@ void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
*/
void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
{
- if (plogi->fl_wwpn != port->wwpn) {
+ if (be64_to_cpu(plogi->fl_wwpn) != port->wwpn) {
port->d_id = 0;
dev_warn(&port->adapter->ccw_device->dev,
"A port opened with WWPN 0x%016Lx returned data that "
"identifies it as WWPN 0x%016Lx\n",
(unsigned long long) port->wwpn,
- (unsigned long long) plogi->fl_wwpn);
+ (unsigned long long) be64_to_cpu(plogi->fl_wwpn));
return;
}
- port->wwnn = plogi->fl_wwnn;
- port->maxframe_size = plogi->fl_csp.sp_bb_data;
+ port->wwnn = be64_to_cpu(plogi->fl_wwnn);
+ port->maxframe_size = be16_to_cpu(plogi->fl_csp.sp_bb_data);
- if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
+ if (plogi->fl_cssp[0].cp_class & cpu_to_be16(FC_CPC_VALID))
port->supported_classes |= FC_COS_CLASS1;
- if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
+ if (plogi->fl_cssp[1].cp_class & cpu_to_be16(FC_CPC_VALID))
port->supported_classes |= FC_COS_CLASS2;
- if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
+ if (plogi->fl_cssp[2].cp_class & cpu_to_be16(FC_CPC_VALID))
port->supported_classes |= FC_COS_CLASS3;
- if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
+ if (plogi->fl_cssp[3].cp_class & cpu_to_be16(FC_CPC_VALID))
port->supported_classes |= FC_COS_CLASS4;
}
@@ -497,9 +498,9 @@ static void zfcp_fc_adisc_handler(void *data)
}
if (!port->wwnn)
- port->wwnn = adisc_resp->adisc_wwnn;
+ port->wwnn = be64_to_cpu(adisc_resp->adisc_wwnn);
- if ((port->wwpn != adisc_resp->adisc_wwpn) ||
+ if ((port->wwpn != be64_to_cpu(adisc_resp->adisc_wwpn)) ||
!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
"fcadh_2");
@@ -538,8 +539,8 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
without FC-AL-2 capability, so we don't set it */
- fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
- fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
+ fc_req->u.adisc.req.adisc_wwpn = cpu_to_be64(fc_host_port_name(shost));
+ fc_req->u.adisc.req.adisc_wwnn = cpu_to_be64(fc_host_node_name(shost));
fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
@@ -666,8 +667,8 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
if (ct_els->status)
return -EIO;
- if (hdr->ct_cmd != FC_FS_ACC) {
- if (hdr->ct_reason == FC_BA_RJT_UNABLE)
+ if (hdr->ct_cmd != cpu_to_be16(FC_FS_ACC)) {
+ if (hdr->ct_reason == FC_FS_RJT_UNABL)
return -EAGAIN; /* might be a temporary condition */
return -EIO;
}
@@ -693,10 +694,11 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
if (d_id >= FC_FID_WELL_KNOWN_BASE)
continue;
/* skip the adapter's port and known remote ports */
- if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
+ if (be64_to_cpu(acc->fp_wwpn) ==
+ fc_host_port_name(adapter->scsi_host))
continue;
- port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
+ port = zfcp_port_enqueue(adapter, be64_to_cpu(acc->fp_wwpn),
ZFCP_STATUS_COMMON_NOESC, d_id);
if (!IS_ERR(port))
zfcp_erp_port_reopen(port, 0, "fcegpf1");
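
[Note: every change in this file follows the same sparse-driven pattern: FC wire fields are big-endian (__be16/__be64), so constants are converted once with cpu_to_be16() for in-place comparison, and be16_to_cpu()/be64_to_cpu() is applied before a value is used on the CPU side. A condensed sketch, function name hypothetical, struct layouts from scsi/fc/fc_ns.h and fc_ct.h:

    static bool gpn_ft_entry_usable(const struct fc_ct_hdr *hdr,
                                    const struct fc_gpn_ft_resp *acc,
                                    u64 own_wwpn)
    {
            if (hdr->ct_cmd != cpu_to_be16(FC_FS_ACC))
                    return false;           /* compare in wire byte order */
            if (be64_to_cpu(acc->fp_wwpn) == own_wwpn)
                    return false;           /* convert before CPU-side compare */
            return true;
    }
]
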
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index df2b541c8287..41f22d3dc6d1 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -4,7 +4,7 @@
* Fibre Channel related definitions and inline functions for the zfcp
* device driver
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2017
*/
#ifndef ZFCP_FC_H
@@ -212,6 +212,8 @@ static inline
void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
u8 tm_flags)
{
+ u32 datalen;
+
int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
if (unlikely(tm_flags)) {
@@ -228,10 +230,13 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
- fcp->fc_dl = scsi_bufflen(scsi);
+ datalen = scsi_bufflen(scsi);
+ fcp->fc_dl = cpu_to_be32(datalen);
- if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1)
- fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8;
+ if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1) {
+ datalen += datalen / scsi->device->sector_size * 8;
+ fcp->fc_dl = cpu_to_be32(datalen);
+ }
}
/**
@@ -266,19 +271,23 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
sense = (char *) &fcp_rsp[1];
if (rsp_flags & FCP_RSP_LEN_VAL)
- sense += fcp_rsp->ext.fr_rsp_len;
- sense_len = min(fcp_rsp->ext.fr_sns_len,
- (u32) SCSI_SENSE_BUFFERSIZE);
+ sense += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
+ sense_len = min_t(u32, be32_to_cpu(fcp_rsp->ext.fr_sns_len),
+ SCSI_SENSE_BUFFERSIZE);
memcpy(scsi->sense_buffer, sense, sense_len);
}
if (unlikely(rsp_flags & FCP_RESID_UNDER)) {
- resid = fcp_rsp->ext.fr_resid;
+ resid = be32_to_cpu(fcp_rsp->ext.fr_resid);
scsi_set_resid(scsi, resid);
if (scsi_bufflen(scsi) - resid < scsi->underflow &&
!(rsp_flags & FCP_SNS_LEN_VAL) &&
fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
set_host_byte(scsi, DID_ERROR);
+ } else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
+ /* FCP_DL was not sufficient for SCSI data length */
+ if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+ set_host_byte(scsi, DID_ERROR);
}
}
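
[Note: with DIF type 1 each sector carries an extra 8-byte protection tuple, so FCP_DL must announce data plus tuples; doing the arithmetic in a native-endian local avoids the old read-modify-write directly on the big-endian fc_dl field. Worked numbers under an assumed 512-byte sector size:

    u32 datalen = 262144;                   /* 256 KiB of user data */

    datalen += datalen / 512 * 8;           /* 512 tuples * 8 = 4096 bytes */
    fcp->fc_dl = cpu_to_be32(datalen);      /* 266240, stored big-endian */
]
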
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 27ff38f839fc..69d1dc3ec79d 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -197,8 +197,6 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
switch (sr_buf->status_subtype) {
case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
- zfcp_fsf_link_down_info_eval(req, ldi);
- break;
case FSF_STATUS_READ_SUB_FDISC_FAILED:
zfcp_fsf_link_down_info_eval(req, ldi);
break;
@@ -476,8 +474,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- fc_host_port_name(shost) = nsp->fl_wwpn;
- fc_host_node_name(shost) = nsp->fl_wwnn;
+ fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
+ fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
@@ -503,8 +501,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
switch (bottom->fc_topology) {
case FSF_TOPO_P2P:
adapter->peer_d_id = ntoh24(bottom->peer_d_id);
- adapter->peer_wwpn = plogi->fl_wwpn;
- adapter->peer_wwnn = plogi->fl_wwnn;
+ adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
+ adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
break;
case FSF_TOPO_FABRIC:
@@ -928,8 +926,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsscth2", req);
ct->status = 0;
+ zfcp_dbf_san_res("fsscth2", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@@ -991,8 +989,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
qtcb->bottom.support.resp_buf_length =
zfcp_qdio_real_bytes(sg_resp);
- zfcp_qdio_set_data_div(qdio, &req->qdio_req,
- zfcp_qdio_sbale_count(sg_req));
+ zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
zfcp_qdio_set_scount(qdio, &req->qdio_req);
return 0;
@@ -1109,8 +1106,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsselh1", req);
send_els->status = 0;
+ zfcp_dbf_san_res("fsselh1", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@@ -1394,6 +1391,8 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+ /* no zfcp_fc_test_link() with failed open port */
+ /* fall through */
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
case FSF_SQ_NO_RETRY_POSSIBLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -2142,7 +2141,8 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
zfcp_scsi_dif_sense_error(scpnt, 0x3);
goto skip_fsfstatus;
}
- fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+ BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
+ fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
skip_fsfstatus:
@@ -2255,10 +2255,12 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
goto failed_scsi_cmnd;
- fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+ BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
+ fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
- if (scsi_prot_sg_count(scsi_cmnd)) {
+ if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
+ scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
scsi_prot_sg_count(scsi_cmnd));
retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
@@ -2299,7 +2301,7 @@ static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
zfcp_fsf_fcp_handler_common(req);
- fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+ fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
@@ -2348,7 +2350,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
- fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+ fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
@@ -2392,7 +2394,6 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
req_id, dev_name(&adapter->ccw_device->dev));
}
- fsf_req->qdio_req.sbal_response = sbal_idx;
zfcp_fsf_req_complete(fsf_req);
if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index ea3c76ac0de1..88feba5bfda4 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#ifndef FSF_H
@@ -312,8 +312,14 @@ struct fsf_qtcb_bottom_io {
u32 data_block_length;
u32 prot_data_length;
u8 res2[4];
- u8 fcp_cmnd[FSF_FCP_CMND_SIZE];
- u8 fcp_rsp[FSF_FCP_RSP_SIZE];
+ union {
+ u8 byte[FSF_FCP_CMND_SIZE];
+ struct fcp_cmnd iu;
+ } fcp_cmnd;
+ union {
+ u8 byte[FSF_FCP_RSP_SIZE];
+ struct fcp_resp_with_ext iu;
+ } fcp_rsp;
u8 res3[64];
} __attribute__ ((packed));
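
[Note: the unions give callers a typed pointer to the IU (replacing the casts dropped throughout zfcp_fsf.c) while the byte array pins the QTCB field at its fixed hardware size; a BUILD_BUG_ON() in zfcp_fsf_fcp_cmnd() checks that the typed view fits. A generic sketch of the idiom, all names hypothetical:

    #define HW_FIELD_SIZE 128               /* size dictated by hardware layout */

    struct typed_iu {
            u32 word;
            u8  payload[20];
    } __attribute__ ((packed));

    union hw_field {
            u8 byte[HW_FIELD_SIZE];         /* reserves the full hardware size */
            struct typed_iu iu;             /* typed access without a cast */
    };

    static void check_hw_field_layout(void)
    {
            /* compile-time guard: typed view never outgrows the field */
            BUILD_BUG_ON(sizeof(struct typed_iu) > HW_FIELD_SIZE);
    }
]
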
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index dbf2b54703f7..9e358fc04b78 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -14,7 +14,7 @@
#include "zfcp_ext.h"
#include "zfcp_qdio.h"
-static bool enable_multibuffer = 1;
+static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 497cd379b0d1..7f647a90c750 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -54,7 +54,6 @@ struct zfcp_qdio {
* @sbal_last: last sbal for this request
* @sbal_limit: last possible sbal for this request
* @sbale_curr: current sbale at creation of this request
- * @sbal_response: sbal used in interrupt
* @qdio_outb_usage: usage of outbound queue
*/
struct zfcp_qdio_req {
@@ -64,7 +63,6 @@ struct zfcp_qdio_req {
u8 sbal_last;
u8 sbal_limit;
u8 sbale_curr;
- u8 sbal_response;
u16 qdio_outb_usage;
};
@@ -225,21 +223,6 @@ void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
}
/**
- * zfcp_qdio_sbale_count - count sbale used
- * @sg: pointer to struct scatterlist
- */
-static inline
-unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg)
-{
- unsigned int count = 0;
-
- for (; sg; sg = sg_next(sg))
- count++;
-
- return count;
-}
-
-/**
* zfcp_qdio_real_bytes - count bytes used
* @sg: pointer to struct scatterlist
*/
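
[Note: the removed helper open-coded what sg_nents() from <linux/scatterlist.h> already provides: walk the chain with sg_next() and count entries, so the call-site change in zfcp_fsf.c is behavior-preserving. The equivalence, roughly:

    static unsigned int count_sg_entries(struct scatterlist *sg_req)
    {
            unsigned int count = 0;
            struct scatterlist *sg;

            for (sg = sg_req; sg; sg = sg_next(sg)) /* the removed helper's loop */
                    count++;

            return count;                           /* == sg_nents(sg_req) */
    }
]
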
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 0678cf714c0e..ec3ddd1d31d5 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -28,7 +28,7 @@ static bool enable_dif;
module_param_named(dif, enable_dif, bool, 0400);
MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
-static bool allow_lun_scan = 1;
+static bool allow_lun_scan = true;
module_param(allow_lun_scan, bool, 0600);
MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
@@ -273,25 +273,29 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
- if (ret)
+ if (ret) {
+ zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
return ret;
+ }
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
- zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
return SUCCESS;
}
}
- if (!fsf_req)
+ if (!fsf_req) {
+ zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
return FAILED;
+ }
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
- zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
retval = FAILED;
} else {
- zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
}