From cb4ef3c20bd95e21dddc1101133d90ac7f049a53 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 9 May 2017 12:44:21 +0200 Subject: s390/pkey: make pkey_init() static drivers/s390/crypto/pkey_api.c:1197:12: warning: symbol 'pkey_init' was not declared. Should it be static? Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- drivers/s390/crypto/pkey_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/s390') diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index ea86da8c75f9..8f466facf15e 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -1194,7 +1194,7 @@ static struct miscdevice pkey_dev = { /* * Module init */ -int __init pkey_init(void) +static int __init pkey_init(void) { cpacf_mask_t pckmo_functions; -- cgit v1.2.3-59-g8ed1b From 94d26bfcf31244d24fddbe7ba5b0dd17f3c32b11 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Thu, 1 Dec 2016 12:51:32 +0100 Subject: s390/scm: remove cluster option Remove CONFIG_SCM_BLOCK_CLUSTER_WRITE and related code. This quirk is no longer needed on current hardware. Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- drivers/s390/block/Kconfig | 7 - drivers/s390/block/Makefile | 3 - drivers/s390/block/scm_blk.c | 45 +------ drivers/s390/block/scm_blk.h | 54 -------- drivers/s390/block/scm_blk_cluster.c | 255 ----------------------------------- 5 files changed, 3 insertions(+), 361 deletions(-) delete mode 100644 drivers/s390/block/scm_blk_cluster.c (limited to 'drivers/s390') diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index 0acb8c2f9475..31f014b57bfc 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -82,10 +82,3 @@ config SCM_BLOCK To compile this driver as a module, choose M here: the module will be called scm_block. - -config SCM_BLOCK_CLUSTER_WRITE - def_bool y - prompt "SCM force cluster writes" - depends on SCM_BLOCK - help - Force writes to Storage Class Memory (SCM) to be in done in clusters. diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile index c2f4e673e031..b64e2b32c753 100644 --- a/drivers/s390/block/Makefile +++ b/drivers/s390/block/Makefile @@ -19,7 +19,4 @@ obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o obj-$(CONFIG_DCSSBLK) += dcssblk.o scm_block-objs := scm_drv.o scm_blk.o -ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE -scm_block-objs += scm_blk_cluster.o -endif obj-$(CONFIG_SCM_BLOCK) += scm_block.o diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 152de6817875..97fd0fcfa3e6 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -42,7 +42,6 @@ static void __scm_free_rq(struct scm_request *scmrq) struct aob_rq_header *aobrq = to_aobrq(scmrq); free_page((unsigned long) scmrq->aob); - __scm_free_rq_cluster(scmrq); kfree(scmrq->request); kfree(aobrq); } @@ -82,9 +81,6 @@ static int __scm_alloc_rq(void) if (!scmrq->request) goto free; - if (__scm_alloc_rq_cluster(scmrq)) - goto free; - INIT_LIST_HEAD(&scmrq->list); spin_lock_irq(&list_lock); list_add(&scmrq->list, &inactive_requests); @@ -234,7 +230,6 @@ static inline void scm_request_init(struct scm_blk_dev *bdev, scmrq->error = 0; /* We don't use all msbs - place aidaws at the end of the aob page. 
*/ scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io]; - scm_request_cluster_init(scmrq); } static void scm_ensure_queue_restart(struct scm_blk_dev *bdev) @@ -246,12 +241,11 @@ static void scm_ensure_queue_restart(struct scm_blk_dev *bdev) blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY); } -void scm_request_requeue(struct scm_request *scmrq) +static void scm_request_requeue(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; int i; - scm_release_cluster(scmrq); for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) blk_requeue_request(bdev->rq, scmrq->request[i]); @@ -260,12 +254,11 @@ void scm_request_requeue(struct scm_request *scmrq) scm_ensure_queue_restart(bdev); } -void scm_request_finish(struct scm_request *scmrq) +static void scm_request_finish(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; int i; - scm_release_cluster(scmrq); for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) blk_end_request_all(scmrq->request[i], scmrq->error); @@ -313,31 +306,6 @@ static void scm_blk_request(struct request_queue *rq) } scm_request_set(scmrq, req); - if (!scm_reserve_cluster(scmrq)) { - SCM_LOG(5, "cluster busy"); - scm_request_set(scmrq, NULL); - if (scmrq->aob->request.msb_count) - goto out; - - scm_request_done(scmrq); - return; - } - - if (scm_need_cluster_request(scmrq)) { - if (scmrq->aob->request.msb_count) { - /* Start cluster requests separately. */ - scm_request_set(scmrq, NULL); - if (scm_request_start(scmrq)) - return; - } else { - atomic_inc(&bdev->queued_reqs); - blk_start_request(req); - scm_initiate_cluster_request(scmrq); - } - scmrq = NULL; - continue; - } - if (scm_request_prepare(scmrq)) { SCM_LOG(5, "aidaw alloc failed"); scm_request_set(scmrq, NULL); @@ -444,12 +412,6 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev) continue; } - if (scm_test_cluster_request(scmrq)) { - scm_cluster_request_irq(scmrq); - spin_lock_irqsave(&bdev->lock, flags); - continue; - } - scm_request_finish(scmrq); spin_lock_irqsave(&bdev->lock, flags); } @@ -498,7 +460,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) blk_queue_max_segments(rq, nr_max_blk); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq); - scm_blk_dev_cluster_setup(bdev); bdev->gendisk = alloc_disk(SCM_NR_PARTS); if (!bdev->gendisk) @@ -558,7 +519,7 @@ static bool __init scm_blk_params_valid(void) if (!nr_requests_per_io || nr_requests_per_io > 64) return false; - return scm_cluster_size_valid(); + return true; } static int __init scm_blk_init(void) diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h index 09218cdc5129..f32188dd7909 100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h @@ -23,9 +23,6 @@ struct scm_blk_dev { atomic_t queued_reqs; enum {SCM_OPER, SCM_WR_PROHIBIT} state; struct list_head finished_requests; -#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE - struct list_head cluster_list; -#endif }; struct scm_request { @@ -36,13 +33,6 @@ struct scm_request { struct list_head list; u8 retries; int error; -#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE - struct { - enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state; - struct list_head list; - void **buf; - } cluster; -#endif }; #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data) @@ -52,55 +42,11 @@ void scm_blk_dev_cleanup(struct scm_blk_dev *); void scm_blk_set_available(struct scm_blk_dev *); void scm_blk_irq(struct scm_device *, void *, int); -void 
scm_request_finish(struct scm_request *); -void scm_request_requeue(struct scm_request *); - struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes); int scm_drv_init(void); void scm_drv_cleanup(void); -#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE -void __scm_free_rq_cluster(struct scm_request *); -int __scm_alloc_rq_cluster(struct scm_request *); -void scm_request_cluster_init(struct scm_request *); -bool scm_reserve_cluster(struct scm_request *); -void scm_release_cluster(struct scm_request *); -void scm_blk_dev_cluster_setup(struct scm_blk_dev *); -bool scm_need_cluster_request(struct scm_request *); -void scm_initiate_cluster_request(struct scm_request *); -void scm_cluster_request_irq(struct scm_request *); -bool scm_test_cluster_request(struct scm_request *); -bool scm_cluster_size_valid(void); -#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */ -static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {} -static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq) -{ - return 0; -} -static inline void scm_request_cluster_init(struct scm_request *scmrq) {} -static inline bool scm_reserve_cluster(struct scm_request *scmrq) -{ - return true; -} -static inline void scm_release_cluster(struct scm_request *scmrq) {} -static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {} -static inline bool scm_need_cluster_request(struct scm_request *scmrq) -{ - return false; -} -static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {} -static inline void scm_cluster_request_irq(struct scm_request *scmrq) {} -static inline bool scm_test_cluster_request(struct scm_request *scmrq) -{ - return false; -} -static inline bool scm_cluster_size_valid(void) -{ - return true; -} -#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */ - extern debug_info_t *scm_debug; #define SCM_LOG(imp, txt) do { \ diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c deleted file mode 100644 index 7497ddde2dd6..000000000000 --- a/drivers/s390/block/scm_blk_cluster.c +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Block driver for s390 storage class memory. - * - * Copyright IBM Corp. 
2012 - * Author(s): Sebastian Ott - */ - -#include -#include -#include -#include -#include -#include -#include -#include "scm_blk.h" - -static unsigned int write_cluster_size = 64; -module_param(write_cluster_size, uint, S_IRUGO); -MODULE_PARM_DESC(write_cluster_size, - "Number of pages used for contiguous writes."); - -#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE) - -void __scm_free_rq_cluster(struct scm_request *scmrq) -{ - int i; - - if (!scmrq->cluster.buf) - return; - - for (i = 0; i < 2 * write_cluster_size; i++) - free_page((unsigned long) scmrq->cluster.buf[i]); - - kfree(scmrq->cluster.buf); -} - -int __scm_alloc_rq_cluster(struct scm_request *scmrq) -{ - int i; - - scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size, - GFP_KERNEL); - if (!scmrq->cluster.buf) - return -ENOMEM; - - for (i = 0; i < 2 * write_cluster_size; i++) { - scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA); - if (!scmrq->cluster.buf[i]) - return -ENOMEM; - } - INIT_LIST_HEAD(&scmrq->cluster.list); - return 0; -} - -void scm_request_cluster_init(struct scm_request *scmrq) -{ - scmrq->cluster.state = CLUSTER_NONE; -} - -static bool clusters_intersect(struct request *A, struct request *B) -{ - unsigned long firstA, lastA, firstB, lastB; - - firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE; - lastA = (((u64) blk_rq_pos(A) << 9) + - blk_rq_bytes(A) - 1) / CLUSTER_SIZE; - - firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE; - lastB = (((u64) blk_rq_pos(B) << 9) + - blk_rq_bytes(B) - 1) / CLUSTER_SIZE; - - return (firstB <= lastA && firstA <= lastB); -} - -bool scm_reserve_cluster(struct scm_request *scmrq) -{ - struct request *req = scmrq->request[scmrq->aob->request.msb_count]; - struct scm_blk_dev *bdev = scmrq->bdev; - struct scm_request *iter; - int pos, add = 1; - - if (write_cluster_size == 0) - return true; - - spin_lock(&bdev->lock); - list_for_each_entry(iter, &bdev->cluster_list, cluster.list) { - if (iter == scmrq) { - /* - * We don't have to use clusters_intersect here, since - * cluster requests are always started separately. 
- */ - add = 0; - continue; - } - for (pos = 0; pos < iter->aob->request.msb_count; pos++) { - if (clusters_intersect(req, iter->request[pos]) && - (rq_data_dir(req) == WRITE || - rq_data_dir(iter->request[pos]) == WRITE)) { - spin_unlock(&bdev->lock); - return false; - } - } - } - if (add) - list_add(&scmrq->cluster.list, &bdev->cluster_list); - spin_unlock(&bdev->lock); - - return true; -} - -void scm_release_cluster(struct scm_request *scmrq) -{ - struct scm_blk_dev *bdev = scmrq->bdev; - unsigned long flags; - - if (write_cluster_size == 0) - return; - - spin_lock_irqsave(&bdev->lock, flags); - list_del(&scmrq->cluster.list); - spin_unlock_irqrestore(&bdev->lock, flags); -} - -void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) -{ - INIT_LIST_HEAD(&bdev->cluster_list); - blk_queue_io_opt(bdev->rq, CLUSTER_SIZE); -} - -static int scm_prepare_cluster_request(struct scm_request *scmrq) -{ - struct scm_blk_dev *bdev = scmrq->bdev; - struct scm_device *scmdev = bdev->gendisk->private_data; - struct request *req = scmrq->request[0]; - struct msb *msb = &scmrq->aob->msb[0]; - struct req_iterator iter; - struct aidaw *aidaw; - struct bio_vec bv; - int i = 0; - u64 addr; - - switch (scmrq->cluster.state) { - case CLUSTER_NONE: - scmrq->cluster.state = CLUSTER_READ; - /* fall through */ - case CLUSTER_READ: - msb->bs = MSB_BS_4K; - msb->oc = MSB_OC_READ; - msb->flags = MSB_FLAG_IDA; - msb->blk_count = write_cluster_size; - - addr = scmdev->address + ((u64) blk_rq_pos(req) << 9); - msb->scm_addr = round_down(addr, CLUSTER_SIZE); - - if (msb->scm_addr != - round_down(addr + (u64) blk_rq_bytes(req) - 1, - CLUSTER_SIZE)) - msb->blk_count = 2 * write_cluster_size; - - aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE); - if (!aidaw) - return -ENOMEM; - - scmrq->aob->request.msb_count = 1; - msb->data_addr = (u64) aidaw; - for (i = 0; i < msb->blk_count; i++) { - aidaw->data_addr = (u64) scmrq->cluster.buf[i]; - aidaw++; - } - - break; - case CLUSTER_WRITE: - aidaw = (void *) msb->data_addr; - msb->oc = MSB_OC_WRITE; - - for (addr = msb->scm_addr; - addr < scmdev->address + ((u64) blk_rq_pos(req) << 9); - addr += PAGE_SIZE) { - aidaw->data_addr = (u64) scmrq->cluster.buf[i]; - aidaw++; - i++; - } - rq_for_each_segment(bv, req, iter) { - aidaw->data_addr = (u64) page_address(bv.bv_page); - aidaw++; - i++; - } - for (; i < msb->blk_count; i++) { - aidaw->data_addr = (u64) scmrq->cluster.buf[i]; - aidaw++; - } - break; - } - return 0; -} - -bool scm_need_cluster_request(struct scm_request *scmrq) -{ - int pos = scmrq->aob->request.msb_count; - - if (rq_data_dir(scmrq->request[pos]) == READ) - return false; - - return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE; -} - -/* Called with queue lock held. 
*/ -void scm_initiate_cluster_request(struct scm_request *scmrq) -{ - if (scm_prepare_cluster_request(scmrq)) - goto requeue; - if (eadm_start_aob(scmrq->aob)) - goto requeue; - return; -requeue: - scm_request_requeue(scmrq); -} - -bool scm_test_cluster_request(struct scm_request *scmrq) -{ - return scmrq->cluster.state != CLUSTER_NONE; -} - -void scm_cluster_request_irq(struct scm_request *scmrq) -{ - struct scm_blk_dev *bdev = scmrq->bdev; - unsigned long flags; - - switch (scmrq->cluster.state) { - case CLUSTER_NONE: - BUG(); - break; - case CLUSTER_READ: - if (scmrq->error) { - scm_request_finish(scmrq); - break; - } - scmrq->cluster.state = CLUSTER_WRITE; - spin_lock_irqsave(&bdev->rq_lock, flags); - scm_initiate_cluster_request(scmrq); - spin_unlock_irqrestore(&bdev->rq_lock, flags); - break; - case CLUSTER_WRITE: - scm_request_finish(scmrq); - break; - } -} - -bool scm_cluster_size_valid(void) -{ - if (write_cluster_size == 1 || write_cluster_size > 128) - return false; - - return !(write_cluster_size & (write_cluster_size - 1)); -} -- cgit v1.2.3-59-g8ed1b From 12d9076265398eb2fec3c5dabe7b6713bca8bac9 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Wed, 25 Jan 2017 16:18:53 +0100 Subject: s390/scm: convert to blk-mq Convert scm_blk to use the blk-mq API. This is just a simple conversion since we still use a single queue. Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- drivers/s390/block/scm_blk.c | 137 +++++++++++++++++++++++-------------------- drivers/s390/block/scm_blk.h | 2 + 2 files changed, 74 insertions(+), 65 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 97fd0fcfa3e6..701e42e852e2 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -110,13 +111,13 @@ static struct scm_request *scm_request_fetch(void) { struct scm_request *scmrq = NULL; - spin_lock(&list_lock); + spin_lock_irq(&list_lock); if (list_empty(&inactive_requests)) goto out; scmrq = list_first_entry(&inactive_requests, struct scm_request, list); list_del(&scmrq->list); out: - spin_unlock(&list_lock); + spin_unlock_irq(&list_lock); return scmrq; } @@ -232,26 +233,17 @@ static inline void scm_request_init(struct scm_blk_dev *bdev, scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io]; } -static void scm_ensure_queue_restart(struct scm_blk_dev *bdev) -{ - if (atomic_read(&bdev->queued_reqs)) { - /* Queue restart is triggered by the next interrupt. 
*/ - return; - } - blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY); -} - static void scm_request_requeue(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; int i; for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) - blk_requeue_request(bdev->rq, scmrq->request[i]); + blk_mq_requeue_request(scmrq->request[i], false); atomic_dec(&bdev->queued_reqs); scm_request_done(scmrq); - scm_ensure_queue_restart(bdev); + blk_mq_kick_requeue_list(bdev->rq); } static void scm_request_finish(struct scm_request *scmrq) @@ -259,73 +251,74 @@ static void scm_request_finish(struct scm_request *scmrq) struct scm_blk_dev *bdev = scmrq->bdev; int i; - for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) - blk_end_request_all(scmrq->request[i], scmrq->error); + for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { + if (scmrq->error) + blk_mq_end_request(scmrq->request[i], scmrq->error); + else + blk_mq_complete_request(scmrq->request[i]); + } atomic_dec(&bdev->queued_reqs); scm_request_done(scmrq); } -static int scm_request_start(struct scm_request *scmrq) +static void scm_request_start(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; - int ret; atomic_inc(&bdev->queued_reqs); - if (!scmrq->aob->request.msb_count) { - scm_request_requeue(scmrq); - return -EINVAL; - } - - ret = eadm_start_aob(scmrq->aob); - if (ret) { + if (eadm_start_aob(scmrq->aob)) { SCM_LOG(5, "no subchannel"); scm_request_requeue(scmrq); } - return ret; } -static void scm_blk_request(struct request_queue *rq) +static int scm_blk_request(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *qd) { - struct scm_device *scmdev = rq->queuedata; + struct scm_device *scmdev = hctx->queue->queuedata; struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); - struct scm_request *scmrq = NULL; - struct request *req; + struct request *req = qd->rq; + struct scm_request *scmrq; - while ((req = blk_peek_request(rq))) { - if (!scm_permit_request(bdev, req)) - goto out; + spin_lock(&bdev->rq_lock); + if (!scm_permit_request(bdev, req)) { + spin_unlock(&bdev->rq_lock); + return BLK_MQ_RQ_QUEUE_BUSY; + } + scmrq = hctx->driver_data; + if (!scmrq) { + scmrq = scm_request_fetch(); if (!scmrq) { - scmrq = scm_request_fetch(); - if (!scmrq) { - SCM_LOG(5, "no request"); - goto out; - } - scm_request_init(bdev, scmrq); + SCM_LOG(5, "no request"); + spin_unlock(&bdev->rq_lock); + return BLK_MQ_RQ_QUEUE_BUSY; } - scm_request_set(scmrq, req); - - if (scm_request_prepare(scmrq)) { - SCM_LOG(5, "aidaw alloc failed"); - scm_request_set(scmrq, NULL); - goto out; - } - blk_start_request(req); + scm_request_init(bdev, scmrq); + hctx->driver_data = scmrq; + } + scm_request_set(scmrq, req); - if (scmrq->aob->request.msb_count < nr_requests_per_io) - continue; + if (scm_request_prepare(scmrq)) { + SCM_LOG(5, "aidaw alloc failed"); + scm_request_set(scmrq, NULL); - if (scm_request_start(scmrq)) - return; + if (scmrq->aob->request.msb_count) + scm_request_start(scmrq); - scmrq = NULL; + hctx->driver_data = NULL; + spin_unlock(&bdev->rq_lock); + return BLK_MQ_RQ_QUEUE_BUSY; } -out: - if (scmrq) + blk_mq_start_request(req); + + if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) { scm_request_start(scmrq); - else - scm_ensure_queue_restart(bdev); + hctx->driver_data = NULL; + } + spin_unlock(&bdev->rq_lock); + return BLK_MQ_RQ_QUEUE_OK; } static void __scmrq_log_error(struct scm_request *scmrq) @@ -387,9 +380,7 @@ restart: return; requeue: - spin_lock_irqsave(&bdev->rq_lock, flags); 
scm_request_requeue(scmrq); - spin_unlock_irqrestore(&bdev->rq_lock, flags); } static void scm_blk_tasklet(struct scm_blk_dev *bdev) @@ -416,19 +407,21 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev) spin_lock_irqsave(&bdev->lock, flags); } spin_unlock_irqrestore(&bdev->lock, flags); - /* Look out for more requests. */ - blk_run_queue(bdev->rq); } static const struct block_device_operations scm_blk_devops = { .owner = THIS_MODULE, }; +static const struct blk_mq_ops scm_mq_ops = { + .queue_rq = scm_blk_request, +}; + int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) { - struct request_queue *rq; - int len, ret = -ENOMEM; unsigned int devindex, nr_max_blk; + struct request_queue *rq; + int len, ret; devindex = atomic_inc_return(&nr_devices) - 1; /* scma..scmz + scmaa..scmzz */ @@ -447,10 +440,20 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) (void (*)(unsigned long)) scm_blk_tasklet, (unsigned long) bdev); - rq = blk_init_queue(scm_blk_request, &bdev->rq_lock); - if (!rq) + bdev->tag_set.ops = &scm_mq_ops; + bdev->tag_set.nr_hw_queues = 1; + bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; + bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + + ret = blk_mq_alloc_tag_set(&bdev->tag_set); + if (ret) goto out; + rq = blk_mq_init_queue(&bdev->tag_set); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + goto out_tag; + } bdev->rq = rq; nr_max_blk = min(scmdev->nr_max_block, (unsigned int) (PAGE_SIZE / sizeof(struct aidaw))); @@ -462,9 +465,10 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq); bdev->gendisk = alloc_disk(SCM_NR_PARTS); - if (!bdev->gendisk) + if (!bdev->gendisk) { + ret = -ENOMEM; goto out_queue; - + } rq->queuedata = scmdev; bdev->gendisk->private_data = scmdev; bdev->gendisk->fops = &scm_blk_devops; @@ -489,6 +493,8 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) out_queue: blk_cleanup_queue(rq); +out_tag: + blk_mq_free_tag_set(&bdev->tag_set); out: atomic_dec(&nr_devices); return ret; @@ -499,6 +505,7 @@ void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) tasklet_kill(&bdev->tasklet); del_gendisk(bdev->gendisk); blk_cleanup_queue(bdev->gendisk->queue); + blk_mq_free_tag_set(&bdev->tag_set); put_disk(bdev->gendisk); } diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h index f32188dd7909..160564399ff4 100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -17,6 +18,7 @@ struct scm_blk_dev { struct tasklet_struct tasklet; struct request_queue *rq; struct gendisk *gendisk; + struct blk_mq_tag_set tag_set; struct scm_device *scmdev; spinlock_t rq_lock; /* guard the request queue */ spinlock_t lock; /* guard the rest of the blockdev */ -- cgit v1.2.3-59-g8ed1b From c7b3e92331fbb905579e67aeed202a37eade54b2 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Tue, 31 Jan 2017 16:15:25 +0100 Subject: s390/scm: convert tasklet Drop the tasklet that was used to complete requests in favor of block layer helpers that finish the IO on the CPU that initiated it. 
Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- drivers/s390/block/scm_blk.c | 54 ++++++++++++-------------------------------- drivers/s390/block/scm_blk.h | 1 - 2 files changed, 15 insertions(+), 40 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 701e42e852e2..bba798c699f1 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -338,21 +338,6 @@ static void __scmrq_log_error(struct scm_request *scmrq) scmrq->error); } -void scm_blk_irq(struct scm_device *scmdev, void *data, int error) -{ - struct scm_request *scmrq = data; - struct scm_blk_dev *bdev = scmrq->bdev; - - scmrq->error = error; - if (error) - __scmrq_log_error(scmrq); - - spin_lock(&bdev->lock); - list_add_tail(&scmrq->list, &bdev->finished_requests); - spin_unlock(&bdev->lock); - tasklet_hi_schedule(&bdev->tasklet); -} - static void scm_blk_handle_error(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; @@ -383,30 +368,25 @@ requeue: scm_request_requeue(scmrq); } -static void scm_blk_tasklet(struct scm_blk_dev *bdev) +void scm_blk_irq(struct scm_device *scmdev, void *data, int error) { - struct scm_request *scmrq; - unsigned long flags; - - spin_lock_irqsave(&bdev->lock, flags); - while (!list_empty(&bdev->finished_requests)) { - scmrq = list_first_entry(&bdev->finished_requests, - struct scm_request, list); - list_del(&scmrq->list); - spin_unlock_irqrestore(&bdev->lock, flags); + struct scm_request *scmrq = data; - if (scmrq->error && scmrq->retries-- > 0) { + scmrq->error = error; + if (error) { + __scmrq_log_error(scmrq); + if (scmrq->retries-- > 0) { scm_blk_handle_error(scmrq); - - /* Request restarted or requeued, handle next. */ - spin_lock_irqsave(&bdev->lock, flags); - continue; + return; } - - scm_request_finish(scmrq); - spin_lock_irqsave(&bdev->lock, flags); } - spin_unlock_irqrestore(&bdev->lock, flags); + + scm_request_finish(scmrq); +} + +static void scm_blk_request_done(struct request *req) +{ + blk_mq_end_request(req, 0); } static const struct block_device_operations scm_blk_devops = { @@ -415,6 +395,7 @@ static const struct block_device_operations scm_blk_devops = { static const struct blk_mq_ops scm_mq_ops = { .queue_rq = scm_blk_request, + .complete = scm_blk_request_done, }; int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) @@ -434,11 +415,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) bdev->state = SCM_OPER; spin_lock_init(&bdev->rq_lock); spin_lock_init(&bdev->lock); - INIT_LIST_HEAD(&bdev->finished_requests); atomic_set(&bdev->queued_reqs, 0); - tasklet_init(&bdev->tasklet, - (void (*)(unsigned long)) scm_blk_tasklet, - (unsigned long) bdev); bdev->tag_set.ops = &scm_mq_ops; bdev->tag_set.nr_hw_queues = 1; @@ -502,7 +479,6 @@ out: void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) { - tasklet_kill(&bdev->tasklet); del_gendisk(bdev->gendisk); blk_cleanup_queue(bdev->gendisk->queue); blk_mq_free_tag_set(&bdev->tag_set); diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h index 160564399ff4..f7b4d9ba43d1 100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h @@ -15,7 +15,6 @@ #define SCM_QUEUE_DELAY 5 struct scm_blk_dev { - struct tasklet_struct tasklet; struct request_queue *rq; struct gendisk *gendisk; struct blk_mq_tag_set tag_set; -- cgit v1.2.3-59-g8ed1b From 9861dbd5b4a422ae03a8caa2fa6d2827912aa952 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Fri, 24 Feb 2017 
17:50:17 +0100 Subject: s390/scm: use multiple queues Exploit multiple hardware contexts (queues) that can process requests in parallel. Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- drivers/s390/block/scm_blk.c | 52 ++++++++++++++++++++++++++++++++++---------- drivers/s390/block/scm_blk.h | 3 +-- 2 files changed, 42 insertions(+), 13 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index bba798c699f1..725f912fab41 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -273,30 +273,36 @@ static void scm_request_start(struct scm_request *scmrq) } } +struct scm_queue { + struct scm_request *scmrq; + spinlock_t lock; +}; + static int scm_blk_request(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *qd) { struct scm_device *scmdev = hctx->queue->queuedata; struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); + struct scm_queue *sq = hctx->driver_data; struct request *req = qd->rq; struct scm_request *scmrq; - spin_lock(&bdev->rq_lock); + spin_lock(&sq->lock); if (!scm_permit_request(bdev, req)) { - spin_unlock(&bdev->rq_lock); + spin_unlock(&sq->lock); return BLK_MQ_RQ_QUEUE_BUSY; } - scmrq = hctx->driver_data; + scmrq = sq->scmrq; if (!scmrq) { scmrq = scm_request_fetch(); if (!scmrq) { SCM_LOG(5, "no request"); - spin_unlock(&bdev->rq_lock); + spin_unlock(&sq->lock); return BLK_MQ_RQ_QUEUE_BUSY; } scm_request_init(bdev, scmrq); - hctx->driver_data = scmrq; + sq->scmrq = scmrq; } scm_request_set(scmrq, req); @@ -307,20 +313,43 @@ static int scm_blk_request(struct blk_mq_hw_ctx *hctx, if (scmrq->aob->request.msb_count) scm_request_start(scmrq); - hctx->driver_data = NULL; - spin_unlock(&bdev->rq_lock); + sq->scmrq = NULL; + spin_unlock(&sq->lock); return BLK_MQ_RQ_QUEUE_BUSY; } blk_mq_start_request(req); if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) { scm_request_start(scmrq); - hctx->driver_data = NULL; + sq->scmrq = NULL; } - spin_unlock(&bdev->rq_lock); + spin_unlock(&sq->lock); return BLK_MQ_RQ_QUEUE_OK; } +static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int idx) +{ + struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL); + + if (!qd) + return -ENOMEM; + + spin_lock_init(&qd->lock); + hctx->driver_data = qd; + + return 0; +} + +static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) +{ + struct scm_queue *qd = hctx->driver_data; + + WARN_ON(qd->scmrq); + kfree(hctx->driver_data); + hctx->driver_data = NULL; +} + static void __scmrq_log_error(struct scm_request *scmrq) { struct aob *aob = scmrq->aob; @@ -396,6 +425,8 @@ static const struct block_device_operations scm_blk_devops = { static const struct blk_mq_ops scm_mq_ops = { .queue_rq = scm_blk_request, .complete = scm_blk_request_done, + .init_hctx = scm_blk_init_hctx, + .exit_hctx = scm_blk_exit_hctx, }; int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) @@ -413,12 +444,11 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) bdev->scmdev = scmdev; bdev->state = SCM_OPER; - spin_lock_init(&bdev->rq_lock); spin_lock_init(&bdev->lock); atomic_set(&bdev->queued_reqs, 0); bdev->tag_set.ops = &scm_mq_ops; - bdev->tag_set.nr_hw_queues = 1; + bdev->tag_set.nr_hw_queues = nr_requests; bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h index f7b4d9ba43d1..242d17a91920 
100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h @@ -19,8 +19,7 @@ struct scm_blk_dev { struct gendisk *gendisk; struct blk_mq_tag_set tag_set; struct scm_device *scmdev; - spinlock_t rq_lock; /* guard the request queue */ - spinlock_t lock; /* guard the rest of the blockdev */ + spinlock_t lock; atomic_t queued_reqs; enum {SCM_OPER, SCM_WR_PROHIBIT} state; struct list_head finished_requests; -- cgit v1.2.3-59-g8ed1b From 3050c20821d7e9f15566d105da9a0913de61e85c Mon Sep 17 00:00:00 2001 From: Jan Höppner Date: Tue, 9 May 2017 14:07:37 +0200 Subject: s390/dasd: Remove variable sized array MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Dynamic stack allocations are considered bad. Get rid of this one occurrence and use kstrdup() instead. Also, set the return codes so that we have only one exit where we can call kfree(). Signed-off-by: Jan Höppner Signed-off-by: Martin Schwidefsky --- drivers/s390/block/dasd_devmap.c | 47 +++++++++++++++++++++++++--------------- 1 file changed, 30 insertions(+), 17 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 1164b51d09f3..05e5762d045e 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -315,45 +315,58 @@ static int __init dasd_parse_range(const char *range) char *features_str = NULL; char *from_str = NULL; char *to_str = NULL; - size_t len = strlen(range) + 1; - char tmp[len]; + int rc = 0; + char *tmp; - strlcpy(tmp, range, len); + tmp = kstrdup(range, GFP_KERNEL); + if (!tmp) + return -ENOMEM; - if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str)) - goto out_err; + if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str)) { + rc = -EINVAL; + goto out; + } - if (dasd_busid(from_str, &from_id0, &from_id1, &from)) - goto out_err; + if (dasd_busid(from_str, &from_id0, &from_id1, &from)) { + rc = -EINVAL; + goto out; + } to = from; to_id0 = from_id0; to_id1 = from_id1; if (to_str) { - if (dasd_busid(to_str, &to_id0, &to_id1, &to)) - goto out_err; + if (dasd_busid(to_str, &to_id0, &to_id1, &to)) { + rc = -EINVAL; + goto out; + } if (from_id0 != to_id0 || from_id1 != to_id1 || from > to) { pr_err("%s is not a valid device range\n", range); - goto out_err; + rc = -EINVAL; + goto out; } } features = dasd_feature_list(features_str); - if (features < 0) - goto out_err; + if (features < 0) { + rc = -EINVAL; + goto out; + } /* each device in dasd= parameter should be set initially online */ features |= DASD_FEATURE_INITIAL_ONLINE; while (from <= to) { sprintf(bus_id, "%01x.%01x.%04x", from_id0, from_id1, from++); devmap = dasd_add_busid(bus_id, features); - if (IS_ERR(devmap)) - return PTR_ERR(devmap); + if (IS_ERR(devmap)) { + rc = PTR_ERR(devmap); + goto out; + } } - return 0; +out: + kfree(tmp); -out_err: - return -EINVAL; + return rc; } /* -- cgit v1.2.3-59-g8ed1b From 7a003637923e9b127dec84fd55b67f4c2c900684 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 9 May 2017 12:42:26 +0200 Subject: s390/pkey: add missing __user annotations Add missing __user annotations to get rid of a couple of sparse warnings. All callers actually pass kernel pointers instead of user space pointers, however the pointers are being used within KERNEL_DS. So everything is fine. 
Corresponding sparse warnings: drivers/s390/crypto/pkey_api.c:181:41: warning: incorrect type in assignment (different address spaces) expected char [noderef] *request_control_blk_addr got void * Cc: Harald Freudenberger Cc: Martin Schwidefsky Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- drivers/s390/crypto/pkey_api.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 8f466facf15e..f61fa47135a6 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -178,9 +178,9 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb, pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr); pxcrb->request_control_blk_length = preqcblk->cprb_len + preqcblk->req_parml; - pxcrb->request_control_blk_addr = (void *) preqcblk; + pxcrb->request_control_blk_addr = (void __user *) preqcblk; pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl; - pxcrb->reply_control_blk_addr = (void *) prepcblk; + pxcrb->reply_control_blk_addr = (void __user *) prepcblk; } /* -- cgit v1.2.3-59-g8ed1b From 8ff3458865ef43de80c10a32a4797bb941840aee Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 9 May 2017 12:19:16 +0200 Subject: s390/zcrypt: get rid of little/big endian handling The zcrypt code contains a couple of functions which receive a "big_endian" argument. All callers naturally pass "1" for big endian, since s390 is big endian. Therefore get rid of this argument and also get rid of the cpu_to_le()/cpu_to_be() calls. This way we get rid of a couple of sparse warnings: drivers/s390/crypto/zcrypt_cca_key.h:255:34: warning: incorrect type in assignment (different base types) expected unsigned short [unsigned] ulen got restricted __be16 [usertype] Cc: Harald Freudenberger Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- drivers/s390/crypto/zcrypt_cca_key.h | 36 +++++++++-------------------------- drivers/s390/crypto/zcrypt_msgtype6.c | 4 ++-- 2 files changed, 11 insertions(+), 29 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h index ca0cdbe46368..771cbb91c11e 100644 --- a/drivers/s390/crypto/zcrypt_cca_key.h +++ b/drivers/s390/crypto/zcrypt_cca_key.h @@ -133,8 +133,7 @@ struct cca_pvt_ext_CRT_sec { * * Returns the size of the key area or -EFAULT */ -static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex, - void *p, int big_endian) +static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex, void *p) { static struct cca_token_hdr static_pvt_me_hdr = { .token_identifier = 0x1E, @@ -162,13 +161,8 @@ static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex, memset(key, 0, sizeof(*key)); - if (big_endian) { - key->t6_hdr.blen = cpu_to_be16(0x189); - key->t6_hdr.ulen = cpu_to_be16(0x189 - 2); - } else { - key->t6_hdr.blen = cpu_to_le16(0x189); - key->t6_hdr.ulen = cpu_to_le16(0x189 - 2); - } + key->t6_hdr.blen = 0x189; + key->t6_hdr.ulen = 0x189 - 2; key->pvtMeHdr = static_pvt_me_hdr; key->pvtMeSec = static_pvt_me_sec; key->pubMeSec = static_pub_me_sec; @@ -205,8 +199,7 @@ static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex, * * Returns the size of the key area or -EFAULT */ -static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, - void *p, int big_endian) +static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p) { static struct cca_token_hdr static_pub_hdr = { 
.token_identifier = 0x1E, @@ -251,13 +244,8 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, 2*mex->inputdatalength - i; key->pubHdr.token_length = key->pubSec.section_length + sizeof(key->pubHdr); - if (big_endian) { - key->t6_hdr.ulen = cpu_to_be16(key->pubHdr.token_length + 4); - key->t6_hdr.blen = cpu_to_be16(key->pubHdr.token_length + 6); - } else { - key->t6_hdr.ulen = cpu_to_le16(key->pubHdr.token_length + 4); - key->t6_hdr.blen = cpu_to_le16(key->pubHdr.token_length + 6); - } + key->t6_hdr.ulen = key->pubHdr.token_length + 4; + key->t6_hdr.blen = key->pubHdr.token_length + 6; return sizeof(*key) + 2*mex->inputdatalength - i; } @@ -271,8 +259,7 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, * * Returns the size of the key area or -EFAULT */ -static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, - void *p, int big_endian) +static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p) { static struct cca_public_sec static_cca_pub_sec = { .section_identifier = 4, @@ -298,13 +285,8 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, size = sizeof(*key) + key_len + sizeof(*pub) + 3; /* parameter block.key block */ - if (big_endian) { - key->t6_hdr.blen = cpu_to_be16(size); - key->t6_hdr.ulen = cpu_to_be16(size - 2); - } else { - key->t6_hdr.blen = cpu_to_le16(size); - key->t6_hdr.ulen = cpu_to_le16(size - 2); - } + key->t6_hdr.blen = size; + key->t6_hdr.ulen = size - 2; /* key token header */ key->token.token_identifier = CCA_TKN_HDR_ID_EXT; diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index e5563ffeb839..4fddb4319481 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -291,7 +291,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq, return -EFAULT; /* Set up key which is located after the variable length text. */ - size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1); + size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength); if (size < 0) return size; size += sizeof(*msg) + mex->inputdatalength; @@ -353,7 +353,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq, return -EFAULT; /* Set up key which is located after the variable length text. 
*/ - size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1); + size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength); if (size < 0) return size; size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */ -- cgit v1.2.3-59-g8ed1b From a1b19d07ca71d6c60b49771f244fc6536cd15358 Mon Sep 17 00:00:00 2001 From: Harald Freudenberger Date: Mon, 15 May 2017 12:38:15 +0200 Subject: s390/zcrypt: remove unused function zcrypt_type6_mex_key_de() Signed-off-by: Harald Freudenberger Signed-off-by: Martin Schwidefsky --- drivers/s390/crypto/zcrypt_cca_key.h | 85 ------------------------------------ 1 file changed, 85 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h index 771cbb91c11e..12cff6262566 100644 --- a/drivers/s390/crypto/zcrypt_cca_key.h +++ b/drivers/s390/crypto/zcrypt_cca_key.h @@ -48,26 +48,6 @@ struct cca_token_hdr { #define CCA_TKN_HDR_ID_EXT 0x1E -/** - * mapping for the cca private ME section - */ -struct cca_private_ext_ME_sec { - unsigned char section_identifier; - unsigned char version; - unsigned short section_length; - unsigned char private_key_hash[20]; - unsigned char reserved1[4]; - unsigned char key_format; - unsigned char reserved2; - unsigned char key_name_hash[20]; - unsigned char key_use_flags[4]; - unsigned char reserved3[6]; - unsigned char reserved4[24]; - unsigned char confounder[24]; - unsigned char exponent[128]; - unsigned char modulus[128]; -} __attribute__((packed)); - #define CCA_PVT_USAGE_ALL 0x80 /** @@ -123,71 +103,6 @@ struct cca_pvt_ext_CRT_sec { #define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08 #define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40 -/** - * Set up private key fields of a type6 MEX message. - * Note that all numerics in the key token are big-endian, - * while the entries in the key block header are little-endian. - * - * @mex: pointer to user input data - * @p: pointer to memory area for the key - * - * Returns the size of the key area or -EFAULT - */ -static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex, void *p) -{ - static struct cca_token_hdr static_pvt_me_hdr = { - .token_identifier = 0x1E, - .token_length = 0x0183, - }; - static struct cca_private_ext_ME_sec static_pvt_me_sec = { - .section_identifier = 0x02, - .section_length = 0x016C, - .key_use_flags = {0x80,0x00,0x00,0x00}, - }; - static struct cca_public_sec static_pub_me_sec = { - .section_identifier = 0x04, - .section_length = 0x000F, - .exponent_len = 0x0003, - }; - static char pk_exponent[3] = { 0x01, 0x00, 0x01 }; - struct { - struct T6_keyBlock_hdr t6_hdr; - struct cca_token_hdr pvtMeHdr; - struct cca_private_ext_ME_sec pvtMeSec; - struct cca_public_sec pubMeSec; - char exponent[3]; - } __attribute__((packed)) *key = p; - unsigned char *temp; - - memset(key, 0, sizeof(*key)); - - key->t6_hdr.blen = 0x189; - key->t6_hdr.ulen = 0x189 - 2; - key->pvtMeHdr = static_pvt_me_hdr; - key->pvtMeSec = static_pvt_me_sec; - key->pubMeSec = static_pub_me_sec; - /* - * In a private key, the modulus doesn't appear in the public - * section. So, an arbitrary public exponent of 0x010001 will be - * used. 
- */ - memcpy(key->exponent, pk_exponent, 3); - - /* key parameter block */ - temp = key->pvtMeSec.exponent + - sizeof(key->pvtMeSec.exponent) - mex->inputdatalength; - if (copy_from_user(temp, mex->b_key, mex->inputdatalength)) - return -EFAULT; - - /* modulus */ - temp = key->pvtMeSec.modulus + - sizeof(key->pvtMeSec.modulus) - mex->inputdatalength; - if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength)) - return -EFAULT; - key->pubMeSec.modulus_bit_len = 8 * mex->inputdatalength; - return sizeof(*key); -} - /** * Set up private key fields of a type6 MEX message. The _pad variant * strips leading zeroes from the b_key. -- cgit v1.2.3-59-g8ed1b From ac994e80f94f440138774830f2edf148d6ece1f3 Mon Sep 17 00:00:00 2001 From: Harald Freudenberger Date: Fri, 12 May 2017 16:35:14 +0200 Subject: s390/zcrypt: Rework ap init in case of out of range domain param. When a out of range domain parameter was given, the init function returned with -EINVAL and the driver was not operational. As the driver is statically build into the kernel and is able to work with multiple domains anyway the init function should continue. Now the user has a chance to write a new default domain value via sysfs attribute file. Also added two new dbf debug messages related to the domain value handling. Signed-off-by: Harald Freudenberger Signed-off-by: Martin Schwidefsky --- drivers/s390/crypto/ap_bus.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index ea099910b4e9..6dee598979e7 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -766,7 +766,7 @@ static ssize_t ap_domain_store(struct bus_type *bus, ap_domain_index = domain; spin_unlock_bh(&ap_domain_lock); - AP_DBF(DBF_DEBUG, "store new default domain=%d\n", domain); + AP_DBF(DBF_DEBUG, "stored new default domain=%d\n", domain); return count; } @@ -952,6 +952,7 @@ static int ap_select_domain(void) } if (best_domain >= 0){ ap_domain_index = best_domain; + AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index); spin_unlock_bh(&ap_domain_lock); return 0; } @@ -988,7 +989,7 @@ static void ap_scan_bus(struct work_struct *unused) ap_qid_t qid; int depth = 0, type = 0; unsigned int functions = 0; - int rc, id, dom, borked, domains; + int rc, id, dom, borked, domains, defdomdevs = 0; AP_DBF(DBF_DEBUG, "ap_scan_bus running\n"); @@ -1052,6 +1053,8 @@ static void ap_scan_bus(struct work_struct *unused) put_device(dev); if (!borked) { domains++; + if (dom == ap_domain_index) + defdomdevs++; continue; } } @@ -1098,6 +1101,8 @@ static void ap_scan_bus(struct work_struct *unused) continue; } domains++; + if (dom == ap_domain_index) + defdomdevs++; } /* end domain loop */ if (ac) { /* remove card dev if there are no queue devices */ @@ -1106,6 +1111,11 @@ static void ap_scan_bus(struct work_struct *unused) put_device(&ac->ap_dev.device); } } /* end device loop */ + + if (defdomdevs < 1) + AP_DBF(DBF_INFO, "no queue device with default domain %d available\n", + ap_domain_index); + out: mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); } @@ -1174,14 +1184,14 @@ int __init ap_module_init(void) ap_init_configuration(); if (ap_configuration) - max_domain_id = ap_max_domain_id ? : (AP_DOMAINS - 1); + max_domain_id = + ap_max_domain_id ? 
ap_max_domain_id : AP_DOMAINS - 1; else max_domain_id = 15; if (ap_domain_index < -1 || ap_domain_index > max_domain_id) { pr_warn("%d is not a valid cryptographic domain\n", ap_domain_index); - rc = -EINVAL; - goto out_free; + ap_domain_index = -1; } /* In resume callback we need to know if the user had set the domain. * If so, we can not just reset it. @@ -1254,7 +1264,6 @@ out: unregister_reset_call(&ap_reset_call); if (ap_using_interrupts()) unregister_adapter_interrupt(&ap_airq); -out_free: kfree(ap_configuration); return rc; } -- cgit v1.2.3-59-g8ed1b From dbed23dba0fb153c4566af59da7dfa06b780b0af Mon Sep 17 00:00:00 2001 From: Harald Freudenberger Date: Fri, 12 May 2017 18:53:41 +0200 Subject: s390/zcrypt: Add some debug messages on failure. Added some dbf debug messages on failure of the most important ioctl calls. These messages are only enabled with dbf level 6 (debug) and so do not affect the normal operating mode which uses level 3 (errors and higher). Signed-off-by: Harald Freudenberger Signed-off-by: Martin Schwidefsky --- drivers/s390/crypto/zcrypt_api.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 93015f85d4a6..b1c27e28859b 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -821,8 +821,10 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, do { rc = zcrypt_rsa_modexpo(&mex); } while (rc == -EAGAIN); - if (rc) + if (rc) { + ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d", rc); return rc; + } return put_user(mex.outputdatalength, &umex->outputdatalength); } case ICARSACRT: { @@ -838,8 +840,10 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, do { rc = zcrypt_rsa_crt(&crt); } while (rc == -EAGAIN); - if (rc) + if (rc) { + ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d", rc); return rc; + } return put_user(crt.outputdatalength, &ucrt->outputdatalength); } case ZSECSENDCPRB: { @@ -855,6 +859,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, do { rc = zcrypt_send_cprb(&xcRB); } while (rc == -EAGAIN); + if (rc) + ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d", rc); if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) return -EFAULT; return rc; @@ -872,6 +878,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, do { rc = zcrypt_send_ep11_cprb(&xcrb); } while (rc == -EAGAIN); + if (rc) + ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d", rc); if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) return -EFAULT; return rc; -- cgit v1.2.3-59-g8ed1b From b487a914f853545842a0899329b6b72fe56c4081 Mon Sep 17 00:00:00 2001 From: Jan Höppner Date: Tue, 23 May 2017 16:17:30 +0200 Subject: s390/dasd: Display read-only attribute correctly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have two flags, DASD_FLAG_DEVICE_RO and DASD_FEATURE_READONLY, that tell us whether a device is read-only. DASD_FLAG_DEVICE_RO is set when a device is attached as read-only to z/VM and DASD_FEATURE_READONLY is set when either the corresponding kernel parameter is configured, or the read-only state is changed via sysfs. This is valuable information in any case. However, only the feature flag is being checked at the moment when we display the current state. Fix this by checking both flags. 
Reviewed-by: Stefan Haberland Signed-off-by: Jan Höppner Signed-off-by: Martin Schwidefsky --- drivers/s390/block/dasd_devmap.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 05e5762d045e..0ce84f0a4d7f 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -748,13 +748,22 @@ static ssize_t dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf) { struct dasd_devmap *devmap; - int ro_flag; + struct dasd_device *device; + int ro_flag = 0; devmap = dasd_find_busid(dev_name(dev)); - if (!IS_ERR(devmap)) - ro_flag = (devmap->features & DASD_FEATURE_READONLY) != 0; - else - ro_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_READONLY) != 0; + if (IS_ERR(devmap)) + goto out; + + ro_flag = !!(devmap->features & DASD_FEATURE_READONLY); + + spin_lock(&dasd_devmap_lock); + device = devmap->device; + if (device) + ro_flag |= test_bit(DASD_FLAG_DEVICE_RO, &device->flags); + spin_unlock(&dasd_devmap_lock); + +out: return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n"); } -- cgit v1.2.3-59-g8ed1b From 2757fe1d8ebd0e6ab1dbf1105978b8c8369dcc49 Mon Sep 17 00:00:00 2001 From: Stefan Haberland Date: Tue, 16 May 2017 10:30:13 +0200 Subject: s390/dasd: fix unusable device after safe offline processing The safe offline processing needs, as well as the normal offline processing, to be locked against multiple parallel executions. But it should be able to be overtaken by a normal offline processing to make sure that the device does not wait forever for outstanding I/O if the user wants to. Unfortunately the parallel processing of safe offline and normal offline might lead to a race situation where both threads report successful execution to the CIO layer which in turn tries to deregister the kobject of the device twice. This leads to a refcount_t: underflow; use-after-free. error and the device is not able to be set online again afterwards without a reboot. Correct the locking of the safe offline processing by doing the following: - Use the cdev lock to secure all set and test operations to the device flags. - Two safe offline processes are locked against each other using the DASD_FLAG_SAFE_OFFLINE and DASD_FLAG_SAFE_OFFLINE_RUNNING device flags. The differentiation between offline triggered and offline running is needed since the normal offline attribute is owned by CIO and we have to pass over control in between. - The dasd_generic_set_offline process handles the offline processing. It is locked against parallel execution using the DASD_FLAG_OFFLINE. - Only a running safe offline should be able to be overtaken by a single normal offline. This is ensured by clearing the DASD_FLAG_SAFE_OFFLINE_RUNNING flag when a normal offline overtakes. So this can only happen ones. - The safe offline just aborts in this case doing nothing and the normal offline processing finishes as usual. 
Signed-off-by: Stefan Haberland Signed-off-by: Martin Schwidefsky --- drivers/s390/block/dasd.c | 68 ++++++++++++++++++++++------------------ drivers/s390/block/dasd_devmap.c | 7 ++++- 2 files changed, 44 insertions(+), 31 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 6fb3fd5efc11..b0c65dcb6865 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -3562,57 +3562,69 @@ int dasd_generic_set_offline(struct ccw_device *cdev) else pr_warn("%s: The DASD cannot be set offline while it is in use\n", dev_name(&cdev->dev)); - clear_bit(DASD_FLAG_OFFLINE, &device->flags); - goto out_busy; + rc = -EBUSY; + goto out_err; } } - if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { - /* - * safe offline already running - * could only be called by normal offline so safe_offline flag - * needs to be removed to run normal offline and kill all I/O - */ - if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) - /* Already doing normal offline processing */ - goto out_busy; - else - clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); - } else { - if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) - /* Already doing offline processing */ - goto out_busy; + /* + * Test if the offline processing is already running and exit if so. + * If a safe offline is being processed this could only be a normal + * offline that should be able to overtake the safe offline and + * cancel any I/O we do not want to wait for any longer + */ + if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { + if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { + clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, + &device->flags); + } else { + rc = -EBUSY; + goto out_err; + } } - set_bit(DASD_FLAG_OFFLINE, &device->flags); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); /* - * if safe_offline called set safe_offline_running flag and + * if safe_offline is called set safe_offline_running flag and * clear safe_offline so that a call to normal offline * can overrun safe_offline processing */ if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { + /* need to unlock here to wait for outstanding I/O */ + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); /* * If we want to set the device safe offline all IO operations * should be finished before continuing the offline process * so sync bdev first and then wait for our queues to become * empty */ - /* sync blockdev and partitions */ if (device->block) { rc = fsync_bdev(device->block->bdev); if (rc != 0) goto interrupted; } - /* schedule device tasklet and wait for completion */ dasd_schedule_device_bh(device); rc = wait_event_interruptible(shutdown_waitq, _wait_for_empty_queues(device)); if (rc != 0) goto interrupted; + + /* + * check if a normal offline process overtook the offline + * processing in this case simply do nothing beside returning + * that we got interrupted + * otherwise mark safe offline as not running any longer and + * continue with normal offline + */ + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { + rc = -ERESTARTSYS; + goto out_err; + } + clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); } + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); dasd_set_target_state(device, DASD_STATE_NEW); /* dasd_delete_device destroys the device reference. 
*/ @@ -3624,22 +3636,18 @@ int dasd_generic_set_offline(struct ccw_device *cdev) */ if (block) dasd_free_block(block); + return 0; interrupted: /* interrupted by signal */ - clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); clear_bit(DASD_FLAG_OFFLINE, &device->flags); - dasd_put_device(device); - - return rc; - -out_busy: +out_err: dasd_put_device(device); spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - - return -EBUSY; + return rc; } EXPORT_SYMBOL_GPL(dasd_generic_set_offline); diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 0ce84f0a4d7f..e943d9c48926 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -950,11 +950,14 @@ dasd_safe_offline_store(struct device *dev, struct device_attribute *attr, { struct ccw_device *cdev = to_ccwdev(dev); struct dasd_device *device; + unsigned long flags; int rc; - device = dasd_device_from_cdev(cdev); + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + device = dasd_device_from_cdev_locked(cdev); if (IS_ERR(device)) { rc = PTR_ERR(device); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); goto out; } @@ -962,12 +965,14 @@ dasd_safe_offline_store(struct device *dev, struct device_attribute *attr, test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { /* Already doing offline processing */ dasd_put_device(device); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); rc = -EBUSY; goto out; } set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); dasd_put_device(device); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); rc = ccw_device_set_offline(cdev); -- cgit v1.2.3-59-g8ed1b From e8ac01555d9e464249e8bb122337d6d6e5589ccc Mon Sep 17 00:00:00 2001 From: Stefan Haberland Date: Thu, 18 May 2017 13:24:45 +0200 Subject: s390/dasd: fix hanging safe offline The safe offline processing may hang forever because it waits for I/O which can not be started because of the offline flag that prevents new I/O from being started. Allow I/O to be started during safe offline processing because in this special case we take care that the queues are empty before throwing away the device. Signed-off-by: Stefan Haberland Signed-off-by: Martin Schwidefsky --- drivers/s390/block/dasd.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index b0c65dcb6865..c72ac57940f4 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1965,8 +1965,12 @@ static int __dasd_device_is_unusable(struct dasd_device *device, { int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM); - if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { - /* dasd is being set offline. */ + if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && + !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { + /* + * dasd is being set offline + * but it is no safe offline where we have to allow I/O + */ return 1; } if (device->stopped) { -- cgit v1.2.3-59-g8ed1b From 36f6237ebf4dfdf62813540e962d53584ba8b271 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Mon, 15 May 2017 15:49:07 +0200 Subject: s390/cio: introduce io_subchannel_type The sysfs attributes implemented by the vfio_ccw driver are also implemented by the io_subchannel driver. Move these into a device_type which is set by the css bus. 
Signed-off-by: Sebastian Ott Reviewed-by: Dong Jia Shi Reviewed-by: Cornelia Huck Signed-off-by: Martin Schwidefsky --- drivers/s390/cio/css.c | 49 ++++++++++++++++++++++++++++++++++ drivers/s390/cio/device.c | 42 ----------------------------- drivers/s390/cio/vfio_ccw_drv.c | 58 +---------------------------------------- 3 files changed, 50 insertions(+), 99 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index e2aa944eb566..d3e504c3c362 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -296,6 +296,51 @@ static const struct attribute_group *default_subch_attr_groups[] = { NULL, }; +static ssize_t chpids_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); + struct chsc_ssd_info *ssd = &sch->ssd_info; + ssize_t ret = 0; + int mask; + int chp; + + for (chp = 0; chp < 8; chp++) { + mask = 0x80 >> chp; + if (ssd->path_mask & mask) + ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); + else + ret += sprintf(buf + ret, "00 "); + } + ret += sprintf(buf + ret, "\n"); + return ret; +} +static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); + +static ssize_t pimpampom_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); + struct pmcw *pmcw = &sch->schib.pmcw; + + return sprintf(buf, "%02x %02x %02x\n", + pmcw->pim, pmcw->pam, pmcw->pom); +} +static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); + +static struct attribute *io_subchannel_type_attrs[] = { + &dev_attr_chpids.attr, + &dev_attr_pimpampom.attr, + NULL, +}; +ATTRIBUTE_GROUPS(io_subchannel_type); + +static const struct device_type io_subchannel_type = { + .groups = io_subchannel_type_groups, +}; + int css_register_subchannel(struct subchannel *sch) { int ret; @@ -304,6 +349,10 @@ int css_register_subchannel(struct subchannel *sch) sch->dev.parent = &channel_subsystems[0]->device; sch->dev.bus = &css_bus_type; sch->dev.groups = default_subch_attr_groups; + + if (sch->st == SUBCHANNEL_TYPE_IO) + sch->dev.type = &io_subchannel_type; + /* * We don't want to generate uevents for I/O subchannels that don't * have a working ccw device behind them since they will be diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index b8006ea9099c..7be01a58b44f 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -208,44 +208,6 @@ int __init io_subchannel_init(void) /************************ device handling **************************/ -/* - * A ccw_device has some interfaces in sysfs in addition to the - * standard ones. - * The following entries are designed to export the information which - * resided in 2.4 in /proc/subchannels. Subchannel and device number - * are obvious, so they don't have an entry :) - * TODO: Split chpids and pimpampom up? Where is "in use" in the tree? 
- */ -static ssize_t -chpids_show (struct device * dev, struct device_attribute *attr, char * buf) -{ - struct subchannel *sch = to_subchannel(dev); - struct chsc_ssd_info *ssd = &sch->ssd_info; - ssize_t ret = 0; - int chp; - int mask; - - for (chp = 0; chp < 8; chp++) { - mask = 0x80 >> chp; - if (ssd->path_mask & mask) - ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); - else - ret += sprintf(buf + ret, "00 "); - } - ret += sprintf (buf+ret, "\n"); - return min((ssize_t)PAGE_SIZE, ret); -} - -static ssize_t -pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf) -{ - struct subchannel *sch = to_subchannel(dev); - struct pmcw *pmcw = &sch->schib.pmcw; - - return sprintf (buf, "%02x %02x %02x\n", - pmcw->pim, pmcw->pam, pmcw->pom); -} - static ssize_t devtype_show (struct device *dev, struct device_attribute *attr, char *buf) { @@ -636,8 +598,6 @@ static ssize_t vpm_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%02x\n", sch->vpm); } -static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); -static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); static DEVICE_ATTR(devtype, 0444, devtype_show, NULL); static DEVICE_ATTR(cutype, 0444, cutype_show, NULL); static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); @@ -647,8 +607,6 @@ static DEVICE_ATTR(logging, 0200, NULL, initiate_logging); static DEVICE_ATTR(vpm, 0444, vpm_show, NULL); static struct attribute *io_subchannel_attrs[] = { - &dev_attr_chpids.attr, - &dev_attr_pimpampom.attr, &dev_attr_logging.attr, &dev_attr_vpm.attr, NULL, diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index e90dd43d2a55..a25367ebaa89 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -89,54 +89,6 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) private->state = VFIO_CCW_STATE_IDLE; } -/* - * Sysfs interfaces - */ -static ssize_t chpids_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct subchannel *sch = to_subchannel(dev); - struct chsc_ssd_info *ssd = &sch->ssd_info; - ssize_t ret = 0; - int chp; - int mask; - - for (chp = 0; chp < 8; chp++) { - mask = 0x80 >> chp; - if (ssd->path_mask & mask) - ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); - else - ret += sprintf(buf + ret, "00 "); - } - ret += sprintf(buf+ret, "\n"); - return ret; -} - -static ssize_t pimpampom_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct subchannel *sch = to_subchannel(dev); - struct pmcw *pmcw = &sch->schib.pmcw; - - return sprintf(buf, "%02x %02x %02x\n", - pmcw->pim, pmcw->pam, pmcw->pom); -} - -static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); -static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); - -static struct attribute *vfio_subchannel_attrs[] = { - &dev_attr_chpids.attr, - &dev_attr_pimpampom.attr, - NULL, -}; - -static struct attribute_group vfio_subchannel_attr_group = { - .attrs = vfio_subchannel_attrs, -}; - /* * Css driver callbacks */ @@ -174,13 +126,9 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) if (ret) goto out_free; - ret = sysfs_create_group(&sch->dev.kobj, &vfio_subchannel_attr_group); - if (ret) - goto out_disable; - ret = vfio_ccw_mdev_reg(sch); if (ret) - goto out_rm_group; + goto out_disable; INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo); atomic_set(&private->avail, 1); @@ -188,8 +136,6 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) return 0; -out_rm_group: - sysfs_remove_group(&sch->dev.kobj, 
					      &vfio_subchannel_attr_group);
 out_disable:
 	cio_disable_subchannel(sch);
 out_free:
@@ -206,8 +152,6 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
 
 	vfio_ccw_mdev_unreg(sch);
 
-	sysfs_remove_group(&sch->dev.kobj, &vfio_subchannel_attr_group);
-
 	dev_set_drvdata(&sch->dev, NULL);
 	kfree(private);
-- cgit v1.2.3-59-g8ed1b

From 795c9a5106119f45d2501c4fb01051178904753f Mon Sep 17 00:00:00 2001
From: Jan Höppner
Date: Thu, 22 Jun 2017 17:17:15 +0200
Subject: s390/dasd: Fix faulty ENODEV for RO sysfs attribute
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If a device is offline it can still be set to read-only via the bus id
through sysfs. Only the read-only feature flag for the ccw_device is then
set. If the device is online the corresponding block device needs to be
set to read-only as well (via set_disk_ro()).

The check whether there is a device to do so, however, happens after the
feature flag was set. This leads to an unnecessary "no such device" error
in the offline case.

This bug was introduced by commit 7571cb1c8e3cc ("s390/dasd: Make use of
dasd_set_feature() more often").

Fix this by simply returning count if no device is available.

Fixes: 7571cb1c8e3cc ("s390/dasd: Make use of dasd_set_feature() more often")
Reviewed-by: Stefan Haberland
Signed-off-by: Jan Höppner
Signed-off-by: Martin Schwidefsky
---
 drivers/s390/block/dasd_devmap.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/s390')

diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index e943d9c48926..7c7351276d2e 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -786,7 +786,7 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
 
 	device = dasd_device_from_cdev(cdev);
 	if (IS_ERR(device))
-		return PTR_ERR(device);
+		return count;
 
 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 	val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
-- cgit v1.2.3-59-g8ed1b
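
To close, the effect of the last patch is easiest to see in the resulting control flow of the store routine: the read-only feature flag is recorded first, and the absence of an online dasd_device is then treated as success instead of -ENODEV. The sketch below is a condensed, hypothetical rendering of that flow, not the verbatim dasd_devmap.c code; the ccwdev locking and the reference counting around the gendisk that the real function needs are omitted for brevity.

/* Condensed sketch only -- simplified from the flow described above. */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/genhd.h>
#include <asm/ccwdev.h>
#include "dasd_int.h"		/* dasd-internal declarations assumed */

static ssize_t ro_store_sketch(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct dasd_device *device;
	unsigned int val;
	int rc;

	if (kstrtouint(buf, 0, &val) || val > 1)
		return -EINVAL;

	/* Record the feature flag on the ccw_device; this also works
	 * while the device is offline. */
	rc = dasd_set_feature(cdev, DASD_FEATURE_READONLY, val);
	if (rc)
		return rc;

	/* Offline device: the flag is stored and there is no block
	 * device to update -- not an error, so report success. */
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return count;

	/* Online device: propagate the setting to the block layer. */
	val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
	if (device->block && device->block->gdp)
		set_disk_ro(device->block->gdp, val);

	dasd_put_device(device);
	return count;
}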