Diffstat (limited to 'drivers/block/nbd.c')
-rw-r--r--  drivers/block/nbd.c  264
1 file changed, 142 insertions, 122 deletions
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 50a2020b5b72..0be84a3cb6d7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -41,6 +41,9 @@
#include <linux/nbd.h>
+static DEFINE_IDR(nbd_index_idr);
+static DEFINE_MUTEX(nbd_index_mutex);
+
struct nbd_sock {
struct socket *sock;
struct mutex tx_lock;
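
Note: the two new globals replace the fixed nbd_dev[] array removed further down: nbd_index_idr maps a device index to its struct nbd_device, and nbd_index_mutex serializes updates to that map. A minimal, self-contained sketch of the IDR API this builds on follows; the names my_idr, my_lock and struct my_obj are illustrative, not part of this patch.

	/* Sketch only (assumes <linux/idr.h> and <linux/mutex.h>). */
	struct my_obj;

	static DEFINE_IDR(my_idr);
	static DEFINE_MUTEX(my_lock);

	static int my_register(struct my_obj *obj)
	{
		int id;

		mutex_lock(&my_lock);
		/* file obj in the map and return the id it was stored under */
		id = idr_alloc(&my_idr, obj, 0, 0, GFP_KERNEL);
		mutex_unlock(&my_lock);
		return id;			/* >= 0 on success, -errno on failure */
	}

	static struct my_obj *my_lookup(int id)
	{
		struct my_obj *obj;

		mutex_lock(&my_lock);
		obj = idr_find(&my_idr, id);	/* NULL if nothing is filed there */
		mutex_unlock(&my_lock);
		return obj;
	}

	static void my_unregister(int id)
	{
		mutex_lock(&my_lock);
		idr_remove(&my_idr, id);
		mutex_unlock(&my_lock);
	}
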
@@ -89,8 +92,9 @@ static struct dentry *nbd_dbg_dir;
#define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16;
-static struct nbd_device *nbd_dev;
static int max_part;
+static struct workqueue_struct *recv_workqueue;
+static int part_shift;
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
@@ -193,13 +197,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
req->errors++;
- /*
- * If our disconnect packet times out then we're already holding the
- * config_lock and could deadlock here, so just set an error and return,
- * we'll handle shutting everything down later.
- */
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
- return BLK_EH_HANDLED;
mutex_lock(&nbd->config_lock);
sock_shutdown(nbd);
mutex_unlock(&nbd->config_lock);
@@ -271,21 +268,36 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index,
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
- int result, flags;
+ int result;
struct nbd_request request;
unsigned long size = blk_rq_bytes(req);
struct bio *bio;
u32 type;
u32 tag = blk_mq_unique_tag(req);
- if (req_op(req) == REQ_OP_DISCARD)
+ switch (req_op(req)) {
+ case REQ_OP_DISCARD:
type = NBD_CMD_TRIM;
- else if (req_op(req) == REQ_OP_FLUSH)
+ break;
+ case REQ_OP_FLUSH:
type = NBD_CMD_FLUSH;
- else if (rq_data_dir(req) == WRITE)
+ break;
+ case REQ_OP_WRITE:
type = NBD_CMD_WRITE;
- else
+ break;
+ case REQ_OP_READ:
type = NBD_CMD_READ;
+ break;
+ default:
+ return -EIO;
+ }
+
+ if (rq_data_dir(req) == WRITE &&
+ (nbd->flags & NBD_FLAG_READ_ONLY)) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Write on read-only\n");
+ return -EIO;
+ }
memset(&request, 0, sizeof(request));
request.magic = htonl(NBD_REQUEST_MAGIC);
@@ -310,7 +322,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
if (type != NBD_CMD_WRITE)
return 0;
- flags = 0;
bio = req->bio;
while (bio) {
struct bio *next = bio->bi_next;
@@ -319,9 +330,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
bio_for_each_segment(bvec, bio, iter) {
bool is_last = !next && bio_iter_last(bvec, iter);
+ int flags = is_last ? 0 : MSG_MORE;
- if (is_last)
- flags = MSG_MORE;
dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
cmd, bvec.bv_len);
result = sock_send_bvec(nbd, index, &bvec, flags);
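
Note: each segment now computes its own flags value, passing MSG_MORE for every bvec except the last one of the request so the socket layer can coalesce the pieces into fewer packets; the removed lines did the opposite and set MSG_MORE only on the final segment. In this driver the bvec is pushed through sock_send_bvec()/sock_xmit(), which ends in a kernel_sendmsg() call; a hedged sketch of that send pattern, with an illustrative helper name:

	/* Sketch: send one buffer on a kernel socket, hinting that more data
	 * follows when this is not the final piece of the request. */
	static int send_chunk(struct socket *sock, void *buf, size_t len, bool last)
	{
		struct kvec iov = { .iov_base = buf, .iov_len = len };
		struct msghdr msg = {
			.msg_flags = MSG_NOSIGNAL | (last ? 0 : MSG_MORE),
		};

		return kernel_sendmsg(sock, &msg, &iov, 1, len);
	}
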
@@ -512,18 +522,6 @@ static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
goto error_out;
}
- if (req->cmd_type != REQ_TYPE_FS &&
- req->cmd_type != REQ_TYPE_DRV_PRIV)
- goto error_out;
-
- if (req->cmd_type == REQ_TYPE_FS &&
- rq_data_dir(req) == WRITE &&
- (nbd->flags & NBD_FLAG_READ_ONLY)) {
- dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Write on read-only\n");
- goto error_out;
- }
-
req->errors = 0;
nsock = nbd->socks[index];
@@ -787,7 +785,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
INIT_WORK(&args[i].work, recv_work);
args[i].nbd = nbd;
args[i].index = i;
- queue_work(system_long_wq, &args[i].work);
+ queue_work(recv_workqueue, &args[i].work);
}
wait_event_interruptible(nbd->recv_wq,
atomic_read(&nbd->recv_threads) == 0);
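
Note: receive work moves off the shared system_long_wq onto a dedicated "knbd-recv" workqueue, allocated in nbd_init() below with WQ_MEM_RECLAIM | WQ_HIGHPRI so the receive path keeps a rescuer thread and can make forward progress under memory pressure (nbd may be the very device that writeback is waiting on). A minimal sketch of that workqueue pattern, with hypothetical names:

	/* Sketch only; my_wq, struct my_item and my_item_fn are illustrative. */
	static struct workqueue_struct *my_wq;

	struct my_item {
		struct work_struct work;
		int index;
	};

	static void my_item_fn(struct work_struct *work)
	{
		struct my_item *it = container_of(work, struct my_item, work);

		pr_info("receive worker running for index %d\n", it->index);
	}

	static int my_start(struct my_item *it)
	{
		/* WQ_MEM_RECLAIM guarantees a rescuer, so queued work still runs
		 * when creating new kworker threads would itself need memory. */
		my_wq = alloc_workqueue("my-recv", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
		if (!my_wq)
			return -ENOMEM;

		INIT_WORK(&it->work, my_item_fn);
		queue_work(my_wq, &it->work);
		return 0;
	}

	static void my_stop(void)
	{
		destroy_workqueue(my_wq);	/* flushes pending work first */
	}
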
@@ -998,6 +996,103 @@ static struct blk_mq_ops nbd_mq_ops = {
.timeout = nbd_xmit_timeout,
};
+static void nbd_dev_remove(struct nbd_device *nbd)
+{
+ struct gendisk *disk = nbd->disk;
+ nbd->magic = 0;
+ if (disk) {
+ del_gendisk(disk);
+ blk_cleanup_queue(disk->queue);
+ blk_mq_free_tag_set(&nbd->tag_set);
+ put_disk(disk);
+ }
+ kfree(nbd);
+}
+
+static int nbd_dev_add(int index)
+{
+ struct nbd_device *nbd;
+ struct gendisk *disk;
+ struct request_queue *q;
+ int err = -ENOMEM;
+
+ nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
+ if (!nbd)
+ goto out;
+
+ disk = alloc_disk(1 << part_shift);
+ if (!disk)
+ goto out_free_nbd;
+
+ if (index >= 0) {
+ err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
+ GFP_KERNEL);
+ if (err == -ENOSPC)
+ err = -EEXIST;
+ } else {
+ err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
+ if (err >= 0)
+ index = err;
+ }
+ if (err < 0)
+ goto out_free_disk;
+
+ nbd->disk = disk;
+ nbd->tag_set.ops = &nbd_mq_ops;
+ nbd->tag_set.nr_hw_queues = 1;
+ nbd->tag_set.queue_depth = 128;
+ nbd->tag_set.numa_node = NUMA_NO_NODE;
+ nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
+ nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
+ BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
+ nbd->tag_set.driver_data = nbd;
+
+ err = blk_mq_alloc_tag_set(&nbd->tag_set);
+ if (err)
+ goto out_free_idr;
+
+ q = blk_mq_init_queue(&nbd->tag_set);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto out_free_tags;
+ }
+ disk->queue = q;
+
+ /*
+ * Tell the block layer that we are not a rotational device
+ */
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+ disk->queue->limits.discard_granularity = 512;
+ blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
+ disk->queue->limits.discard_zeroes_data = 0;
+ blk_queue_max_hw_sectors(disk->queue, 65536);
+ disk->queue->limits.max_sectors = 256;
+
+ nbd->magic = NBD_MAGIC;
+ mutex_init(&nbd->config_lock);
+ disk->major = NBD_MAJOR;
+ disk->first_minor = index << part_shift;
+ disk->fops = &nbd_fops;
+ disk->private_data = nbd;
+ sprintf(disk->disk_name, "nbd%d", index);
+ init_waitqueue_head(&nbd->recv_wq);
+ nbd_reset(nbd);
+ add_disk(disk);
+ return index;
+
+out_free_tags:
+ blk_mq_free_tag_set(&nbd->tag_set);
+out_free_idr:
+ idr_remove(&nbd_index_idr, index);
+out_free_disk:
+ put_disk(disk);
+out_free_nbd:
+ kfree(nbd);
+out:
+ return err;
+}
+
/*
* And here should be modules and kernel interface
* (Just smiley confuses emacs :-)
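
Note: nbd_dev_add() above uses both idr_alloc() calling conventions: with the one-slot range [index, index + 1) it either claims exactly that index or fails with -ENOSPC (remapped to -EEXIST, since an occupied slot means the device already exists), while end = 0 means "no upper bound" and hands back the lowest free id. A condensed sketch of that pattern, outside the nbd code and with an illustrative helper name:

	/* Sketch: claim a specific slot, or the first free one when index < 0. */
	static int claim_slot(struct idr *idr, void *ptr, int index)
	{
		int id;

		if (index >= 0) {
			/* end is exclusive: [index, index + 1) means exactly index */
			id = idr_alloc(idr, ptr, index, index + 1, GFP_KERNEL);
			if (id == -ENOSPC)
				id = -EEXIST;	/* that slot is already occupied */
		} else {
			/* end <= 0 means no upper bound: take the lowest free id */
			id = idr_alloc(idr, ptr, 0, 0, GFP_KERNEL);
		}
		return id;	/* the allocated id, or a negative errno */
	}
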
@@ -1005,9 +1100,7 @@ static struct blk_mq_ops nbd_mq_ops = {
static int __init nbd_init(void)
{
- int err = -ENOMEM;
int i;
- int part_shift;
BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
@@ -1036,111 +1129,38 @@ static int __init nbd_init(void)
if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL;
-
- nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
- if (!nbd_dev)
+ recv_workqueue = alloc_workqueue("knbd-recv",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (!recv_workqueue)
return -ENOMEM;
- for (i = 0; i < nbds_max; i++) {
- struct request_queue *q;
- struct gendisk *disk = alloc_disk(1 << part_shift);
- if (!disk)
- goto out;
- nbd_dev[i].disk = disk;
-
- nbd_dev[i].tag_set.ops = &nbd_mq_ops;
- nbd_dev[i].tag_set.nr_hw_queues = 1;
- nbd_dev[i].tag_set.queue_depth = 128;
- nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
- nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
- nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
- nbd_dev[i].tag_set.driver_data = &nbd_dev[i];
-
- err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
- if (err) {
- put_disk(disk);
- goto out;
- }
-
- /*
- * The new linux 2.5 block layer implementation requires
- * every gendisk to have its very own request_queue struct.
- * These structs are big so we dynamically allocate them.
- */
- q = blk_mq_init_queue(&nbd_dev[i].tag_set);
- if (IS_ERR(q)) {
- blk_mq_free_tag_set(&nbd_dev[i].tag_set);
- put_disk(disk);
- goto out;
- }
- disk->queue = q;
-
- /*
- * Tell the block layer that we are not a rotational device
- */
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
- queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
- disk->queue->limits.discard_granularity = 512;
- blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
- disk->queue->limits.discard_zeroes_data = 0;
- blk_queue_max_hw_sectors(disk->queue, 65536);
- disk->queue->limits.max_sectors = 256;
- }
-
- if (register_blkdev(NBD_MAJOR, "nbd")) {
- err = -EIO;
- goto out;
- }
-
- printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
+ if (register_blkdev(NBD_MAJOR, "nbd"))
+ return -EIO;
nbd_dbg_init();
- for (i = 0; i < nbds_max; i++) {
- struct gendisk *disk = nbd_dev[i].disk;
- nbd_dev[i].magic = NBD_MAGIC;
- mutex_init(&nbd_dev[i].config_lock);
- disk->major = NBD_MAJOR;
- disk->first_minor = i << part_shift;
- disk->fops = &nbd_fops;
- disk->private_data = &nbd_dev[i];
- sprintf(disk->disk_name, "nbd%d", i);
- init_waitqueue_head(&nbd_dev[i].recv_wq);
- nbd_reset(&nbd_dev[i]);
- add_disk(disk);
- }
+ mutex_lock(&nbd_index_mutex);
+ for (i = 0; i < nbds_max; i++)
+ nbd_dev_add(i);
+ mutex_unlock(&nbd_index_mutex);
+ return 0;
+}
+static int nbd_exit_cb(int id, void *ptr, void *data)
+{
+ struct nbd_device *nbd = ptr;
+ nbd_dev_remove(nbd);
return 0;
-out:
- while (i--) {
- blk_mq_free_tag_set(&nbd_dev[i].tag_set);
- blk_cleanup_queue(nbd_dev[i].disk->queue);
- put_disk(nbd_dev[i].disk);
- }
- kfree(nbd_dev);
- return err;
}
static void __exit nbd_cleanup(void)
{
- int i;
-
nbd_dbg_close();
- for (i = 0; i < nbds_max; i++) {
- struct gendisk *disk = nbd_dev[i].disk;
- nbd_dev[i].magic = 0;
- if (disk) {
- del_gendisk(disk);
- blk_cleanup_queue(disk->queue);
- blk_mq_free_tag_set(&nbd_dev[i].tag_set);
- put_disk(disk);
- }
- }
+ idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
+ idr_destroy(&nbd_index_idr);
+ destroy_workqueue(recv_workqueue);
unregister_blkdev(NBD_MAJOR, "nbd");
- kfree(nbd_dev);
- printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);