Diffstat (limited to 'drivers/block/null_blk')
-rw-r--r--  drivers/block/null_blk/main.c     | 297
-rw-r--r--  drivers/block/null_blk/null_blk.h |  29
-rw-r--r--  drivers/block/null_blk/trace.h    |   4
-rw-r--r--  drivers/block/null_blk/zoned.c    |  25
4 files changed, 226 insertions(+), 129 deletions(-)
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 323af5c9c802..1f154f92f4c2 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -11,6 +11,9 @@
#include <linux/init.h>
#include "null_blk.h"
+#undef pr_fmt
+#define pr_fmt(fmt) "null_blk: " fmt
+
#define FREE_BATCH 16
#define TICKS_PER_SEC 50ULL
@@ -74,12 +77,6 @@ enum {
NULL_IRQ_TIMER = 2,
};
-enum {
- NULL_Q_BIO = 0,
- NULL_Q_RQ = 1,
- NULL_Q_MQ = 2,
-};
-
static bool g_virt_boundary = false;
module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");
@@ -204,6 +201,22 @@ static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
+static bool g_memory_backed;
+module_param_named(memory_backed, g_memory_backed, bool, 0444);
+MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false");
+
+static bool g_discard;
+module_param_named(discard, g_discard, bool, 0444);
+MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");
+
+static unsigned long g_cache_size;
+module_param_named(cache_size, g_cache_size, ulong, 0444);
+MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+
+static unsigned int g_mbps;
+module_param_named(mbps, g_mbps, uint, 0444);
+MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");
+
static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false");
@@ -232,6 +245,7 @@ static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
+static struct nullb *null_find_dev_by_name(const char *name);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);
static inline struct nullb_device *to_nullb_device(struct config_item *item)
@@ -340,9 +354,9 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev,
return 0;
/*
- * Make sure at least one queue exists for each of submit and poll.
+ * Make sure at least one submit queue exists.
*/
- if (!submit_queues || !poll_queues)
+ if (!submit_queues)
return -EINVAL;
/*
@@ -411,6 +425,8 @@ NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
+NULLB_DEVICE_ATTR(no_sched, bool, NULL);
+NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -431,9 +447,10 @@ static ssize_t nullb_device_power_store(struct config_item *item,
if (!dev->power && newp) {
if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
return count;
- if (null_add_dev(dev)) {
+ ret = null_add_dev(dev);
+ if (ret) {
clear_bit(NULLB_DEV_FL_UP, &dev->flags);
- return -ENOMEM;
+ return ret;
}
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
@@ -533,6 +550,8 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_zone_max_open,
&nullb_device_attr_zone_max_active,
&nullb_device_attr_virt_boundary,
+ &nullb_device_attr_no_sched,
+ &nullb_device_attr_shared_tag_bitmap,
NULL,
};
@@ -559,6 +578,9 @@ config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
struct nullb_device *dev;
+ if (null_find_dev_by_name(name))
+ return ERR_PTR(-EEXIST);
+
dev = null_alloc_dev();
if (!dev)
return ERR_PTR(-ENOMEM);
@@ -586,7 +608,13 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE,
- "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active,blocksize,max_sectors,virt_boundary\n");
+ "badblocks,blocking,blocksize,cache_size,"
+ "completion_nsec,discard,home_node,hw_queue_depth,"
+ "irqmode,max_sectors,mbps,memory_backed,no_sched,"
+ "poll_queues,power,queue_mode,shared_tag_bitmap,size,"
+ "submit_queues,use_per_node_hctx,virt_boundary,zoned,"
+ "zone_capacity,zone_max_active,zone_max_open,"
+ "zone_nr_conv,zone_size\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -648,6 +676,10 @@ static struct nullb_device *null_alloc_dev(void)
dev->irqmode = g_irqmode;
dev->hw_queue_depth = g_hw_queue_depth;
dev->blocking = g_blocking;
+ dev->memory_backed = g_memory_backed;
+ dev->discard = g_discard;
+ dev->cache_size = g_cache_size;
+ dev->mbps = g_mbps;
dev->use_per_node_hctx = g_use_per_node_hctx;
dev->zoned = g_zoned;
dev->zone_size = g_zone_size;
@@ -656,6 +688,8 @@ static struct nullb_device *null_alloc_dev(void)
dev->zone_max_open = g_zone_max_open;
dev->zone_max_active = g_zone_max_active;
dev->virt_boundary = g_virt_boundary;
+ dev->no_sched = g_no_sched;
+ dev->shared_tag_bitmap = g_shared_tag_bitmap;
return dev;
}
@@ -719,26 +753,25 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
return NULL;
}
-static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
+static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, struct bio *bio)
{
struct nullb_cmd *cmd;
DEFINE_WAIT(wait);
- cmd = __alloc_cmd(nq);
- if (cmd || !can_wait)
- return cmd;
-
do {
- prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
+ /*
+ * This avoids multiple return statements, multiple calls to
+ * __alloc_cmd() and a fast path call to prepare_to_wait().
+ */
cmd = __alloc_cmd(nq);
- if (cmd)
- break;
-
+ if (cmd) {
+ cmd->bio = bio;
+ return cmd;
+ }
+ prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
io_schedule();
+ finish_wait(&nq->wait, &wait);
} while (1);
-
- finish_wait(&nq->wait, &wait);
- return cmd;
}
static void end_cmd(struct nullb_cmd *cmd)
@@ -777,24 +810,22 @@ static void null_complete_rq(struct request *rq)
end_cmd(blk_mq_rq_to_pdu(rq));
}
-static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
+static struct nullb_page *null_alloc_page(void)
{
struct nullb_page *t_page;
- t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
+ t_page = kmalloc(sizeof(struct nullb_page), GFP_NOIO);
if (!t_page)
- goto out;
+ return NULL;
- t_page->page = alloc_pages(gfp_flags, 0);
- if (!t_page->page)
- goto out_freepage;
+ t_page->page = alloc_pages(GFP_NOIO, 0);
+ if (!t_page->page) {
+ kfree(t_page);
+ return NULL;
+ }
memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
return t_page;
-out_freepage:
- kfree(t_page);
-out:
- return NULL;
}
static void null_free_page(struct nullb_page *t_page)
@@ -932,7 +963,7 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
spin_unlock_irq(&nullb->lock);
- t_page = null_alloc_page(GFP_NOIO);
+ t_page = null_alloc_page();
if (!t_page)
goto out_lock;
@@ -1311,7 +1342,7 @@ static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
}
static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
- enum req_opf op,
+ enum req_op op,
sector_t sector,
sector_t nr_sectors)
{
@@ -1382,9 +1413,8 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
}
}
-blk_status_t null_process_cmd(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector,
- unsigned int nr_sectors)
+blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, unsigned int nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
blk_status_t ret;
@@ -1402,7 +1432,7 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd,
}
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
- sector_t nr_sectors, enum req_opf op)
+ sector_t nr_sectors, enum req_op op)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
@@ -1476,12 +1506,8 @@ static void null_submit_bio(struct bio *bio)
sector_t nr_sectors = bio_sectors(bio);
struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
struct nullb_queue *nq = nullb_to_queue(nullb);
- struct nullb_cmd *cmd;
-
- cmd = alloc_cmd(nq, 1);
- cmd->bio = bio;
- null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
+ null_handle_cmd(alloc_cmd(nq, bio), sector, nr_sectors, bio_op(bio));
}
static bool should_timeout_request(struct request *rq)
@@ -1502,7 +1528,7 @@ static bool should_requeue_request(struct request *rq)
return false;
}
-static int null_map_queues(struct blk_mq_tag_set *set)
+static void null_map_queues(struct blk_mq_tag_set *set)
{
struct nullb *nullb = set->driver_data;
int i, qoff;
@@ -1529,7 +1555,9 @@ static int null_map_queues(struct blk_mq_tag_set *set)
} else {
pr_warn("tag set has unexpected nr_hw_queues: %d\n",
set->nr_hw_queues);
- return -EINVAL;
+ WARN_ON_ONCE(true);
+ submit_queues = 1;
+ poll_queues = 0;
}
}
@@ -1551,8 +1579,6 @@ static int null_map_queues(struct blk_mq_tag_set *set)
qoff += map->nr_queues;
blk_mq_map_queues(map);
}
-
- return 0;
}
static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
@@ -1574,14 +1600,16 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req));
- end_cmd(cmd);
+ if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
+ blk_mq_end_request_batch))
+ end_cmd(cmd);
nr++;
}
return nr;
}
-static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
+static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
@@ -1604,7 +1632,7 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
* Only fake timeouts need to execute blk_mq_complete_request() here.
*/
cmd->error = BLK_STS_TIMEOUT;
- if (cmd->fake_timeout)
+ if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
blk_mq_complete_request(rq);
return BLK_EH_DONE;
}
@@ -1659,7 +1687,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
static void cleanup_queue(struct nullb_queue *nq)
{
- kfree(nq->tag_map);
+ bitmap_free(nq->tag_map);
kfree(nq->cmds);
}
@@ -1740,7 +1768,7 @@ static void null_del_dev(struct nullb *nullb)
null_restart_queue_async(nullb);
}
- blk_cleanup_disk(nullb->disk);
+ put_disk(nullb->disk);
if (dev->queue_mode == NULL_Q_MQ &&
nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
@@ -1769,9 +1797,7 @@ static void null_config_discard(struct nullb *nullb)
}
nullb->q->limits.discard_granularity = nullb->dev->blocksize;
- nullb->q->limits.discard_alignment = nullb->dev->blocksize;
blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}
static const struct block_device_operations null_bio_ops = {
@@ -1788,14 +1814,13 @@ static const struct block_device_operations null_rq_ops = {
static int setup_commands(struct nullb_queue *nq)
{
struct nullb_cmd *cmd;
- int i, tag_size;
+ int i;
nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
if (!nq->cmds)
return -ENOMEM;
- tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
- nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
+ nq->tag_map = bitmap_zalloc(nq->queue_depth, GFP_KERNEL);
if (!nq->tag_map) {
kfree(nq->cmds);
return -ENOMEM;
@@ -1850,7 +1875,6 @@ static int null_gendisk_register(struct nullb *nullb)
set_capacity(disk, size);
- disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->major = null_major;
disk->first_minor = nullb->index;
disk->minors = 1;
@@ -1873,31 +1897,48 @@ static int null_gendisk_register(struct nullb *nullb)
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
+ unsigned int flags = BLK_MQ_F_SHOULD_MERGE;
+ int hw_queues, numa_node;
+ unsigned int queue_depth;
int poll_queues;
+ if (nullb) {
+ hw_queues = nullb->dev->submit_queues;
+ poll_queues = nullb->dev->poll_queues;
+ queue_depth = nullb->dev->hw_queue_depth;
+ numa_node = nullb->dev->home_node;
+ if (nullb->dev->no_sched)
+ flags |= BLK_MQ_F_NO_SCHED;
+ if (nullb->dev->shared_tag_bitmap)
+ flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ if (nullb->dev->blocking)
+ flags |= BLK_MQ_F_BLOCKING;
+ } else {
+ hw_queues = g_submit_queues;
+ poll_queues = g_poll_queues;
+ queue_depth = g_hw_queue_depth;
+ numa_node = g_home_node;
+ if (g_no_sched)
+ flags |= BLK_MQ_F_NO_SCHED;
+ if (g_shared_tag_bitmap)
+ flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ if (g_blocking)
+ flags |= BLK_MQ_F_BLOCKING;
+ }
+
set->ops = &null_mq_ops;
- set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
- g_submit_queues;
- poll_queues = nullb ? nullb->dev->poll_queues : g_poll_queues;
- if (poll_queues)
- set->nr_hw_queues += poll_queues;
- set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
- g_hw_queue_depth;
- set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
set->cmd_size = sizeof(struct nullb_cmd);
- set->flags = BLK_MQ_F_SHOULD_MERGE;
- if (g_no_sched)
- set->flags |= BLK_MQ_F_NO_SCHED;
- if (g_shared_tag_bitmap)
- set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ set->flags = flags;
set->driver_data = nullb;
- if (g_poll_queues)
+ set->nr_hw_queues = hw_queues;
+ set->queue_depth = queue_depth;
+ set->numa_node = numa_node;
+ if (poll_queues) {
+ set->nr_hw_queues += poll_queues;
set->nr_maps = 3;
- else
+ } else {
set->nr_maps = 1;
-
- if ((nullb && nullb->dev->blocking) || g_blocking)
- set->flags |= BLK_MQ_F_BLOCKING;
+ }
return blk_mq_alloc_tag_set(set);
}
@@ -1918,8 +1959,6 @@ static int null_validate_conf(struct nullb_device *dev)
if (dev->poll_queues > g_poll_queues)
dev->poll_queues = g_poll_queues;
- else if (dev->poll_queues == 0)
- dev->poll_queues = 1;
dev->prev_poll_queues = dev->poll_queues;
dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
@@ -2051,8 +2090,13 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
mutex_lock(&lock);
- nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
- dev->index = nullb->index;
+ rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+ if (rv < 0) {
+ mutex_unlock(&lock);
+ goto out_cleanup_zone;
+ }
+ nullb->index = rv;
+ dev->index = rv;
mutex_unlock(&lock);
blk_queue_logical_block_size(nullb->q, dev->blocksize);
@@ -2068,21 +2112,32 @@ static int null_add_dev(struct nullb_device *dev)
null_config_discard(nullb);
- sprintf(nullb->disk_name, "nullb%d", nullb->index);
+ if (config_item_name(&dev->item)) {
+ /* Use configfs dir name as the device name */
+ snprintf(nullb->disk_name, sizeof(nullb->disk_name),
+ "%s", config_item_name(&dev->item));
+ } else {
+ sprintf(nullb->disk_name, "nullb%d", nullb->index);
+ }
rv = null_gendisk_register(nullb);
if (rv)
- goto out_cleanup_zone;
+ goto out_ida_free;
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
mutex_unlock(&lock);
+ pr_info("disk %s created\n", nullb->disk_name);
+
return 0;
+
+out_ida_free:
+ ida_free(&nullb_indexes, nullb->index);
out_cleanup_zone:
null_free_zoned_dev(dev);
out_cleanup_disk:
- blk_cleanup_disk(nullb->disk);
+ put_disk(nullb->disk);
out_cleanup_tags:
if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
@@ -2095,12 +2150,53 @@ out:
return rv;
}
+static struct nullb *null_find_dev_by_name(const char *name)
+{
+ struct nullb *nullb = NULL, *nb;
+
+ mutex_lock(&lock);
+ list_for_each_entry(nb, &nullb_list, list) {
+ if (strcmp(nb->disk_name, name) == 0) {
+ nullb = nb;
+ break;
+ }
+ }
+ mutex_unlock(&lock);
+
+ return nullb;
+}
+
+static int null_create_dev(void)
+{
+ struct nullb_device *dev;
+ int ret;
+
+ dev = null_alloc_dev();
+ if (!dev)
+ return -ENOMEM;
+
+ ret = null_add_dev(dev);
+ if (ret) {
+ null_free_dev(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void null_destroy_dev(struct nullb *nullb)
+{
+ struct nullb_device *dev = nullb->dev;
+
+ null_del_dev(nullb);
+ null_free_dev(dev);
+}
+
static int __init null_init(void)
{
int ret = 0;
unsigned int i;
struct nullb *nullb;
- struct nullb_device *dev;
if (g_bs > PAGE_SIZE) {
pr_warn("invalid block size\n");
@@ -2120,19 +2216,21 @@ static int __init null_init(void)
}
if (g_queue_mode == NULL_Q_RQ) {
- pr_err("legacy IO path no longer available\n");
+ pr_err("legacy IO path is no longer available\n");
return -EINVAL;
}
+
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("submit_queues param is set to %u.\n",
- nr_online_nodes);
+ nr_online_nodes);
g_submit_queues = nr_online_nodes;
}
- } else if (g_submit_queues > nr_cpu_ids)
+ } else if (g_submit_queues > nr_cpu_ids) {
g_submit_queues = nr_cpu_ids;
- else if (g_submit_queues <= 0)
+ } else if (g_submit_queues <= 0) {
g_submit_queues = 1;
+ }
if (g_queue_mode == NULL_Q_MQ && shared_tags) {
ret = null_init_tag_set(NULL, &tag_set);
@@ -2156,16 +2254,9 @@ static int __init null_init(void)
}
for (i = 0; i < nr_devices; i++) {
- dev = null_alloc_dev();
- if (!dev) {
- ret = -ENOMEM;
- goto err_dev;
- }
- ret = null_add_dev(dev);
- if (ret) {
- null_free_dev(dev);
+ ret = null_create_dev();
+ if (ret)
goto err_dev;
- }
}
pr_info("module loaded\n");
@@ -2174,9 +2265,7 @@ static int __init null_init(void)
err_dev:
while (!list_empty(&nullb_list)) {
nullb = list_entry(nullb_list.next, struct nullb, list);
- dev = nullb->dev;
- null_del_dev(nullb);
- null_free_dev(dev);
+ null_destroy_dev(nullb);
}
unregister_blkdev(null_major, "nullb");
err_conf:
@@ -2197,12 +2286,8 @@ static void __exit null_exit(void)
mutex_lock(&lock);
while (!list_empty(&nullb_list)) {
- struct nullb_device *dev;
-
nullb = list_entry(nullb_list.next, struct nullb, list);
- dev = nullb->dev;
- null_del_dev(nullb);
- null_free_dev(dev);
+ null_destroy_dev(nullb);
}
mutex_unlock(&lock);
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 78eb56b0ca55..94ff68052b1e 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -16,13 +16,15 @@
#include <linux/mutex.h>
struct nullb_cmd {
- struct request *rq;
- struct bio *bio;
+ union {
+ struct request *rq;
+ struct bio *bio;
+ };
unsigned int tag;
blk_status_t error;
+ bool fake_timeout;
struct nullb_queue *nq;
struct hrtimer timer;
- bool fake_timeout;
};
struct nullb_queue {
@@ -58,6 +60,13 @@ struct nullb_zone {
unsigned int capacity;
};
+/* Queue modes */
+enum {
+ NULL_Q_BIO = 0,
+ NULL_Q_RQ = 1,
+ NULL_Q_MQ = 2,
+};
+
struct nullb_device {
struct nullb *nullb;
struct config_item item;
@@ -104,6 +113,8 @@ struct nullb_device {
bool discard; /* if support discard */
bool zoned; /* if device is zoned */
bool virt_boundary; /* virtual boundary on/off for the device */
+ bool no_sched; /* no IO scheduler for the device */
+ bool shared_tag_bitmap; /* use hostwide shared tags */
};
struct nullb {
@@ -127,9 +138,8 @@ struct nullb {
blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
sector_t nr_sectors);
-blk_status_t null_process_cmd(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector,
- unsigned int nr_sectors);
+blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, unsigned int nr_sectors);
#ifdef CONFIG_BLK_DEV_ZONED
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
@@ -137,9 +147,8 @@ int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
-blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector,
- sector_t nr_sectors);
+blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector, unsigned int len);
#else
@@ -155,7 +164,7 @@ static inline int null_register_zoned_dev(struct nullb *nullb)
}
static inline void null_free_zoned_dev(struct nullb_device *dev) {}
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector, sector_t nr_sectors)
+ enum req_op op, sector_t sector, sector_t nr_sectors)
{
return BLK_STS_NOTSUPP;
}
diff --git a/drivers/block/null_blk/trace.h b/drivers/block/null_blk/trace.h
index ce3b430e88c5..6b2b370e786f 100644
--- a/drivers/block/null_blk/trace.h
+++ b/drivers/block/null_blk/trace.h
@@ -36,7 +36,7 @@ TRACE_EVENT(nullb_zone_op,
TP_ARGS(cmd, zone_no, zone_cond),
TP_STRUCT__entry(
__array(char, disk, DISK_NAME_LEN)
- __field(enum req_opf, op)
+ __field(enum req_op, op)
__field(unsigned int, zone_no)
__field(unsigned int, zone_cond)
),
@@ -44,7 +44,7 @@ TRACE_EVENT(nullb_zone_op,
__entry->op = req_op(cmd->rq);
__entry->zone_no = zone_no;
__entry->zone_cond = zone_cond;
- __assign_disk_name(__entry->disk, cmd->rq->rq_disk);
+ __assign_disk_name(__entry->disk, cmd->rq->q->disk);
),
TP_printk("%s req=%-15s zone_no=%u zone_cond=%-10s",
__print_disk_name(__entry->disk),
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index dae54dd1aeac..55a69e48ef8b 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -6,6 +6,9 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
+#undef pr_fmt
+#define pr_fmt(fmt) "null_blk: " fmt
+
static inline sector_t mb_to_sects(unsigned long mb)
{
return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
@@ -75,8 +78,8 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
dev->zone_capacity = dev->zone_size;
if (dev->zone_capacity > dev->zone_size) {
- pr_err("null_blk: zone capacity (%lu MB) larger than zone size (%lu MB)\n",
- dev->zone_capacity, dev->zone_size);
+ pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
+ dev->zone_capacity, dev->zone_size);
return -EINVAL;
}
@@ -156,7 +159,7 @@ int null_register_zoned_dev(struct nullb *nullb)
struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
- blk_queue_set_zoned(nullb->disk, BLK_ZONED_HM);
+ disk_set_zoned(nullb->disk, BLK_ZONED_HM);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
@@ -167,12 +170,12 @@ int null_register_zoned_dev(struct nullb *nullb)
return ret;
} else {
blk_queue_chunk_sectors(q, dev->zone_size_sects);
- q->nr_zones = blkdev_nr_zones(nullb->disk);
+ nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
}
blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
- blk_queue_max_open_zones(q, dev->zone_max_open);
- blk_queue_max_active_zones(q, dev->zone_max_active);
+ disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
+ disk_set_max_active_zones(nullb->disk, dev->zone_max_active);
return 0;
}
@@ -395,10 +398,10 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
*/
if (append) {
sector = zone->wp;
- if (cmd->bio)
- cmd->bio->bi_iter.bi_sector = sector;
- else
+ if (dev->queue_mode == NULL_Q_MQ)
cmd->rq->__sector = sector;
+ else
+ cmd->bio->bi_iter.bi_sector = sector;
} else if (sector != zone->wp) {
ret = BLK_STS_IOERR;
goto unlock;
@@ -597,7 +600,7 @@ static blk_status_t null_reset_zone(struct nullb_device *dev,
return BLK_STS_OK;
}
-static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
+static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
@@ -650,7 +653,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
return ret;
}
-blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
+blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, sector_t nr_sectors)
{
struct nullb_device *dev;