Diffstat (limited to 'block/blk-mq-tag.c')
-rw-r--r--  block/blk-mq-tag.c | 81
1 file changed, 50 insertions(+), 31 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 60c9d4a93fe4..d53a764b05ea 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -140,35 +140,39 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
return atomic_read(&hctx->nr_active) < depth;
}
-static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
+static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
+ bool nowrap)
{
- int tag, org_last_tag, end;
- bool wrap = last_tag != 0;
+ int tag, org_last_tag = last_tag;
- org_last_tag = last_tag;
- end = bm->depth;
- do {
-restart:
- tag = find_next_zero_bit(&bm->word, end, last_tag);
- if (unlikely(tag >= end)) {
+ while (1) {
+ tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
+ if (unlikely(tag >= bm->depth)) {
/*
- * We started with an offset, start from 0 to
+ * We started with an offset, and we didn't reset the
+ * offset to 0 in a failure case, so start from 0 to
* exhaust the map.
*/
- if (wrap) {
- wrap = false;
- end = org_last_tag;
- last_tag = 0;
- goto restart;
+ if (org_last_tag && last_tag && !nowrap) {
+ last_tag = org_last_tag = 0;
+ continue;
}
return -1;
}
+
+ if (!test_and_set_bit(tag, &bm->word))
+ break;
+
last_tag = tag + 1;
- } while (test_and_set_bit(tag, &bm->word));
+ if (last_tag >= bm->depth - 1)
+ last_tag = 0;
+ }
return tag;
}
+#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
+
/*
* Straight forward bitmap tag implementation, where each bit is a tag
* (cleared == free, and set == busy). The small twist is using per-cpu
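
The rewritten __bt_get_word() above folds the old wrap/goto dance into one loop with a nowrap flag: a round-robin caller refuses to wrap back below its starting offset, so a miss means nothing is free at or past that point. Below is a minimal userspace model of that control flow; blk_align_bitmap, find_next_zero_bit() and test_and_set_bit() are stood in for by a plain unsigned long and non-atomic bit operations, purely to make the wrap decision visible.

	/* Userspace model of the new __bt_get_word() scan (illustrative:
	 * no atomicity; kernel helpers replaced by plain bit operations). */
	#include <stdio.h>

	static int find_next_zero(unsigned long word, int depth, int start)
	{
		for (int i = start; i < depth; i++)
			if (!(word & (1UL << i)))
				return i;
		return depth;		/* miss, like find_next_zero_bit() */
	}

	static int get_word_tag(unsigned long *word, int depth,
				unsigned int last_tag, int nowrap)
	{
		int tag, org_last_tag = last_tag;

		while (1) {
			tag = find_next_zero(*word, depth, last_tag);
			if (tag >= depth) {
				/* wrap to 0 at most once, never for RR */
				if (org_last_tag && last_tag && !nowrap) {
					last_tag = org_last_tag = 0;
					continue;
				}
				return -1;
			}
			/* test_and_set_bit() stand-in */
			if (!(*word & (1UL << tag))) {
				*word |= 1UL << tag;
				break;
			}
			last_tag = tag + 1;
			if (last_tag >= depth - 1)
				last_tag = 0;
		}
		return tag;
	}

	int main(void)
	{
		unsigned long word = 0x6;	/* bits 1 and 2 busy */

		printf("%d\n", get_word_tag(&word, 4, 3, 0));	/* 3 */
		printf("%d\n", get_word_tag(&word, 4, 3, 1));	/* -1 */
		return 0;
	}

With nowrap = 0 the second call would wrap and hand out bit 0 instead of failing; that difference is the whole point of the flag.
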
@@ -181,7 +185,7 @@ restart:
* until the map is exhausted.
*/
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
- unsigned int *tag_cache)
+ unsigned int *tag_cache, struct blk_mq_tags *tags)
{
unsigned int last_tag, org_last_tag;
int index, i, tag;
@@ -193,15 +197,24 @@ static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
index = TAG_TO_INDEX(bt, last_tag);
for (i = 0; i < bt->map_nr; i++) {
- tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
+ tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
+ BT_ALLOC_RR(tags));
if (tag != -1) {
tag += (index << bt->bits_per_word);
goto done;
}
- last_tag = 0;
- if (++index >= bt->map_nr)
+ /*
+ * Jump to next index, and reset the last tag to be the
+ * first tag of that index
+ */
+ index++;
+ last_tag = (index << bt->bits_per_word);
+
+ if (index >= bt->map_nr) {
index = 0;
+ last_tag = 0;
+ }
}
*tag_cache = 0;
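
In __bt_get() above, the outer loop now keeps last_tag consistent with the word it indexes: on a miss it advances to the first tag of the next word instead of resetting to 0, and only goes back to 0 when the index itself wraps. A worked example of that arithmetic, assuming the file's existing helpers are the usual shift/mask pair (an assumption about unchanged code, worth verifying in your tree):

	/* Assumes TAG_TO_INDEX(bt, tag) == tag >> bits_per_word and
	 * TAG_TO_BIT(bt, tag) == tag & ((1 << bits_per_word) - 1). */
	#include <stdio.h>

	int main(void)
	{
		unsigned int bits_per_word = 5;	/* 32 tags per bitmap word */
		unsigned int last_tag = 70;
		unsigned int index = last_tag >> bits_per_word;		   /* 2 */
		unsigned int bit = last_tag & ((1u << bits_per_word) - 1); /* 6 */

		/* miss in word 2: jump to the first tag of word 3 */
		index++;
		last_tag = index << bits_per_word;			   /* 96 */

		printf("index=%u bit=%u next=%u\n", index, bit, last_tag);
		return 0;
	}
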
@@ -212,7 +225,7 @@ static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
* up using the specific cached tag.
*/
done:
- if (tag == org_last_tag) {
+ if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
last_tag = tag + 1;
if (last_tag >= bt->depth - 1)
last_tag = 0;
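
At the done: label the cache-update rule gains a round-robin case: FIFO still advances the cached hint only when the cached tag itself was handed out, while RR advances unconditionally so successive allocations march through the tag space. The decision, extracted as a standalone sketch of the logic above (not kernel code):

	#include <stdio.h>

	static unsigned int next_tag_cache(unsigned int tag,
					   unsigned int org_last_tag,
					   unsigned int depth, int rr,
					   unsigned int old_cache)
	{
		if (tag == org_last_tag || rr) {
			unsigned int last_tag = tag + 1;

			if (last_tag >= depth - 1)
				last_tag = 0;
			return last_tag;
		}
		return old_cache;	/* FIFO keeps the existing hint */
	}

	int main(void)
	{
		/* FIFO: allocated 7 but the hint was 4, hint unchanged */
		printf("%u\n", next_tag_cache(7, 4, 32, 0, 4));	/* 4 */
		/* RR: same allocation, hint always moves past the tag */
		printf("%u\n", next_tag_cache(7, 4, 32, 1, 4));	/* 8 */
		return 0;
	}
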
@@ -241,13 +254,13 @@ static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
static int bt_get(struct blk_mq_alloc_data *data,
struct blk_mq_bitmap_tags *bt,
struct blk_mq_hw_ctx *hctx,
- unsigned int *last_tag)
+ unsigned int *last_tag, struct blk_mq_tags *tags)
{
struct bt_wait_state *bs;
DEFINE_WAIT(wait);
int tag;
- tag = __bt_get(hctx, bt, last_tag);
+ tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
return tag;
@@ -258,7 +271,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
do {
prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
- tag = __bt_get(hctx, bt, last_tag);
+ tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
break;
@@ -273,7 +286,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
* Retry tag allocation after running the hardware queue,
* as running the queue may also have found completions.
*/
- tag = __bt_get(hctx, bt, last_tag);
+ tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
break;
@@ -304,7 +317,7 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
int tag;
tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
- &data->ctx->last_tag);
+ &data->ctx->last_tag, data->hctx->tags);
if (tag >= 0)
return tag + data->hctx->tags->nr_reserved_tags;
@@ -320,7 +333,8 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
return BLK_MQ_TAG_FAIL;
}
- tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
+ tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
+ data->hctx->tags);
if (tag < 0)
return BLK_MQ_TAG_FAIL;
@@ -392,7 +406,8 @@ void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
BUG_ON(real_tag >= tags->nr_tags);
bt_clear_tag(&tags->bitmap_tags, real_tag);
- *last_tag = real_tag;
+ if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
+ *last_tag = real_tag;
} else {
BUG_ON(tag >= tags->nr_reserved_tags);
bt_clear_tag(&tags->breserved_tags, tag);
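
The put side in blk_mq_put_tag() is the other half of the policy: FIFO writes the just-freed tag back into the per-ctx hint so it is preferred on the next allocation, while RR deliberately ignores frees so the cursor keeps moving. Roughly, as an illustrative sequence (not actual output):

	FIFO: alloc() -> 5, put(5) sets last_tag = 5, next alloc() -> 5 again
	RR:   alloc() -> 5 (hint already advanced at allocation time),
	      put(5) leaves the hint alone, next alloc() -> 6
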
@@ -509,6 +524,7 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
if (!bt->bs) {
kfree(bt->map);
+ bt->map = NULL;
return -ENOMEM;
}
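
Nulling bt->map after the kfree() in bt_alloc() guards the error path: bt_free() later in this file presumably frees bt->map again on teardown, and since kfree(NULL) is a no-op, clearing the pointer turns a would-be double free into nothing. The general pattern:

	kfree(p);
	p = NULL;	/* kfree(NULL) is a no-op, so later cleanup stays safe */
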
@@ -529,10 +545,12 @@ static void bt_free(struct blk_mq_bitmap_tags *bt)
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
- int node)
+ int node, int alloc_policy)
{
unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
+ tags->alloc_policy = alloc_policy;
+
if (bt_alloc(&tags->bitmap_tags, depth, node, false))
goto enomem;
if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
@@ -546,7 +564,8 @@ enomem:
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
- unsigned int reserved_tags, int node)
+ unsigned int reserved_tags,
+ int node, int alloc_policy)
{
struct blk_mq_tags *tags;
@@ -562,7 +581,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;
- return blk_mq_init_bitmap_tags(tags, node);
+ return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}
void blk_mq_free_tags(struct blk_mq_tags *tags)
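
From a caller's point of view the policy simply rides in through the new alloc_policy parameter to blk_mq_init_tags(). A hedged caller-side sketch; BLK_TAG_ALLOC_FIFO/BLK_TAG_ALLOC_RR are assumed to come from elsewhere in this patch series (check include/linux/blkdev.h in your tree), and example_init() is a made-up wrapper, not part of the patch:

	/* Hypothetical wrapper; only blk_mq_init_tags() is from this patch. */
	static struct blk_mq_tags *example_init(unsigned int total_tags,
						unsigned int reserved_tags,
						int node, bool want_rr)
	{
		int policy = want_rr ? BLK_TAG_ALLOC_RR : BLK_TAG_ALLOC_FIFO;

		return blk_mq_init_tags(total_tags, reserved_tags, node, policy);
	}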