Diffstat (limited to 'block/blk-settings.c'):
 block/blk-settings.c | 86 +++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 74 insertions(+), 12 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 76a7e03bcd6c..9741d1d83e98 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -172,15 +172,13 @@ EXPORT_SYMBOL(blk_queue_max_hw_sectors);
*
* Description:
* If a driver doesn't want IOs to cross a given chunk size, it can set
- * this limit and prevent merging across chunks. Note that the chunk size
- * must currently be a power-of-2 in sectors. Also note that the block
- * layer must accept a page worth of data at any offset. So if the
- * crossing of chunks is a hard limitation in the driver, it must still be
- * prepared to split single page bios.
+ * this limit and prevent merging across chunks. Note that the block layer
+ * must accept a page worth of data at any offset. So if the crossing of
+ * chunks is a hard limitation in the driver, it must still be prepared
+ * to split single page bios.
**/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
- BUG_ON(!is_power_of_2(chunk_sectors));
q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);
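
With the power-of-2 restriction lifted, a driver whose hardware boundary is not a power of two can now set it directly. A minimal, hypothetical caller (the driver name and the 768-sector boundary are illustrative, not part of this patch):

#include <linux/blkdev.h>

/* Hypothetical driver setup: hardware with a 768-sector (384 KiB)
 * boundary that requests must not cross. 768 is not a power of two,
 * which the removed BUG_ON(!is_power_of_2(chunk_sectors)) would
 * have rejected. */
static void exampledrv_set_limits(struct request_queue *q)
{
	blk_queue_chunk_sectors(q, 768);
}
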
@@ -374,6 +372,19 @@ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

+void blk_queue_update_readahead(struct request_queue *q)
+{
+ /*
+ * For read-ahead of large files to be effective, we need to read ahead
+ * at least twice the optimal I/O size.
+ */
+ q->backing_dev_info->ra_pages =
+ max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
+ q->backing_dev_info->io_pages =
+ queue_max_sectors(q) >> (PAGE_SHIFT - 9);
+}
+EXPORT_SYMBOL_GPL(blk_queue_update_readahead);
+
/**
* blk_limits_io_min - set minimum request size for a device
* @limits: the queue limits
@@ -452,6 +463,8 @@ EXPORT_SYMBOL(blk_limits_io_opt);
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
blk_limits_io_opt(&q->limits, opt);
+ q->backing_dev_info->ra_pages =
+ max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
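
Both blk_queue_update_readahead() and blk_queue_io_opt() size ra_pages the same way. The arithmetic is easy to sanity-check in isolation; a standalone sketch with illustrative values (512 KiB io_opt, 2560-sector max_sectors, 4 KiB pages, so VM_READAHEAD_PAGES works out to 32; none of these numbers come from the patch):

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_SHIFT		12
#define VM_READAHEAD_PAGES	(128 * 1024 / PAGE_SIZE)	/* 32 pages */

int main(void)
{
	unsigned long io_opt = 512 * 1024;	/* queue_io_opt(q), bytes */
	unsigned long max_sectors = 2560;	/* queue_max_sectors(q) */

	/* Read ahead twice the optimal I/O size, never below the VM
	 * default of 128 KiB worth of pages. */
	unsigned long ra_pages = io_opt * 2 / PAGE_SIZE;
	if (ra_pages < VM_READAHEAD_PAGES)
		ra_pages = VM_READAHEAD_PAGES;

	/* Largest request, converted from 512-byte sectors to pages. */
	unsigned long io_pages = max_sectors >> (PAGE_SHIFT - 9);

	printf("ra_pages = %lu (%lu KiB)\n", ra_pages, ra_pages * 4);
	printf("io_pages = %lu (%lu KiB)\n", io_pages, io_pages * 4);
	return 0;
}

This prints ra_pages = 256 (1024 KiB) and io_pages = 320 (1280 KiB): read-ahead is twice the 512 KiB optimal I/O size, and io_pages matches the 1280 KiB largest request.
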
@@ -534,6 +547,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
+ t->chunk_sectors = lcm_not_zero(t->chunk_sectors, b->chunk_sectors);

/* Physical block size a multiple of the logical block size? */
if (t->physical_block_size & (t->logical_block_size - 1)) {
@@ -556,6 +570,13 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
ret = -1;
}

+ /* chunk_sectors a multiple of the physical block size? */
+ if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
+ t->chunk_sectors = 0;
+ t->misaligned = 1;
+ ret = -1;
+ }
+
t->raid_partial_stripes_expensive =
max(t->raid_partial_stripes_expensive,
b->raid_partial_stripes_expensive);
@@ -594,10 +615,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->discard_granularity;
}

- if (b->chunk_sectors)
- t->chunk_sectors = min_not_zero(t->chunk_sectors,
- b->chunk_sectors);
-
t->zoned = max(t->zoned, b->zoned);
return ret;
}
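
Stacking now takes the least common multiple of the top and bottom chunk sizes instead of the minimum (removed above), so a chunk boundary on the stacked device falls on a boundary of every member. A standalone sketch of the two new rules with illustrative values; gcd() and lcm_not_zero() are re-implemented here to mirror the kernel helpers from <linux/lcm.h>:

#include <stdio.h>

static unsigned int gcd(unsigned int a, unsigned int b)
{
	while (b) {
		unsigned int t = a % b;
		a = b;
		b = t;
	}
	return a;
}

/* Mirrors the kernel's lcm_not_zero(): lcm of two values, treating
 * zero as "no limit set" and returning the other value. */
static unsigned int lcm_not_zero(unsigned int a, unsigned int b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a / gcd(a, b) * b;
}

int main(void)
{
	/* Members with 32 KiB and 48 KiB chunks (64 and 96 sectors)
	 * stack to a 96 KiB chunk: lcm(64, 96) = 192 sectors. */
	unsigned int t_chunk = lcm_not_zero(64, 96);
	unsigned int phys = 4096;	/* physical block size, bytes */

	/* The new alignment check: drop a chunk size that is not a
	 * multiple of the physical block size and flag misalignment. */
	if ((t_chunk << 9) & (phys - 1))
		t_chunk = 0;

	printf("stacked chunk_sectors = %u\n", t_chunk);	/* 192 */
	return 0;
}
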
@@ -629,8 +646,7 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
top, bottom);
}

- t->backing_dev_info->io_pages =
- t->limits.max_sectors >> (PAGE_SHIFT - 9);
+ blk_queue_update_readahead(disk->queue);
}
EXPORT_SYMBOL(disk_stack_limits);
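
Stacking drivers pick the readahead refresh up automatically; a hypothetical md/dm-style fragment (the function and variable names are illustrative):

#include <linux/blkdev.h>

/* Hypothetical stacking-driver fragment: restacking a member's
 * limits now also refreshes ra_pages and io_pages through
 * blk_queue_update_readahead(), so the driver no longer sizes
 * readahead by hand. */
static void exampledrv_add_member(struct gendisk *top,
				  struct block_device *member)
{
	disk_stack_limits(top, member, 0);
}
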
@@ -801,6 +817,52 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

+/**
+ * blk_queue_set_zoned - configure a disk queue zoned model.
+ * @disk: the gendisk of the queue to configure
+ * @model: the zoned model to set
+ *
+ * Set the zoned model of the request queue of @disk according to @model.
+ * When @model is BLK_ZONED_HM (host managed), this should be called only
+ * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
+ * If @model specifies BLK_ZONED_HA (host aware), the effective model used
+ * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
+ * on the disk.
+ */
+void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
+{
+ switch (model) {
+ case BLK_ZONED_HM:
+ /*
+ * Host managed devices are supported only if
+ * CONFIG_BLK_DEV_ZONED is enabled.
+ */
+ WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
+ break;
+ case BLK_ZONED_HA:
+ /*
+ * Host aware devices can be treated either as regular block
+ * devices (similar to drive managed devices) or as zoned block
+ * devices to take advantage of the zone command set, similarly
+ * to host managed devices. We try the latter if there are no
+ * partitions and zoned block device support is enabled, else
+ * we do nothing special as far as the block layer is concerned.
+ */
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
+ disk_has_partitions(disk))
+ model = BLK_ZONED_NONE;
+ break;
+ case BLK_ZONED_NONE:
+ default:
+ if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
+ model = BLK_ZONED_NONE;
+ break;
+ }
+
+ disk->queue->limits.zoned = model;
+}
+EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
+
static int __init blk_settings_init(void)
{
blk_max_low_pfn = max_low_pfn - 1;
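
A hypothetical caller of blk_queue_set_zoned() above, e.g. from a driver's probe path (the helper and its host_managed flag are illustrative, not part of this patch):

#include <linux/blkdev.h>

static void exampledrv_setup_zoned(struct gendisk *disk, bool host_managed)
{
	/* Report what the hardware advertises; host-aware disks may be
	 * quietly downgraded to BLK_ZONED_NONE when CONFIG_BLK_DEV_ZONED
	 * is off or the disk already has partitions. */
	blk_queue_set_zoned(disk, host_managed ? BLK_ZONED_HM : BLK_ZONED_HA);
}
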