Diffstat (limited to 'block')
-rw-r--r--   block/Kconfig             |  2
-rw-r--r--   block/bio.c               |  3
-rw-r--r--   block/blk-mq.c            | 16
-rw-r--r--   block/blk-tag.c           |  4
-rw-r--r--   block/blk-zoned.c         |  4
-rw-r--r--   block/partitions/check.c  |  2
-rw-r--r--   block/partitions/ldm.c    |  2
7 files changed, 17 insertions(+), 16 deletions(-)
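
Every hunk below follows the same pattern: an allocation whose size is an open-coded count * sizeof(elem) multiplication is switched to a 2-factor allocator (kcalloc(), kcalloc_node(), kmalloc_array(), kvmalloc_array()) or, where no such helper exists, the multiplication is wrapped in array_size(). The point is overflow safety: if the count can be influenced at run time, the multiply can wrap and the allocation silently comes back smaller than the caller assumes. A minimal userspace sketch of the idea (an illustration only, not kernel code; __builtin_mul_overflow() is the GCC/Clang builtin):

#include <stdlib.h>
#include <string.h>

/*
 * Checked array allocation, userspace sketch only.  If n * size would
 * overflow size_t, refuse the allocation instead of returning a buffer
 * that is smaller than the caller expects.
 */
static void *alloc_array_zeroed(size_t n, size_t size)
{
        size_t bytes;

        if (__builtin_mul_overflow(n, size, &bytes))
                return NULL;            /* overflow detected */

        void *p = malloc(bytes);
        if (p)
                memset(p, 0, bytes);    /* zeroed, like kcalloc()/kzalloc() */
        return p;
}
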
diff --git a/block/Kconfig b/block/Kconfig
index 28ec55752b68..eb50fd4977c2 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -114,7 +114,7 @@ config BLK_DEV_THROTTLING
           one needs to mount and use blkio cgroup controller for creating
           cgroups and specifying per device IO rate policies.
 
-          See Documentation/cgroups/blkio-controller.txt for more information.
+          See Documentation/cgroup-v1/blkio-controller.txt for more information.
 
 config BLK_DEV_THROTTLING_LOW
         bool "Block throttling .low limit interface support (EXPERIMENTAL)"
diff --git a/block/bio.c b/block/bio.c
index f7e3d88bd0b6..67eff5eddc49 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -2088,7 +2088,8 @@ static int __init init_bio(void)
 {
         bio_slab_max = 2;
         bio_slab_nr = 0;
-        bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
+        bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
+                            GFP_KERNEL);
         if (!bio_slabs)
                 panic("bio: can't allocate bios\n");
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b6888ff556cf..b429d515b568 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1907,7 +1907,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
         if (!tags)
                 return NULL;
 
-        tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
+        tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
                                  node);
         if (!tags->rqs) {
@@ -1915,9 +1915,9 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
                 return NULL;
         }
 
-        tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
-                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
-                                 node);
+        tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
+                                        GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+                                        node);
         if (!tags->static_rqs) {
                 kfree(tags->rqs);
                 blk_mq_free_tags(tags);
@@ -2525,7 +2525,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
         /* init q->mq_kobj and sw queues' kobjects */
         blk_mq_sysfs_init(q);
 
-        q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
+        q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)),
                                        GFP_KERNEL, set->numa_node);
         if (!q->queue_hw_ctx)
                 goto err_percpu;
@@ -2744,14 +2744,14 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
         if (set->nr_hw_queues > nr_cpu_ids)
                 set->nr_hw_queues = nr_cpu_ids;
 
-        set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
+        set->tags = kcalloc_node(nr_cpu_ids, sizeof(struct blk_mq_tags *),
                                  GFP_KERNEL, set->numa_node);
         if (!set->tags)
                 return -ENOMEM;
 
         ret = -ENOMEM;
-        set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
-                        GFP_KERNEL, set->numa_node);
+        set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
+                                   GFP_KERNEL, set->numa_node);
         if (!set->mq_map)
                 goto out_free_tags;
 
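
blk-mq allocates these arrays on a specific NUMA node, so the conversion target is kcalloc_node(), the node-aware variant: same zeroing and the same overflow check as kcalloc(), plus an explicit node argument. A short sketch with placeholder names (struct foo, nr and node are illustrative, not blk-mq symbols):

#include <linux/slab.h>

struct foo {
        int x;
};

static struct foo *alloc_array_on_node(unsigned int nr, int node)
{
        /* zeroed, overflow-checked array placed on the requested NUMA node */
        return kcalloc_node(nr, sizeof(struct foo),
                            GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
}
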
diff --git a/block/blk-tag.c b/block/blk-tag.c
index ada0d7cff62b..fbc153aef166 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -99,12 +99,12 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
                        __func__, depth);
         }
 
-        tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+        tag_index = kcalloc(depth, sizeof(struct request *), GFP_ATOMIC);
         if (!tag_index)
                 goto fail;
 
         nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
-        tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+        tag_map = kcalloc(nr_ulongs, sizeof(unsigned long), GFP_ATOMIC);
         if (!tag_map)
                 goto fail;
 
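
The second allocation here sizes a bitmap: ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG is the number of unsigned longs needed to hold depth bits (for example, depth = 70 on a 64-bit build rounds up to 128 bits, i.e. 2 longs). A small sketch of that sizing with the converted allocator (alloc_tag_bitmap is a made-up name, not a block-layer function):

#include <linux/bitops.h>       /* BITS_PER_LONG */
#include <linux/kernel.h>       /* ALIGN() */
#include <linux/slab.h>

static unsigned long *alloc_tag_bitmap(int depth)
{
        int nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;

        /* zeroed, overflow-checked array of longs, as in init_tag_map() */
        return kcalloc(nr_ulongs, sizeof(unsigned long), GFP_ATOMIC);
}
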
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 3d08dc84db16..51000914e23f 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -331,8 +331,8 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
         if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone))
                 return -ERANGE;
 
-        zones = kvmalloc(rep.nr_zones * sizeof(struct blk_zone),
-                        GFP_KERNEL | __GFP_ZERO);
+        zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
+                               GFP_KERNEL | __GFP_ZERO);
         if (!zones)
                 return -ENOMEM;
 
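
kvmalloc() takes a single byte count and may fall back to vmalloc() for large requests; kvmalloc_array() is its overflow-checked array form, and __GFP_ZERO preserves the zeroing the old call relied on. The nr_zones bound check a few lines above already caps the multiplication, so this conversion is mostly about using the idiomatic helper. A sketch with a placeholder element type (struct zone_rec is illustrative, not the kernel's struct blk_zone):

#include <linux/mm.h>
#include <linux/types.h>

struct zone_rec {
        u64 start;
        u64 len;
};

static struct zone_rec *alloc_zone_recs(unsigned int nr)
{
        /* kmalloc first, vmalloc fallback; zeroed; the multiply is checked */
        return kvmalloc_array(nr, sizeof(struct zone_rec),
                              GFP_KERNEL | __GFP_ZERO);
}

A buffer obtained this way is released with kvfree(), since it may have come from either allocator.
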
diff --git a/block/partitions/check.c b/block/partitions/check.c
index 720145c49066..ffe408fead0c 100644
--- a/block/partitions/check.c
+++ b/block/partitions/check.c
@@ -122,7 +122,7 @@ static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
                 return NULL;
 
         nr = disk_max_parts(hd);
-        state->parts = vzalloc(nr * sizeof(state->parts[0]));
+        state->parts = vzalloc(array_size(nr, sizeof(state->parts[0])));
         if (!state->parts) {
                 kfree(state);
                 return NULL;
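
vzalloc() has no 2-factor variant, so here the multiplication is wrapped in array_size() from <linux/overflow.h> instead: on overflow it saturates to SIZE_MAX, and a SIZE_MAX request simply fails, so the caller still sees NULL rather than an undersized allocation. A sketch with placeholder names (struct part_info and alloc_part_array are illustrative):

#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct part_info {
        u64 from;
        u64 size;
};

static struct part_info *alloc_part_array(int nr)
{
        /* array_size() returns SIZE_MAX on overflow, so vzalloc() fails */
        return vzalloc(array_size(nr, sizeof(struct part_info)));
}
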
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index 2a365c756648..0417937dfe99 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -378,7 +378,7 @@ static bool ldm_validate_tocblocks(struct parsed_partitions *state,
         BUG_ON(!state || !ldb);
         ph = &ldb->ph;
         tb[0] = &ldb->toc;
-        tb[1] = kmalloc(sizeof(*tb[1]) * 3, GFP_KERNEL);
+        tb[1] = kmalloc_array(3, sizeof(*tb[1]), GFP_KERNEL);
         if (!tb[1]) {
                 ldm_crit("Out of memory.");
                 goto err;
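
kmalloc_array() is the non-zeroing counterpart: kmalloc_array(n, size, flags) is to kmalloc(n * size, flags) what kcalloc() is to kzalloc(). With a constant count of 3 the multiply cannot actually overflow, so this last hunk is mainly about keeping every array allocation in the same 2-factor style. A final sketch with placeholder names (struct blob and alloc_blobs are illustrative):

#include <linux/slab.h>

struct blob {
        char data[16];
};

static struct blob *alloc_blobs(void)
{
        /* uninitialised array; the caller fills in every element itself */
        return kmalloc_array(3, sizeof(struct blob), GFP_KERNEL);
}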