author    Linus Torvalds <torvalds@linux-foundation.org>  2021-01-29 13:50:06 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-01-29 13:50:06 -0800
commit    2ba1c4d1a4b5fb9961452286bdcad502b0c8b78a (patch)
tree      3843bf10a2a962724e906b6ef7a28439e26bce7d /block
parent    Merge tag 'io_uring-5.11-2021-01-29' of git://git.kernel.dk/linux-block (diff)
parent    null_blk: cleanup zoned mode initialization (diff)
Merge tag 'block-5.11-2021-01-29' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "All over the place fixes for this release:

  - blk-cgroup iteration teardown resched fix (Baolin)

  - NVMe pull request from Christoph:
      - add another Write Zeroes quirk (Chaitanya Kulkarni)
      - handle a no path available corner case (Daniel Wagner)
      - use the proper RCU aware list_add helper (Chao Leng)

  - bcache regression fix (Coly)

  - bdev->bd_size_lock IRQ fix. This will be fixed in drivers for 5.12,
    but for now, we'll make it IRQ safe (Damien)

  - null_blk zoned init fix (Damien)

  - add_partition() error handling fix (Dinghao)

  - s390 dasd kobject fix (Jan)

  - nbd fix for freezing queue while adding connections (Josef)

  - tag queueing regression fix (Ming)

  - revert of a patch that inadvertently meant that we regressed write
    performance on raid (Maxim)"

* tag 'block-5.11-2021-01-29' of git://git.kernel.dk/linux-block:
  null_blk: cleanup zoned mode initialization
  nvme-core: use list_add_tail_rcu instead of list_add_tail for nvme_init_ns_head
  nvme-multipath: Early exit if no path is available
  nvme-pci: add the DISABLE_WRITE_ZEROES quirk for a SPCC device
  bcache: only check feature sets when sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES
  block: fix bd_size_lock use
  blk-cgroup: Use cond_resched() when destroy blkgs
  Revert "block: simplify set_init_blocksize" to regain lost performance
  nbd: freeze the queue while we're adding connections
  s390/dasd: Fix inconsistent kobject removal
  block: Fix an error handling in add_partition
  blk-mq: test QUEUE_FLAG_HCTX_ACTIVE for sbitmap_shared in hctx_may_queue
Diffstat (limited to 'block')
-rw-r--r--	block/blk-cgroup.c	18
-rw-r--r--	block/blk-mq.h	2
-rw-r--r--	block/genhd.c	5
-rw-r--r--	block/partitions/core.c	8
4 files changed, 22 insertions(+), 11 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 031114d454a6..4221a1539391 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1016,6 +1016,8 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
  */
 void blkcg_destroy_blkgs(struct blkcg *blkcg)
 {
+	might_sleep();
+
 	spin_lock_irq(&blkcg->lock);
 
 	while (!hlist_empty(&blkcg->blkg_list)) {
@@ -1023,14 +1025,20 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
 						struct blkcg_gq, blkcg_node);
 		struct request_queue *q = blkg->q;
 
-		if (spin_trylock(&q->queue_lock)) {
-			blkg_destroy(blkg);
-			spin_unlock(&q->queue_lock);
-		} else {
+		if (need_resched() || !spin_trylock(&q->queue_lock)) {
+			/*
+			 * Given that the system can accumulate a huge number
+			 * of blkgs in pathological cases, check to see if we
+			 * need to reschedule to avoid a softlockup.
+			 */
 			spin_unlock_irq(&blkcg->lock);
-			cpu_relax();
+			cond_resched();
 			spin_lock_irq(&blkcg->lock);
+			continue;
 		}
+
+		blkg_destroy(blkg);
+		spin_unlock(&q->queue_lock);
 	}
 
 	spin_unlock_irq(&blkcg->lock);
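The change above replaces a cpu_relax() busy-wait with a real reschedule point:
when the blkg list is pathologically long, the loop now drops both locks and
calls cond_resched() instead of spinning. A minimal user-space analogue of the
pattern, using pthreads and sched_yield() in place of the kernel's spinlocks
and cond_resched(); all names and the batch threshold below are illustrative,
not from the kernel:

	#include <pthread.h>
	#include <sched.h>
	#include <stdlib.h>

	struct node { struct node *next; };

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *head;

	static void destroy_all(void)
	{
		unsigned int batch = 0;

		pthread_mutex_lock(&list_lock);
		while (head) {
			struct node *n;

			if (++batch % 64 == 0) {
				/* Like cond_resched() above: drop the lock,
				 * let others run, then re-check the list. */
				pthread_mutex_unlock(&list_lock);
				sched_yield();
				pthread_mutex_lock(&list_lock);
				continue;
			}

			n = head;
			head = n->next;
			free(n);
		}
		pthread_mutex_unlock(&list_lock);
	}

	int main(void)
	{
		for (int i = 0; i < 1000; i++) {
			struct node *n = malloc(sizeof(*n));

			if (!n)
				break;
			n->next = head;
			head = n;
		}
		destroy_all();
		return 0;
	}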
diff --git a/block/blk-mq.h b/block/blk-mq.h
index c1458d9502f1..3616453ca28c 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -304,7 +304,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 		struct request_queue *q = hctx->queue;
 		struct blk_mq_tag_set *set = q->tag_set;
 
-		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &q->queue_flags))
+		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
 			return true;
 		users = atomic_read(&set->active_queues_shared_sbitmap);
 	} else {
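For context on this one-line fix: BLK_MQ_S_TAG_ACTIVE numbers a bit in
hctx->state, while q->queue_flags is indexed by the separate QUEUE_FLAG_*
namespace, so the old test read an unrelated bit and the fair tag-sharing
check for shared sbitmaps could be skipped. A standalone sketch of that bug
class, with made-up enum values rather than the real kernel constants:

	#include <stdio.h>

	enum hctx_state { S_STOPPED, S_TAG_ACTIVE };            /* bits in hctx->state */
	enum queue_flag { F_DYING, F_NOMERGES, F_HCTX_ACTIVE }; /* bits in queue_flags */

	static int test_bit(int nr, const unsigned long *addr)
	{
		return (int)((*addr >> nr) & 1UL);
	}

	int main(void)
	{
		unsigned long queue_flags = 1UL << F_HCTX_ACTIVE;

		/* Wrong: S_TAG_ACTIVE indexes a different word; here it
		 * happens to alias F_NOMERGES and silently reads 0. */
		printf("wrong constant: %d\n", test_bit(S_TAG_ACTIVE, &queue_flags));

		/* Right: use the constant defined for this word. */
		printf("right constant: %d\n", test_bit(F_HCTX_ACTIVE, &queue_flags));
		return 0;
	}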
diff --git a/block/genhd.c b/block/genhd.c
index 419548e92d82..9e741a4f351b 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -45,10 +45,11 @@ static void disk_release_events(struct gendisk *disk);
 
 void set_capacity(struct gendisk *disk, sector_t sectors)
 {
 	struct block_device *bdev = disk->part0;
+	unsigned long flags;
 
-	spin_lock(&bdev->bd_size_lock);
+	spin_lock_irqsave(&bdev->bd_size_lock, flags);
 	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
-	spin_unlock(&bdev->bd_size_lock);
+	spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
 }
 EXPORT_SYMBOL(set_capacity);
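This and the matching change in block/partitions/core.c below apply the
standard rule for a lock that can also be taken from interrupt context:
process-context holders must disable local IRQs, or an interrupt arriving on
the same CPU could try to re-acquire the lock and deadlock. A kernel-style
sketch of the rule (it builds only against kernel headers; update_size() and
size_irq_update() are hypothetical names, not kernel APIs):

	#include <linux/spinlock.h>
	#include <linux/types.h>

	static DEFINE_SPINLOCK(size_lock);
	static loff_t size;

	/* Process context: IRQs must be off while the lock is held, else an
	 * interrupt on this CPU could re-enter the lock and deadlock. */
	static void update_size(loff_t new_size)
	{
		unsigned long flags;

		spin_lock_irqsave(&size_lock, flags);
		size = new_size;
		spin_unlock_irqrestore(&size_lock, flags);
	}

	/* IRQ context: interrupts are already disabled here, so the plain
	 * lock/unlock pair is sufficient. */
	static void size_irq_update(loff_t new_size)
	{
		spin_lock(&size_lock);
		size = new_size;
		spin_unlock(&size_lock);
	}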
diff --git a/block/partitions/core.c b/block/partitions/core.c
index e7d776db803b..4601a845cd79 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -88,9 +88,11 @@ static int (*check_part[])(struct parsed_partitions *) = {
 
 static void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
 {
-	spin_lock(&bdev->bd_size_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&bdev->bd_size_lock, flags);
 	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
-	spin_unlock(&bdev->bd_size_lock);
+	spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
 }
 
 static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
@@ -384,7 +386,7 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
 
 	err = blk_alloc_devt(bdev, &devt);
 	if (err)
-		goto out_bdput;
+		goto out_put;
 	pdev->devt = devt;
 
 	/* delay uevent until 'holders' subdir is created */
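The retargeted goto above makes a blk_alloc_devt() failure unwind through
put_device() (the out_put label), the correct teardown once device_initialize()
has run, rather than a bare bdput(). The general C error-ladder idiom this
restores, where each label undoes exactly the steps that had succeeded before
the jump, sketched here with hypothetical names:

	#include <stdlib.h>
	#include <string.h>

	struct obj {
		char *name;
		int  *buf;
	};

	static struct obj *obj_create(const char *name)
	{
		struct obj *o = malloc(sizeof(*o));

		if (!o)
			return NULL;		/* nothing to undo yet */

		o->name = strdup(name);
		if (!o->name)
			goto out_free_obj;	/* undo only the first allocation */

		o->buf = malloc(64 * sizeof(*o->buf));
		if (!o->buf)
			goto out_free_name;	/* undo the strdup, then the struct */

		return o;

	out_free_name:
		free(o->name);
	out_free_obj:
		free(o);
		return NULL;
	}

	int main(void)
	{
		struct obj *o = obj_create("sda1");

		if (o) {			/* release in reverse order */
			free(o->buf);
			free(o->name);
			free(o);
		}
		return 0;
	}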