author     Christoph Hellwig <hch@lst.de>   2020-11-24 09:36:54 +0100
committer  Jens Axboe <axboe@kernel.dk>     2020-12-01 14:53:40 -0700
commit     8446fe9255be821cb38ffd306d7e8edc4b9ea662 (patch)
tree       7da651adc11c5df03c517255cd6872a92705f150 /drivers
parent     block: allocate struct hd_struct as part of struct bdev_inode (diff)
block: switch partition lookup to use struct block_device
Use struct block_device to lookup partitions on a disk. This removes all
usage of struct hd_struct from the I/O path.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Acked-by: Coly Li <colyli@suse.de> [bcache]
Acked-by: Chao Yu <yuchao0@huawei.com> [f2fs]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
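The conversion pattern applied across the drivers below (a minimal illustration distilled from the nvmet hunk in this diff, not additional code from the commit) is that the part_stat_* helpers are now passed the struct block_device itself rather than the struct hd_struct reached through ->bd_part:

    /* before: statistics read via the hd_struct hanging off the block device */
    host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);

    /* after: the struct block_device is handed to the helper directly */
    host_reads = part_stat_read(ns->bdev, ios[READ]);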
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/drbd/drbd_receiver.c   2
-rw-r--r--  drivers/block/drbd/drbd_worker.c     2
-rw-r--r--  drivers/block/zram/zram_drv.c        2
-rw-r--r--  drivers/md/bcache/request.c          4
-rw-r--r--  drivers/md/dm.c                      4
-rw-r--r--  drivers/md/md.c                      4
-rw-r--r--  drivers/nvme/target/admin-cmd.c     20
7 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 9e5c2fdfda36..09c86ef3f0fd 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2802,7 +2802,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
if (c_min_rate == 0)
return false;
- curr_events = (int)part_stat_read_accum(disk->part0->bd_part, sectors) -
+ curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
atomic_read(&device->rs_sect_ev);
if (atomic_read(&device->ap_actlog_cnt)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 343f56b86bb7..02044ab7f767 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1679,7 +1679,7 @@ void drbd_rs_controller_reset(struct drbd_device *device)
atomic_set(&device->rs_sect_ev, 0);
device->rs_in_flight = 0;
device->rs_last_events =
- (int)part_stat_read_accum(disk->part0->bd_part, sectors);
+ (int)part_stat_read_accum(disk->part0, sectors);
/* Updating the RCU protected object in place is necessary since
this function gets called from atomic context.
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 6d84876a9cd0..dc8957d173d3 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1687,7 +1687,7 @@ static void zram_reset_device(struct zram *zram)
zram->disksize = 0;
set_capacity_and_notify(zram->disk, 0);
- part_stat_set_all(zram->disk->part0->bd_part, 0);
+ part_stat_set_all(zram->disk->part0, 0);
up_write(&zram->init_lock);
/* I/O operation under all of CPU are done so let's free */
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index afac8d07c1bd..85b1f2a9b72d 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -475,7 +475,7 @@ struct search {
unsigned int read_dirty_data:1;
unsigned int cache_missed:1;
- struct hd_struct *part;
+ struct block_device *part;
unsigned long start_time;
struct btree_op op;
@@ -1073,7 +1073,7 @@ struct detached_dev_io_private {
unsigned long start_time;
bio_end_io_t *bi_end_io;
void *bi_private;
- struct hd_struct *part;
+ struct block_device *part;
};
static void detached_dev_end_io(struct bio *bio)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1b2db4d530ea..176adcff56b3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1607,7 +1607,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
* (by eliminating DM's splitting and just using bio_split)
*/
part_stat_lock();
- __dm_part_stat_sub(dm_disk(md)->part0->bd_part,
+ __dm_part_stat_sub(dm_disk(md)->part0,
sectors[op_stat_group(bio_op(bio))], ci.sector_count);
part_stat_unlock();
@@ -2242,7 +2242,7 @@ EXPORT_SYMBOL_GPL(dm_put);
static bool md_in_flight_bios(struct mapped_device *md)
{
int cpu;
- struct hd_struct *part = dm_disk(md)->part0->bd_part;
+ struct block_device *part = dm_disk(md)->part0;
long sum = 0;
for_each_possible_cpu(cpu) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3696c2d77a4d..0065736f05b4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -464,7 +464,7 @@ struct md_io {
bio_end_io_t *orig_bi_end_io;
void *orig_bi_private;
unsigned long start_time;
- struct hd_struct *part;
+ struct block_device *part;
};
static void md_end_io(struct bio *bio)
@@ -8441,7 +8441,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_disk;
- curr_events = (int)part_stat_read_accum(disk->part0->bd_part, sectors) -
+ curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index dca34489a1dc..8d90235e4fcc 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -89,12 +89,12 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
if (!ns->bdev)
goto out;
- host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
- data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
- sectors[READ]), 1000);
- host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
- data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
- sectors[WRITE]), 1000);
+ host_reads = part_stat_read(ns->bdev, ios[READ]);
+ data_units_read =
+ DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[READ]), 1000);
+ host_writes = part_stat_read(ns->bdev, ios[WRITE]);
+ data_units_written =
+ DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[WRITE]), 1000);
put_unaligned_le64(host_reads, &slog->host_reads[0]);
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
@@ -120,12 +120,12 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
/* we don't have the right data for file backed ns */
if (!ns->bdev)
continue;
- host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
+ host_reads += part_stat_read(ns->bdev, ios[READ]);
data_units_read += DIV_ROUND_UP(
- part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
- host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+ part_stat_read(ns->bdev, sectors[READ]), 1000);
+ host_writes += part_stat_read(ns->bdev, ios[WRITE]);
data_units_written += DIV_ROUND_UP(
- part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
+ part_stat_read(ns->bdev, sectors[WRITE]), 1000);
}
put_unaligned_le64(host_reads, &slog->host_reads[0]);