author     Linus Torvalds <torvalds@linux-foundation.org>    2015-11-04 21:19:53 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2015-11-04 21:19:53 -0800
commit     e0700ce70921fbe3d1913968c663beb9df2b01a9 (patch)
tree       8f8163dbdce74942a6ebe849c43c4fdcf2b52799 /drivers/md
parent     Merge tag 'md/4.4' of git://neil.brown.name/md (diff)
parent     dm switch: simplify conditional in alloc_region_table() (diff)
Merge tag 'dm-4.4-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:
 "Smaller set of DM changes for this merge. I've based these changes on
  Jens' for-4.4/reservations branch because the associated DM changes
  required it.

   - Revert a dm-multipath change that caused a regression for
     unprivileged users (e.g. kvm guests) that issued ioctls when a
     multipath device had no available paths.

   - Include Christoph's refactoring of DM's ioctl handling and add
     support for passing through persistent reservations with DM
     multipath.

   - All other changes are very simple cleanups"

* tag 'dm-4.4-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm switch: simplify conditional in alloc_region_table()
  dm delay: document that offsets are specified in sectors
  dm delay: capitalize the start of an delay_ctr() error message
  dm delay: Use DM_MAPIO macros instead of open-coded equivalents
  dm linear: remove redundant target name from error messages
  dm persistent data: eliminate unnecessary return values
  dm: eliminate unused "bioset" process for each bio-based DM device
  dm: convert ffs to __ffs
  dm: drop NULL test before kmem_cache_destroy() and mempool_destroy()
  dm: add support for passing through persistent reservations
  dm: refactor ioctl handling
  Revert "dm mpath: fix stalls when handling invalid ioctls"
  dm: initialize non-blk-mq queue data before queue is used
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-bufio.c | 18
-rw-r--r--  drivers/md/dm-cache-metadata.c | 8
-rw-r--r--  drivers/md/dm-cache-policy-cleaner.c | 2
-rw-r--r--  drivers/md/dm-cache-policy-mq.c | 2
-rw-r--r--  drivers/md/dm-cache-policy-smq.c | 2
-rw-r--r--  drivers/md/dm-cache-target.c | 3
-rw-r--r--  drivers/md/dm-crypt.c | 6
-rw-r--r--  drivers/md/dm-delay.c | 7
-rw-r--r--  drivers/md/dm-era-target.c | 15
-rw-r--r--  drivers/md/dm-exception-store.c | 2
-rw-r--r--  drivers/md/dm-flakey.c | 16
-rw-r--r--  drivers/md/dm-io.c | 3
-rw-r--r--  drivers/md/dm-linear.c | 20
-rw-r--r--  drivers/md/dm-log-userspace-base.c | 3
-rw-r--r--  drivers/md/dm-log-writes.c | 13
-rw-r--r--  drivers/md/dm-mpath.c | 34
-rw-r--r--  drivers/md/dm-region-hash.c | 6
-rw-r--r--  drivers/md/dm-snap-persistent.c | 2
-rw-r--r--  drivers/md/dm-switch.c | 25
-rw-r--r--  drivers/md/dm-thin-metadata.c | 16
-rw-r--r--  drivers/md/dm-verity.c | 15
-rw-r--r--  drivers/md/dm.c | 209
-rw-r--r--  drivers/md/persistent-data/dm-array.c | 4
-rw-r--r--  drivers/md/persistent-data/dm-block-manager.c | 4
-rw-r--r--  drivers/md/persistent-data/dm-block-manager.h | 2
-rw-r--r--  drivers/md/persistent-data/dm-btree-internal.h | 2
-rw-r--r--  drivers/md/persistent-data/dm-btree-remove.c | 36
-rw-r--r--  drivers/md/persistent-data/dm-btree-spine.c | 20
-rw-r--r--  drivers/md/persistent-data/dm-btree.c | 4
-rw-r--r--  drivers/md/persistent-data/dm-space-map-common.c | 32
-rw-r--r--  drivers/md/persistent-data/dm-transaction-manager.c | 4
-rw-r--r--  drivers/md/persistent-data/dm-transaction-manager.h | 2
32 files changed, 327 insertions, 210 deletions
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 83cc52eaf56d..2dd33085b331 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1598,11 +1598,11 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->bdev = bdev;
c->block_size = block_size;
- c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
- c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
- ffs(block_size) - 1 - PAGE_SHIFT : 0;
- c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
- PAGE_SHIFT - (ffs(block_size) - 1) : 0);
+ c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
+ c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
+ __ffs(block_size) - PAGE_SHIFT : 0;
+ c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
+ PAGE_SHIFT - __ffs(block_size) : 0);
c->aux_size = aux_size;
c->alloc_callback = alloc_callback;
@@ -1861,12 +1861,8 @@ static void __exit dm_bufio_exit(void)
cancel_delayed_work_sync(&dm_bufio_work);
destroy_workqueue(dm_bufio_wq);
- for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
- struct kmem_cache *kc = dm_bufio_caches[i];
-
- if (kc)
- kmem_cache_destroy(kc);
- }
+ for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
+ kmem_cache_destroy(dm_bufio_caches[i]);
for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
kfree(dm_bufio_cache_names[i]);
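The ffs()-to-__ffs() conversions in this series rely on the argument being a power of two (block sizes, bucket counts, chunk sizes): ffs() returns the 1-based position of the lowest set bit, __ffs() the 0-based bit index, so __ffs(x) equals ffs(x) - 1 for any nonzero x, and both express log2(x) when x is a power of two. A minimal userspace sketch of that equivalence, using GCC builtins as stand-ins for the kernel helpers (which are not available outside the kernel):

#include <assert.h>
#include <stdio.h>

/*
 * Userspace sketch only: __builtin_ffsl() mirrors the kernel's ffs()
 * (1-based index of the lowest set bit) and __builtin_ctzl() mirrors
 * __ffs() (0-based index).  For a power-of-two block size both yield
 * log2(block_size), which is what the converted code stores as a shift.
 */
int main(void)
{
	unsigned long block_size;

	for (block_size = 512; block_size <= 65536; block_size <<= 1) {
		int one_based  = __builtin_ffsl((long)block_size) - 1;
		int zero_based = __builtin_ctzl(block_size);

		assert(one_based == zero_based);
		printf("block_size %6lu -> shift %d\n", block_size, zero_based);
	}
	return 0;
}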
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 0a17d1b91a81..f6543f3a970f 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -260,7 +260,9 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
}
}
- return dm_bm_unlock(b);
+ dm_bm_unlock(b);
+
+ return 0;
}
static void __setup_mapping_info(struct dm_cache_metadata *cmd)
@@ -465,7 +467,9 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
sb_flags = le32_to_cpu(disk_super->flags);
cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
- return dm_bm_unlock(sblock);
+ dm_bm_unlock(sblock);
+
+ return 0;
bad:
dm_bm_unlock(sblock);
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
index 8a096456579b..14aaaf059f06 100644
--- a/drivers/md/dm-cache-policy-cleaner.c
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -83,7 +83,7 @@ static struct list_head *list_pop(struct list_head *q)
static int alloc_hash(struct hash *hash, unsigned elts)
{
hash->nr_buckets = next_power(elts >> 4, 16);
- hash->hash_bits = ffs(hash->nr_buckets) - 1;
+ hash->hash_bits = __ffs(hash->nr_buckets);
hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);
return hash->table ? 0 : -ENOMEM;
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index aa1b41ca40f7..ddb26980cd66 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -1410,7 +1410,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
- mq->hash_bits = ffs(mq->nr_buckets) - 1;
+ mq->hash_bits = __ffs(mq->nr_buckets);
mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
if (!mq->table)
goto bad_alloc_table;
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 1ffbeb1b3ea6..28d4586748d0 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -566,7 +566,7 @@ static int h_init(struct hash_table *ht, struct entry_space *es, unsigned nr_ent
ht->es = es;
nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
- ht->hash_bits = ffs(nr_buckets) - 1;
+ ht->hash_bits = __ffs(nr_buckets);
ht->buckets = vmalloc(sizeof(*ht->buckets) * nr_buckets);
if (!ht->buckets)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index dd90d1236f4a..2fd4c8296144 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2309,8 +2309,7 @@ static void destroy(struct cache *cache)
{
unsigned i;
- if (cache->migration_pool)
- mempool_destroy(cache->migration_pool);
+ mempool_destroy(cache->migration_pool);
if (cache->all_io_ds)
dm_deferred_set_destroy(cache->all_io_ds);
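This hunk, and the similar ones in dm-crypt, dm-io, dm-log-userspace-base, dm-region-hash and dm.c below, depend on kmem_cache_destroy() and mempool_destroy() being no-ops when passed NULL, just as kfree() is; that is also why only those guards are dropped while the dm_deferred_set_destroy() check stays. A userspace analogue of the same idiom, with illustrative names (free() plays the role of the NULL-tolerant destructor):

#include <stdlib.h>

/*
 * Illustrative only: free(), like kmem_cache_destroy()/mempool_destroy()
 * in the kernel, is specified to do nothing for a NULL pointer, so teardown
 * paths can call it unconditionally.
 */
struct cache_ctx {
	void *migration_pool;	/* may never have been allocated */
};

static void destroy_ctx(struct cache_ctx *c)
{
	free(c->migration_pool);	/* no "if (c->migration_pool)" guard needed */
	free(c);
}

int main(void)
{
	struct cache_ctx *c = calloc(1, sizeof(*c));

	if (c)
		destroy_ctx(c);		/* migration_pool is NULL here; still safe */
	return 0;
}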
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4b3b6f8aff0c..3729b394432c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1544,10 +1544,8 @@ static void crypt_dtr(struct dm_target *ti)
if (cc->bs)
bioset_free(cc->bs);
- if (cc->page_pool)
- mempool_destroy(cc->page_pool);
- if (cc->req_pool)
- mempool_destroy(cc->req_pool);
+ mempool_destroy(cc->page_pool);
+ mempool_destroy(cc->req_pool);
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
cc->iv_gen_ops->dtr(cc);
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index b34f6e27293d..b4c356a21123 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -122,6 +122,7 @@ static void flush_expired_bios(struct work_struct *work)
* <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
*
* With separate write parameters, the first set is only used for reads.
+ * Offsets are specified in sectors.
* Delays are specified in milliseconds.
*/
static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
@@ -132,7 +133,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int ret;
if (argc != 3 && argc != 6) {
- ti->error = "requires exactly 3 or 6 arguments";
+ ti->error = "Requires exactly 3 or 6 arguments";
return -EINVAL;
}
@@ -237,7 +238,7 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
unsigned long expires = 0;
if (!delay || !atomic_read(&dc->may_delay))
- return 1;
+ return DM_MAPIO_REMAPPED;
delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
@@ -257,7 +258,7 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
queue_timeout(dc, expires);
- return 0;
+ return DM_MAPIO_SUBMITTED;
}
static void delay_presuspend(struct dm_target *ti)
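For reference, the DM_MAPIO values dm-delay now returns encode exactly what the open-coded 0 and 1 meant: DM_MAPIO_SUBMITTED tells DM core the target has taken ownership of the bio (here, queued it for delayed submission), while DM_MAPIO_REMAPPED asks the core to submit the remapped bio itself. Paraphrased from include/linux/device-mapper.h:

/*
 * Map-function return codes for bio-based targets
 * (paraphrased from include/linux/device-mapper.h):
 */
#define DM_MAPIO_SUBMITTED	0	/* target has dispatched/queued the bio itself */
#define DM_MAPIO_REMAPPED	1	/* DM core should submit the remapped bio */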
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 0119ebfb3d49..665bf3285618 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -343,7 +343,9 @@ static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
}
}
- return dm_bm_unlock(b);
+ dm_bm_unlock(b);
+
+ return 0;
}
/*----------------------------------------------------------------*/
@@ -582,7 +584,9 @@ static int open_metadata(struct era_metadata *md)
md->metadata_snap = le64_to_cpu(disk->metadata_snap);
md->archived_writesets = true;
- return dm_bm_unlock(sblock);
+ dm_bm_unlock(sblock);
+
+ return 0;
bad:
dm_bm_unlock(sblock);
@@ -1046,12 +1050,7 @@ static int metadata_take_snap(struct era_metadata *md)
md->metadata_snap = dm_block_location(clone);
- r = dm_tm_unlock(md->tm, clone);
- if (r) {
- DMERR("%s: couldn't unlock clone", __func__);
- md->metadata_snap = SUPERBLOCK_LOCATION;
- return r;
- }
+ dm_tm_unlock(md->tm, clone);
return 0;
}
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 192bb8beeb6b..3997f34cfebc 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -183,7 +183,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
store->chunk_size = chunk_size;
store->chunk_mask = chunk_size - 1;
- store->chunk_shift = ffs(chunk_size) - 1;
+ store->chunk_shift = __ffs(chunk_size);
return 0;
}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 645e8b4f808e..09e2afcafd2d 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -373,20 +373,20 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
}
}
-static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
+static int flakey_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode)
{
struct flakey_c *fc = ti->private;
- struct dm_dev *dev = fc->dev;
- int r = 0;
+
+ *bdev = fc->dev->bdev;
/*
* Only pass ioctls through if the device sizes match exactly.
*/
if (fc->start ||
- ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
-
- return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+ ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ return 1;
+ return 0;
}
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
@@ -405,7 +405,7 @@ static struct target_type flakey_target = {
.map = flakey_map,
.end_io = flakey_end_io,
.status = flakey_status,
- .ioctl = flakey_ioctl,
+ .prepare_ioctl = flakey_prepare_ioctl,
.iterate_devices = flakey_iterate_devices,
};
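flakey, linear, log-writes, switch, verity and multipath all take the same shape after this refactor: instead of each target calling __blkdev_driver_ioctl() itself, .prepare_ioctl() only reports the underlying block device and returns 1 when the target does not cover the whole device, leaving validation (scsi_verify_blk_ioctl()) and the actual ioctl to DM core (see the dm_blk_ioctl() hunk in dm.c further down). A schematic userspace sketch of that split, with all names illustrative rather than kernel APIs:

#include <errno.h>
#include <stdio.h>

struct blockdev { const char *name; };

struct target {
	struct blockdev *dev;
	unsigned long long start, len, dev_sectors;
};

/* Target side: name the device; flag when sizes do not match exactly. */
static int prepare_ioctl(struct target *ti, struct blockdev **bdev)
{
	*bdev = ti->dev;
	if (ti->start || ti->len != ti->dev_sectors)
		return 1;	/* core must verify the command first */
	return 0;
}

/* Core side: the single place that validates and forwards. */
static int blk_ioctl(struct target *ti, unsigned int cmd)
{
	struct blockdev *bdev;
	int r = prepare_ioctl(ti, &bdev);

	if (r < 0)
		return r;
	if (r > 0) {
		printf("cmd %#x needs validation before reaching %s\n",
		       cmd, bdev->name);
		return -ENOTTY;		/* pretend validation rejected it */
	}
	printf("forwarding cmd %#x to %s\n", cmd, bdev->name);
	return 0;
}

int main(void)
{
	struct blockdev sda = { "sda" };
	struct target whole   = { &sda, 0, 1000, 1000 };
	struct target partial = { &sda, 0,  500, 1000 };

	blk_ioctl(&whole, 0x1260);	/* forwarded unchanged */
	blk_ioctl(&partial, 0x1260);	/* flagged for extra validation */
	return 0;
}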
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 6f8e83b2a6f8..81c5e1a1f363 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -65,8 +65,7 @@ struct dm_io_client *dm_io_client_create(void)
return client;
bad:
- if (client->pool)
- mempool_destroy(client->pool);
+ mempool_destroy(client->pool);
kfree(client);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 436f5c9b6aea..05c35aacb3aa 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -39,20 +39,20 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
lc = kmalloc(sizeof(*lc), GFP_KERNEL);
if (lc == NULL) {
- ti->error = "dm-linear: Cannot allocate linear context";
+ ti->error = "Cannot allocate linear context";
return -ENOMEM;
}
ret = -EINVAL;
if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
- ti->error = "dm-linear: Invalid device sector";
+ ti->error = "Invalid device sector";
goto bad;
}
lc->start = tmp;
ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
if (ret) {
- ti->error = "dm-linear: Device lookup failed";
+ ti->error = "Device lookup failed";
goto bad;
}
@@ -116,21 +116,21 @@ static void linear_status(struct dm_target *ti, status_type_t type,
}
}
-static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
- unsigned long arg)
+static int linear_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode)
{
struct linear_c *lc = (struct linear_c *) ti->private;
struct dm_dev *dev = lc->dev;
- int r = 0;
+
+ *bdev = dev->bdev;
/*
* Only pass ioctls through if the device sizes match exactly.
*/
if (lc->start ||
ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
-
- return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+ return 1;
+ return 0;
}
static int linear_iterate_devices(struct dm_target *ti,
@@ -149,7 +149,7 @@ static struct target_type linear_target = {
.dtr = linear_dtr,
.map = linear_map,
.status = linear_status,
- .ioctl = linear_ioctl,
+ .prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices,
};
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 058256d2eeea..53b7b06d0aa8 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -313,8 +313,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
out:
kfree(devices_rdata);
if (r) {
- if (lc->flush_entry_pool)
- mempool_destroy(lc->flush_entry_pool);
+ mempool_destroy(lc->flush_entry_pool);
kfree(lc);
kfree(ctr_str);
} else {
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index b2912dbac8bc..624589d51c2c 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -714,20 +714,19 @@ static void log_writes_status(struct dm_target *ti, status_type_t type,
}
}
-static int log_writes_ioctl(struct dm_target *ti, unsigned int cmd,
- unsigned long arg)
+static int log_writes_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode)
{
struct log_writes_c *lc = ti->private;
struct dm_dev *dev = lc->dev;
- int r = 0;
+ *bdev = dev->bdev;
/*
* Only pass ioctls through if the device sizes match exactly.
*/
if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
-
- return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+ return 1;
+ return 0;
}
static int log_writes_iterate_devices(struct dm_target *ti,
@@ -782,7 +781,7 @@ static struct target_type log_writes_target = {
.map = log_writes_map,
.end_io = normal_end_io,
.status = log_writes_status,
- .ioctl = log_writes_ioctl,
+ .prepare_ioctl = log_writes_prepare_ioctl,
.message = log_writes_message,
.iterate_devices = log_writes_iterate_devices,
.io_hints = log_writes_io_hints,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 5a67671a3973..aaa6caa46a9f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1533,18 +1533,14 @@ out:
return r;
}
-static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
- unsigned long arg)
+static int multipath_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode)
{
struct multipath *m = ti->private;
struct pgpath *pgpath;
- struct block_device *bdev;
- fmode_t mode;
unsigned long flags;
int r;
- bdev = NULL;
- mode = 0;
r = 0;
spin_lock_irqsave(&m->lock, flags);
@@ -1555,26 +1551,17 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
pgpath = m->current_pgpath;
if (pgpath) {
- bdev = pgpath->path.dev->bdev;
- mode = pgpath->path.dev->mode;
+ *bdev = pgpath->path.dev->bdev;
+ *mode = pgpath->path.dev->mode;
}
if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
r = -ENOTCONN;
- else if (!bdev)
+ else if (!*bdev)
r = -EIO;
spin_unlock_irqrestore(&m->lock, flags);
- /*
- * Only pass ioctls through if the device sizes match exactly.
- */
- if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
- int err = scsi_verify_blk_ioctl(NULL, cmd);
- if (err)
- r = err;
- }
-
if (r == -ENOTCONN && !fatal_signal_pending(current)) {
spin_lock_irqsave(&m->lock, flags);
if (!m->current_pg) {
@@ -1587,7 +1574,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
dm_table_run_md_queue_async(m->ti->table);
}
- return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ return 1;
+ return r;
}
static int multipath_iterate_devices(struct dm_target *ti,
@@ -1690,7 +1682,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 9, 0},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
@@ -1703,7 +1695,7 @@ static struct target_type multipath_target = {
.resume = multipath_resume,
.status = multipath_status,
.message = multipath_message,
- .ioctl = multipath_ioctl,
+ .prepare_ioctl = multipath_prepare_ioctl,
.iterate_devices = multipath_iterate_devices,
.busy = multipath_busy,
};
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index b929fd5f4984..74cb7b991d41 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -193,7 +193,7 @@ struct dm_region_hash *dm_region_hash_create(
rh->max_recovery = max_recovery;
rh->log = log;
rh->region_size = region_size;
- rh->region_shift = ffs(region_size) - 1;
+ rh->region_shift = __ffs(region_size);
rwlock_init(&rh->hash_lock);
rh->mask = nr_buckets - 1;
rh->nr_buckets = nr_buckets;
@@ -249,9 +249,7 @@ void dm_region_hash_destroy(struct dm_region_hash *rh)
if (rh->log)
dm_dirty_log_destroy(rh->log);
- if (rh->region_pool)
- mempool_destroy(rh->region_pool);
-
+ mempool_destroy(rh->region_pool);
vfree(rh->buckets);
kfree(rh);
}
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 117a05e40090..3164b8bce294 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -322,7 +322,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
bdev) >> 9);
ps->store->chunk_mask = ps->store->chunk_size - 1;
- ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
+ ps->store->chunk_shift = __ffs(ps->store->chunk_size);
chunk_size_supplied = 0;
}
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 50fca469cafd..871c18fe000d 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -99,11 +99,11 @@ static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
if (sector_div(nr_regions, sctx->region_size))
nr_regions++;
- sctx->nr_regions = nr_regions;
- if (sctx->nr_regions != nr_regions || sctx->nr_regions >= ULONG_MAX) {
+ if (nr_regions >= ULONG_MAX) {
ti->error = "Region table too large";
return -EINVAL;
}
+ sctx->nr_regions = nr_regions;
nr_slots = nr_regions;
if (sector_div(nr_slots, sctx->region_entries_per_slot))
@@ -511,27 +511,24 @@ static void switch_status(struct dm_target *ti, status_type_t type,
*
* Passthrough all ioctls to the path for sector 0
*/
-static int switch_ioctl(struct dm_target *ti, unsigned cmd,
- unsigned long arg)
+static int switch_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode)
{
struct switch_ctx *sctx = ti->private;
- struct block_device *bdev;
- fmode_t mode;
unsigned path_nr;
- int r = 0;
path_nr = switch_get_path_nr(sctx, 0);
- bdev = sctx->path_list[path_nr].dmdev->bdev;
- mode = sctx->path_list[path_nr].dmdev->mode;
+ *bdev = sctx->path_list[path_nr].dmdev->bdev;
+ *mode = sctx->path_list[path_nr].dmdev->mode;
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (ti->len + sctx->path_list[path_nr].start != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
-
- return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ if (ti->len + sctx->path_list[path_nr].start !=
+ i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ return 1;
+ return 0;
}
static int switch_iterate_devices(struct dm_target *ti,
@@ -560,7 +557,7 @@ static struct target_type switch_target = {
.map = switch_map,
.message = switch_message,
.status = switch_status,
- .ioctl = switch_ioctl,
+ .prepare_ioctl = switch_prepare_ioctl,
.iterate_devices = switch_iterate_devices,
};
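The alloc_region_table() change is about ordering: nr_regions is computed in sector_t (64-bit), while sctx->nr_regions is an unsigned long, so the old code assigned first and then compared the two, a pattern static checkers flag as a possibly always-false comparison on 64-bit builds. Checking the range before the (possibly narrowing) assignment says the same thing more directly. A small sketch of that check-then-assign pattern, with store_region_count() as an illustrative helper only:

#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative helper, not a kernel function. */
static int store_region_count(unsigned long *dst, uint64_t nr_regions)
{
	if (nr_regions >= ULONG_MAX)
		return -EINVAL;		/* "Region table too large" */
	*dst = (unsigned long)nr_regions;
	return 0;
}

int main(void)
{
	unsigned long stored = 0;

	printf("%d\n", store_region_count(&stored, 1024));		/* 0       */
	printf("%d\n", store_region_count(&stored, UINT64_MAX));	/* -EINVAL */
	return 0;
}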
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 6ba47cfb1443..1fa45695b68a 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -396,7 +396,9 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
}
}
- return dm_bm_unlock(b);
+ dm_bm_unlock(b);
+
+ return 0;
}
static void __setup_btree_details(struct dm_pool_metadata *pmd)
@@ -650,7 +652,9 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
}
__setup_btree_details(pmd);
- return dm_bm_unlock(sblock);
+ dm_bm_unlock(sblock);
+
+ return 0;
bad_cleanup_data_sm:
dm_sm_destroy(pmd->data_sm);
@@ -1297,7 +1301,9 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
dm_sm_dec_block(pmd->metadata_sm, held_root);
- return dm_tm_unlock(pmd->tm, copy);
+ dm_tm_unlock(pmd->tm, copy);
+
+ return 0;
}
int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
@@ -1327,7 +1333,9 @@ static int __get_metadata_snap(struct dm_pool_metadata *pmd,
disk_super = dm_block_data(sblock);
*result = le64_to_cpu(disk_super->held_root);
- return dm_bm_unlock(sblock);
+ dm_bm_unlock(sblock);
+
+ return 0;
}
int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index edc624bccf9a..ccf41886ebcf 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -631,18 +631,17 @@ static void verity_status(struct dm_target *ti, status_type_t type,
}
}
-static int verity_ioctl(struct dm_target *ti, unsigned cmd,
- unsigned long arg)
+static int verity_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode)
{
struct dm_verity *v = ti->private;
- int r = 0;
+
+ *bdev = v->data_dev->bdev;
if (v->data_start ||
ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
-
- return r ? : __blkdev_driver_ioctl(v->data_dev->bdev, v->data_dev->mode,
- cmd, arg);
+ return 1;
+ return 0;
}
static int verity_iterate_devices(struct dm_target *ti,
@@ -965,7 +964,7 @@ static struct target_type verity_target = {
.dtr = verity_dtr,
.map = verity_map,
.status = verity_status,
- .ioctl = verity_ioctl,
+ .prepare_ioctl = verity_prepare_ioctl,
.iterate_devices = verity_iterate_devices,
.io_hints = verity_io_hints,
};
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 485760ebba76..32440ad5f684 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -24,6 +24,7 @@
#include <linux/ktime.h>
#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>
+#include <linux/pr.h>
#include <trace/events/block.h>
@@ -555,18 +556,16 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return dm_get_geometry(md, geo);
}
-static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
+static int dm_get_live_table_for_ioctl(struct mapped_device *md,
+ struct dm_target **tgt, struct block_device **bdev,
+ fmode_t *mode, int *srcu_idx)
{
- struct mapped_device *md = bdev->bd_disk->private_data;
- int srcu_idx;
struct dm_table *map;
- struct dm_target *tgt;
- int r = -ENOTTY;
+ int r;
retry:
- map = dm_get_live_table(md, &srcu_idx);
-
+ r = -ENOTTY;
+ map = dm_get_live_table(md, srcu_idx);
if (!map || !dm_table_get_size(map))
goto out;
@@ -574,8 +573,9 @@ retry:
if (dm_table_get_num_targets(map) != 1)
goto out;
- tgt = dm_table_get_target(map, 0);
- if (!tgt->type->ioctl)
+ *tgt = dm_table_get_target(map, 0);
+
+ if (!(*tgt)->type->prepare_ioctl)
goto out;
if (dm_suspended_md(md)) {
@@ -583,16 +583,46 @@ retry:
goto out;
}
- r = tgt->type->ioctl(tgt, cmd, arg);
+ r = (*tgt)->type->prepare_ioctl(*tgt, bdev, mode);
+ if (r < 0)
+ goto out;
-out:
- dm_put_live_table(md, srcu_idx);
+ return r;
+out:
+ dm_put_live_table(md, *srcu_idx);
if (r == -ENOTCONN) {
msleep(10);
goto retry;
}
+ return r;
+}
+
+static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ struct dm_target *tgt;
+ int srcu_idx, r;
+
+ r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ if (r < 0)
+ return r;
+ if (r > 0) {
+ /*
+ * Target determined this ioctl is being issued against
+ * a logical partition of the parent bdev; so extra
+ * validation is needed.
+ */
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+ if (r)
+ goto out;
+ }
+
+ r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+out:
+ dm_put_live_table(md, srcu_idx);
return r;
}
@@ -1734,8 +1764,6 @@ static void dm_make_request(struct request_queue *q, struct bio *bio)
map = dm_get_live_table(md, &srcu_idx);
- blk_queue_split(q, &bio, q->bio_split);
-
generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
/* if we're suspended, we have to queue this io for later */
@@ -2198,6 +2226,13 @@ static void dm_init_md_queue(struct mapped_device *md)
* This queue is new, so no concurrency on the queue_flags.
*/
queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+
+ /*
+ * Initialize data that will only be used by a non-blk-mq DM queue
+ * - must do so here (in alloc_dev callchain) before queue is used
+ */
+ md->queue->queuedata = md;
+ md->queue->backing_dev_info.congested_data = md;
}
static void dm_init_old_md_queue(struct mapped_device *md)
@@ -2208,10 +2243,7 @@ static void dm_init_old_md_queue(struct mapped_device *md)
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
- md->queue->queuedata = md;
md->queue->backing_dev_info.congested_fn = dm_any_congested;
- md->queue->backing_dev_info.congested_data = md;
-
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
}
@@ -2221,10 +2253,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
destroy_workqueue(md->wq);
if (md->kworker_task)
kthread_stop(md->kworker_task);
- if (md->io_pool)
- mempool_destroy(md->io_pool);
- if (md->rq_pool)
- mempool_destroy(md->rq_pool);
+ mempool_destroy(md->io_pool);
+ mempool_destroy(md->rq_pool);
if (md->bs)
bioset_free(md->bs);
@@ -2759,6 +2789,12 @@ int dm_setup_md_queue(struct mapped_device *md)
case DM_TYPE_BIO_BASED:
dm_init_old_md_queue(md);
blk_queue_make_request(md->queue, dm_make_request);
+ /*
+ * DM handles splitting bios as needed. Free the bio_split bioset
+ * since it won't be used (saves 1 process per bio-based DM device).
+ */
+ bioset_free(md->queue->bio_split);
+ md->queue->bio_split = NULL;
break;
}
@@ -3505,11 +3541,8 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
if (!pools)
return;
- if (pools->io_pool)
- mempool_destroy(pools->io_pool);
-
- if (pools->rq_pool)
- mempool_destroy(pools->rq_pool);
+ mempool_destroy(pools->io_pool);
+ mempool_destroy(pools->rq_pool);
if (pools->bs)
bioset_free(pools->bs);
@@ -3517,11 +3550,133 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
kfree(pools);
}
+static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
+ u32 flags)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ const struct pr_ops *ops;
+ struct dm_target *tgt;
+ fmode_t mode;
+ int srcu_idx, r;
+
+ r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ if (r < 0)
+ return r;
+
+ ops = bdev->bd_disk->fops->pr_ops;
+ if (ops && ops->pr_register)
+ r = ops->pr_register(bdev, old_key, new_key, flags);
+ else
+ r = -EOPNOTSUPP;
+
+ dm_put_live_table(md, srcu_idx);
+ return r;
+}
+
+static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
+ u32 flags)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ const struct pr_ops *ops;
+ struct dm_target *tgt;
+ fmode_t mode;
+ int srcu_idx, r;
+
+ r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ if (r < 0)
+ return r;
+
+ ops = bdev->bd_disk->fops->pr_ops;
+ if (ops && ops->pr_reserve)
+ r = ops->pr_reserve(bdev, key, type, flags);
+ else
+ r = -EOPNOTSUPP;
+
+ dm_put_live_table(md, srcu_idx);
+ return r;
+}
+
+static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ const struct pr_ops *ops;
+ struct dm_target *tgt;
+ fmode_t mode;
+ int srcu_idx, r;
+
+ r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ if (r < 0)
+ return r;
+
+ ops = bdev->bd_disk->fops->pr_ops;
+ if (ops && ops->pr_release)
+ r = ops->pr_release(bdev, key, type);
+ else
+ r = -EOPNOTSUPP;
+
+ dm_put_live_table(md, srcu_idx);
+ return r;
+}
+
+static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
+ enum pr_type type, bool abort)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ const struct pr_ops *ops;
+ struct dm_target *tgt;
+ fmode_t mode;
+ int srcu_idx, r;
+
+ r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ if (r < 0)
+ return r;
+
+ ops = bdev->bd_disk->fops->pr_ops;
+ if (ops && ops->pr_preempt)
+ r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
+ else
+ r = -EOPNOTSUPP;
+
+ dm_put_live_table(md, srcu_idx);
+ return r;
+}
+
+static int dm_pr_clear(struct block_device *bdev, u64 key)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ const struct pr_ops *ops;
+ struct dm_target *tgt;
+ fmode_t mode;
+ int srcu_idx, r;
+
+ r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ if (r < 0)
+ return r;
+
+ ops = bdev->bd_disk->fops->pr_ops;
+ if (ops && ops->pr_clear)
+ r = ops->pr_clear(bdev, key);
+ else
+ r = -EOPNOTSUPP;
+
+ dm_put_live_table(md, srcu_idx);
+ return r;
+}
+
+static const struct pr_ops dm_pr_ops = {
+ .pr_register = dm_pr_register,
+ .pr_reserve = dm_pr_reserve,
+ .pr_release = dm_pr_release,
+ .pr_preempt = dm_pr_preempt,
+ .pr_clear = dm_pr_clear,
+};
+
static const struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
.getgeo = dm_blk_getgeo,
+ .pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
};
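With .pr_ops wired into dm_blk_dops, a DM device with a single prepare_ioctl-capable target (dm-multipath in particular) now answers the generic persistent-reservation ioctls the block layer dispatches to pr_ops, forwarding them to the block device reported by dm_get_live_table_for_ioctl(), i.e. the current path for multipath. A userspace example of registering a reservation key through this path, assuming the uapi <linux/pr.h> interface added by the same for-4.4/reservations series; /dev/mapper/mpatha and the key value are placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pr.h>

int main(void)
{
	struct pr_registration reg;
	int fd, r;

	fd = open("/dev/mapper/mpatha", O_RDWR);	/* placeholder device */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&reg, 0, sizeof(reg));
	reg.old_key = 0;
	reg.new_key = 0x123abcULL;	/* arbitrary example key */

	r = ioctl(fd, IOC_PR_REGISTER, &reg);
	if (r)
		perror("IOC_PR_REGISTER");	/* EOPNOTSUPP if the underlying driver lacks pr_ops */

	close(fd);
	return r ? 1 : 0;
}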
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index e64b61ad0ef3..431a03067d64 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -233,9 +233,9 @@ static int get_ablock(struct dm_array_info *info, dm_block_t b,
/*
* Unlocks an array block.
*/
-static int unlock_ablock(struct dm_array_info *info, struct dm_block *block)
+static void unlock_ablock(struct dm_array_info *info, struct dm_block *block)
{
- return dm_tm_unlock(info->btree_info.tm, block);
+ dm_tm_unlock(info->btree_info.tm, block);
}
/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 88dbe7b97c2c..f2393ba838eb 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -578,7 +578,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
}
EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);
-int dm_bm_unlock(struct dm_block *b)
+void dm_bm_unlock(struct dm_block *b)
{
struct buffer_aux *aux;
aux = dm_bufio_get_aux_data(to_buffer(b));
@@ -590,8 +590,6 @@ int dm_bm_unlock(struct dm_block *b)
bl_up_read(&aux->lock);
dm_bufio_release(to_buffer(b));
-
- return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_unlock);
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index 84330f59886d..3627d1b7667a 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -94,7 +94,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, dm_block_t b,
struct dm_block_validator *v,
struct dm_block **result);
-int dm_bm_unlock(struct dm_block *b);
+void dm_bm_unlock(struct dm_block *b);
/*
* It's a common idiom to have a superblock that should be committed last.
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index 8731b6ea026b..a240990a7f33 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -52,7 +52,7 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
struct dm_btree_value_type *vt);
int new_block(struct dm_btree_info *info, struct dm_block **result);
-int unlock_block(struct dm_btree_info *info, struct dm_block *b);
+void unlock_block(struct dm_btree_info *info, struct dm_block *b);
/*
* Spines keep track of the rolling locks. There are 2 variants, read-only
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 2e4c4cb79e4d..21ea537bd55e 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -165,9 +165,9 @@ static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt
return 0;
}
-static int exit_child(struct dm_btree_info *info, struct child *c)
+static void exit_child(struct dm_btree_info *info, struct child *c)
{
- return dm_tm_unlock(info->tm, c->block);
+ dm_tm_unlock(info->tm, c->block);
}
static void shift(struct btree_node *left, struct btree_node *right, int count)
@@ -249,13 +249,10 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
__rebalance2(info, parent, &left, &right);
- r = exit_child(info, &left);
- if (r) {
- exit_child(info, &right);
- return r;
- }
+ exit_child(info, &left);
+ exit_child(info, &right);
- return exit_child(info, &right);
+ return 0;
}
/*
@@ -394,22 +391,9 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
__rebalance3(info, parent, &left, &center, &right);
- r = exit_child(info, &left);
- if (r) {
- exit_child(info, &center);
- exit_child(info, &right);
- return r;
- }
-
- r = exit_child(info, &center);
- if (r) {
- exit_child(info, &right);
- return r;
- }
-
- r = exit_child(info, &right);
- if (r)
- return r;
+ exit_child(info, &left);
+ exit_child(info, &center);
+ exit_child(info, &right);
return 0;
}
@@ -433,9 +417,7 @@ static int rebalance_children(struct shadow_spine *s,
memcpy(n, dm_block_data(child),
dm_bm_block_size(dm_tm_get_bm(info->tm)));
- r = dm_tm_unlock(info->tm, child);
- if (r)
- return r;
+ dm_tm_unlock(info->tm, child);
dm_tm_dec(info->tm, dm_block_location(child));
return 0;
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 0dee514ba4c5..b27b8091a1ca 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -117,9 +117,9 @@ int new_block(struct dm_btree_info *info, struct dm_block **result)
return dm_tm_new_block(info->tm, &btree_node_validator, result);
}
-int unlock_block(struct dm_btree_info *info, struct dm_block *b)
+void unlock_block(struct dm_btree_info *info, struct dm_block *b)
{
- return dm_tm_unlock(info->tm, b);
+ dm_tm_unlock(info->tm, b);
}
/*----------------------------------------------------------------*/
@@ -137,9 +137,7 @@ int exit_ro_spine(struct ro_spine *s)
int r = 0, i;
for (i = 0; i < s->count; i++) {
- int r2 = unlock_block(s->info, s->nodes[i]);
- if (r2 < 0)
- r = r2;
+ unlock_block(s->info, s->nodes[i]);
}
return r;
@@ -150,9 +148,7 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
int r;
if (s->count == 2) {
- r = unlock_block(s->info, s->nodes[0]);
- if (r < 0)
- return r;
+ unlock_block(s->info, s->nodes[0]);
s->nodes[0] = s->nodes[1];
s->count--;
}
@@ -194,9 +190,7 @@ int exit_shadow_spine(struct shadow_spine *s)
int r = 0, i;
for (i = 0; i < s->count; i++) {
- int r2 = unlock_block(s->info, s->nodes[i]);
- if (r2 < 0)
- r = r2;
+ unlock_block(s->info, s->nodes[i]);
}
return r;
@@ -208,9 +202,7 @@ int shadow_step(struct shadow_spine *s, dm_block_t b,
int r;
if (s->count == 2) {
- r = unlock_block(s->info, s->nodes[0]);
- if (r < 0)
- return r;
+ unlock_block(s->info, s->nodes[0]);
s->nodes[0] = s->nodes[1];
s->count--;
}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 0e09aef43998..c573402033b2 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -141,7 +141,9 @@ int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
n->header.value_size = cpu_to_le32(info->value_type.size);
*root = dm_block_location(b);
- return unlock_block(info, b);
+ unlock_block(info, b);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(dm_btree_empty);
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index aacbe70c2c2e..306d2e4502c4 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -259,9 +259,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
idx.blocknr = cpu_to_le64(dm_block_location(b));
- r = dm_tm_unlock(ll->tm, b);
- if (r < 0)
- return r;
+ dm_tm_unlock(ll->tm, b);
idx.nr_free = cpu_to_le32(ll->entries_per_block);
idx.none_free_before = 0;
@@ -293,7 +291,9 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
*result = sm_lookup_bitmap(dm_bitmap_data(blk), b);
- return dm_tm_unlock(ll->tm, blk);
+ dm_tm_unlock(ll->tm, blk);
+
+ return 0;
}
static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
@@ -373,9 +373,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
return r;
}
- r = dm_tm_unlock(ll->tm, blk);
- if (r < 0)
- return r;
+ dm_tm_unlock(ll->tm, blk);
*result = i * ll->entries_per_block + (dm_block_t) position;
return 0;
@@ -429,9 +427,7 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
if (ref_count <= 2) {
sm_set_bitmap(bm_le, bit, ref_count);
- r = dm_tm_unlock(ll->tm, nb);
- if (r < 0)
- return r;
+ dm_tm_unlock(ll->tm, nb);
if (old > 2) {
r = dm_btree_remove(&ll->ref_count_info,
@@ -445,9 +441,7 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
__le32 le_rc = cpu_to_le32(ref_count);
sm_set_bitmap(bm_le, bit, 3);
- r = dm_tm_unlock(ll->tm, nb);
- if (r < 0)
- return r;
+ dm_tm_unlock(ll->tm, nb);
__dm_bless_for_disk(&le_rc);
r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
@@ -556,7 +550,9 @@ static int metadata_ll_init_index(struct ll_disk *ll)
memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
ll->bitmap_root = dm_block_location(b);
- return dm_tm_unlock(ll->tm, b);
+ dm_tm_unlock(ll->tm, b);
+
+ return 0;
}
static int metadata_ll_open(struct ll_disk *ll)
@@ -570,7 +566,9 @@ static int metadata_ll_open(struct ll_disk *ll)
return r;
memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
- return dm_tm_unlock(ll->tm, block);
+ dm_tm_unlock(ll->tm, block);
+
+ return 0;
}
static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
@@ -590,7 +588,9 @@ static int metadata_ll_commit(struct ll_disk *ll)
memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
ll->bitmap_root = dm_block_location(b);
- return dm_tm_unlock(ll->tm, b);
+ dm_tm_unlock(ll->tm, b);
+
+ return 0;
}
int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 9cb797d800cf..abe2c5dd0993 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -342,9 +342,9 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
}
EXPORT_SYMBOL_GPL(dm_tm_read_lock);
-int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
+void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
- return dm_bm_unlock(b);
+ dm_bm_unlock(b);
}
EXPORT_SYMBOL_GPL(dm_tm_unlock);
diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h
index 2e0d4d66fb1b..f3a18be68f30 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.h
+++ b/drivers/md/persistent-data/dm-transaction-manager.h
@@ -94,7 +94,7 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
struct dm_block_validator *v,
struct dm_block **result);
-int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b);
+void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b);
/*
* Functions for altering the reference count of a block directly.