author		Denis Semakin <d.semakin@omprussia.ru>	2018-03-13 13:23:45 +0400
committer	Mike Snitzer <snitzer@redhat.com>	2018-04-03 15:04:21 -0400
commit		00716545c894fc464e00612809d9cb836b180c99
tree		7492fc26be641c58a49b4c81ab707f60a597f629 /drivers/md/dm-table.c
parent		dm: backfill abnormal IO support to non-splitting IO submission
dm: add support for secure erase forwarding
Set QUEUE_FLAG_SECERASE in a DM device's queue_flags if a DM table's
data devices support secure erase. Also, add support for secure erase
to both the linear and striped targets.

Signed-off-by: Denis Semakin <d.semakin@omprussia.ru>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to '')
-rw-r--r--	drivers/md/dm-table.c	31
1 file changed, 31 insertions, 0 deletions
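The diffstat above is limited to dm-table.c, so the target-side changes the commit message mentions (linear and striped) are not shown in this view. As a minimal sketch of how a bio-based target opts in, assuming a hypothetical example_ctor (the num_secure_erase_bios field is the one checked by dm_table_supports_secure_erase() in the diff below):

#include <linux/device-mapper.h>

/*
 * Minimal sketch only; example_ctor is hypothetical, not the actual
 * linear/striped hunks.  A bio-based target opts in to secure erase
 * forwarding by advertising how many secure-erase bios it wants per
 * target, mirroring the existing num_discard_bios convention.  If any
 * target in a table leaves this at 0, dm_table_supports_secure_erase()
 * returns false and the queue flag is not set.
 */
static int example_ctor(struct dm_target *ti, unsigned int argc, char **argv)
{
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_secure_erase_bios = 1;
	return 0;
}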
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 7eb3e2a3c07d..f20fbc96a805 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1846,6 +1846,34 @@ static bool dm_table_supports_discards(struct dm_table *t)
 	return true;
 }
 
+static int device_not_secure_erase_capable(struct dm_target *ti,
+					   struct dm_dev *dev, sector_t start,
+					   sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && !blk_queue_secure_erase(q);
+}
+
+static bool dm_table_supports_secure_erase(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned int i;
+
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+
+		if (!ti->num_secure_erase_bios)
+			return false;
+
+		if (!ti->type->iterate_devices ||
+		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
+			return false;
+	}
+
+	return true;
+}
+
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
@@ -1867,6 +1895,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	} else
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 
+	if (dm_table_supports_secure_erase(t))
+		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
+
 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
 		wc = true;
 		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
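With QUEUE_FLAG_SECERASE set on the mapped device's queue, userspace can request a secure discard via the BLKSECDISCARD ioctl (this is what util-linux's blkdiscard --secure issues). A minimal sketch, assuming a hypothetical DM device node at /dev/dm-0:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* BLKSECDISCARD */

int main(void)
{
	/* Hypothetical device node, for illustration only. */
	int fd = open("/dev/dm-0", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* { offset, length } in bytes; here the first 1 MiB. */
	uint64_t range[2] = { 0, 1024 * 1024 };

	/*
	 * The kernel rejects this with EOPNOTSUPP unless secure erase is
	 * supported end to end, which is what the table checks above
	 * enforce before setting QUEUE_FLAG_SECERASE.
	 */
	if (ioctl(fd, BLKSECDISCARD, range) < 0)
		perror("BLKSECDISCARD");

	close(fd);
	return 0;
}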