Diffstat (limited to 'drivers/nvdimm/namespace_devs.c')
-rw-r--r-- | drivers/nvdimm/namespace_devs.c | 787 |
1 file changed, 179 insertions(+), 608 deletions(-)
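
This patch drops the BLK namespace paths from namespace_devs.c and converts the remaining UUID handling from raw u8[16] buffers compared with memcmp() to uuid_t compared with uuid_equal(), going through the nsl_get_uuid()/nsl_uuid_equal() label helpers (defined in nd.h, not part of this diff, and presumably wrapping import_uuid()/uuid_equal()). A minimal userspace sketch of that idiom, with a stand-in uuid type mirroring <linux/uuid.h> (names and values here are illustrative, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define UUID_SIZE 16

    /* Stand-in for the kernel's uuid_t from <linux/uuid.h>: 16 raw bytes. */
    typedef struct { unsigned char b[UUID_SIZE]; } uuid_t;

    /* Mirrors the kernel's uuid_equal() and import_uuid() helpers. */
    static bool uuid_equal(const uuid_t *a, const uuid_t *b)
    {
            return memcmp(a, b, sizeof(uuid_t)) == 0;
    }

    static void import_uuid(uuid_t *dst, const unsigned char *src)
    {
            memcpy(dst->b, src, UUID_SIZE);
    }

    int main(void)
    {
            /* Raw bytes as they sit in a namespace label (nd_label->uuid). */
            unsigned char label_bytes[UUID_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
            uuid_t label_uuid, ns_uuid;

            /* Old style: memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) == 0
             * New style: convert the label bytes once, then compare typed uuids. */
            import_uuid(&label_uuid, label_bytes);
            import_uuid(&ns_uuid, label_bytes);

            printf("uuids %s\n", uuid_equal(&label_uuid, &ns_uuid) ? "match" : "differ");
            return 0;
    }
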
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 032dc61725ff..c60ec0b373c5 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -10,6 +10,7 @@ #include <linux/nd.h> #include "nd-core.h" #include "pmem.h" +#include "pfn.h" #include "nd.h" static void namespace_io_release(struct device *dev) @@ -31,35 +32,17 @@ static void namespace_pmem_release(struct device *dev) kfree(nspm); } -static void namespace_blk_release(struct device *dev) -{ - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - struct nd_region *nd_region = to_nd_region(dev->parent); - - if (nsblk->id >= 0) - ida_simple_remove(&nd_region->ns_ida, nsblk->id); - kfree(nsblk->alt_name); - kfree(nsblk->uuid); - kfree(nsblk->res); - kfree(nsblk); -} - static bool is_namespace_pmem(const struct device *dev); -static bool is_namespace_blk(const struct device *dev); static bool is_namespace_io(const struct device *dev); static int is_uuid_busy(struct device *dev, void *data) { - u8 *uuid1 = data, *uuid2 = NULL; + uuid_t *uuid1 = data, *uuid2 = NULL; if (is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); uuid2 = nspm->uuid; - } else if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - uuid2 = nsblk->uuid; } else if (is_nd_btt(dev)) { struct nd_btt *nd_btt = to_nd_btt(dev); @@ -70,7 +53,7 @@ static int is_uuid_busy(struct device *dev, void *data) uuid2 = nd_pfn->uuid; } - if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0) + if (uuid2 && uuid_equal(uuid1, uuid2)) return -EBUSY; return 0; @@ -88,7 +71,7 @@ static int is_namespace_uuid_busy(struct device *dev, void *data) * @dev: any device on a nvdimm_bus * @uuid: uuid to check */ -bool nd_is_uuid_unique(struct device *dev, u8 *uuid) +bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid) { struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); @@ -177,12 +160,6 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, else sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : ""); - } else if (is_namespace_blk(&ndns->dev)) { - struct nd_namespace_blk *nsblk; - - nsblk = to_nd_namespace_blk(&ndns->dev); - sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id, - suffix ? 
suffix : ""); } else { return NULL; } @@ -191,23 +168,14 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, } EXPORT_SYMBOL(nvdimm_namespace_disk_name); -const u8 *nd_dev_to_uuid(struct device *dev) +const uuid_t *nd_dev_to_uuid(struct device *dev) { - static const u8 null_uuid[16]; - - if (!dev) - return null_uuid; - - if (is_namespace_pmem(dev)) { + if (dev && is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); return nspm->uuid; - } else if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - return nsblk->uuid; - } else - return null_uuid; + } + return &uuid_null; } EXPORT_SYMBOL(nd_dev_to_uuid); @@ -230,10 +198,6 @@ static ssize_t __alt_name_store(struct device *dev, const char *buf, struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); ns_altname = &nspm->alt_name; - } else if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - ns_altname = &nsblk->alt_name; } else return -ENXIO; @@ -265,83 +229,6 @@ out: return rc; } -static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk) -{ - struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent); - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - struct nd_label_id label_id; - resource_size_t size = 0; - struct resource *res; - - if (!nsblk->uuid) - return 0; - nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL); - for_each_dpa_resource(ndd, res) - if (strcmp(res->name, label_id.id) == 0) - size += resource_size(res); - return size; -} - -static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk) -{ - struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent); - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - struct nd_label_id label_id; - struct resource *res; - int count, i; - - if (!nsblk->uuid || !nsblk->lbasize || !ndd) - return false; - - count = 0; - nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL); - for_each_dpa_resource(ndd, res) { - if (strcmp(res->name, label_id.id) != 0) - continue; - /* - * Resources with unacknowledged adjustments indicate a - * failure to update labels - */ - if (res->flags & DPA_RESOURCE_ADJUSTED) - return false; - count++; - } - - /* These values match after a successful label update */ - if (count != nsblk->num_resources) - return false; - - for (i = 0; i < nsblk->num_resources; i++) { - struct resource *found = NULL; - - for_each_dpa_resource(ndd, res) - if (res == nsblk->res[i]) { - found = res; - break; - } - /* stale resource */ - if (!found) - return false; - } - - return true; -} - -resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk) -{ - resource_size_t size; - - nvdimm_bus_lock(&nsblk->common.dev); - size = __nd_namespace_blk_validate(nsblk); - nvdimm_bus_unlock(&nsblk->common.dev); - - return size; -} -EXPORT_SYMBOL(nd_namespace_blk_validate); - - static int nd_namespace_label_update(struct nd_region *nd_region, struct device *dev) { @@ -364,16 +251,6 @@ static int nd_namespace_label_update(struct nd_region *nd_region, return 0; return nd_pmem_namespace_label_update(nd_region, nspm, size); - } else if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - resource_size_t size = nd_namespace_blk_size(nsblk); - - if (size == 0 && nsblk->uuid) - /* delete allocation */; - else if (!nsblk->uuid || !nsblk->lbasize) - 
return 0; - - return nd_blk_namespace_label_update(nd_region, nsblk, size); } else return -ENXIO; } @@ -384,7 +261,7 @@ static ssize_t alt_name_store(struct device *dev, struct nd_region *nd_region = to_nd_region(dev->parent); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __alt_name_store(dev, buf, len); @@ -392,7 +269,7 @@ static ssize_t alt_name_store(struct device *dev, rc = nd_namespace_label_update(nd_region, dev); dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc < 0 ? rc : len; } @@ -406,10 +283,6 @@ static ssize_t alt_name_show(struct device *dev, struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); ns_altname = nspm->alt_name; - } else if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - ns_altname = nsblk->alt_name; } else return -ENXIO; @@ -421,13 +294,11 @@ static int scan_free(struct nd_region *nd_region, struct nd_mapping *nd_mapping, struct nd_label_id *label_id, resource_size_t n) { - bool is_blk = strncmp(label_id->id, "blk", 3) == 0; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); int rc = 0; while (n) { struct resource *res, *last; - resource_size_t new_start; last = NULL; for_each_dpa_resource(ndd, res) @@ -445,16 +316,7 @@ static int scan_free(struct nd_region *nd_region, continue; } - /* - * Keep BLK allocations relegated to high DPA as much as - * possible - */ - if (is_blk) - new_start = res->start + n; - else - new_start = res->start; - - rc = adjust_resource(res, new_start, resource_size(res) - n); + rc = adjust_resource(res, res->start, resource_size(res) - n); if (rc == 0) res->flags |= DPA_RESOURCE_ADJUSTED; nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc); @@ -496,20 +358,12 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id, struct nd_region *nd_region, struct nd_mapping *nd_mapping, resource_size_t n) { - bool is_blk = strncmp(label_id->id, "blk", 3) == 0; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - resource_size_t first_dpa; struct resource *res; int rc = 0; - /* allocate blk from highest dpa first */ - if (is_blk) - first_dpa = nd_mapping->start + nd_mapping->size - n; - else - first_dpa = nd_mapping->start; - /* first resource allocation for this label-id or dimm */ - res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n); + res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n); if (!res) rc = -EBUSY; @@ -531,7 +385,7 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id, * * BLK-space is valid as long as it does not precede a PMEM * allocation in a given region. PMEM-space must be contiguous - * and adjacent to an existing existing allocation (if one + * and adjacent to an existing allocation (if one * exists). If reserving PMEM any space is valid. 
*/ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd, @@ -540,7 +394,11 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd, resource_size_t n, struct resource *valid) { bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0; - bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; + unsigned long align; + + align = nd_region->align / nd_region->ndr_mappings; + valid->start = ALIGN(valid->start, align); + valid->end = ALIGN_DOWN(valid->end + 1, align) - 1; if (valid->start >= valid->end) goto invalid; @@ -548,21 +406,6 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd, if (is_reserve) return; - if (!is_pmem) { - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - struct nvdimm_bus *nvdimm_bus; - struct blk_alloc_info info = { - .nd_mapping = nd_mapping, - .available = nd_mapping->size, - .res = valid, - }; - - WARN_ON(!is_nd_blk(&nd_region->dev)); - nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); - device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy); - return; - } - /* allocation needs to be contiguous, so this is all or nothing */ if (resource_size(valid) < n) goto invalid; @@ -590,7 +433,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, resource_size_t n) { resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1; - bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); struct resource *res, *exist = NULL, valid; const resource_size_t to_allocate = n; @@ -688,10 +530,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, } if (strcmp(action, "allocate") == 0) { - /* BLK allocate bottom up */ - if (!is_pmem) - valid.start += available - allocate; - new_res = nvdimm_allocate_dpa(ndd, label_id, valid.start, allocate); if (!new_res) @@ -727,12 +565,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, return 0; } - /* - * If we allocated nothing in the BLK case it may be because we are in - * an initial "pmem-reserve pass". Only do an initial BLK allocation - * when none of the DPA space is reserved. 
- */ - if ((is_pmem || !ndd->dpa.child) && n == to_allocate) + if (n == to_allocate) return init_dpa_allocation(label_id, nd_region, nd_mapping, n); return n; } @@ -791,7 +624,7 @@ int __reserve_free_pmem(struct device *dev, void *data) if (nd_mapping->nvdimm != nvdimm) continue; - n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem); + n = nd_pmem_available_dpa(nd_region, nd_mapping); if (n == 0) return 0; rem = scan_allocate(nd_region, nd_mapping, &label_id, n); @@ -816,19 +649,6 @@ void release_free_pmem(struct nvdimm_bus *nvdimm_bus, nvdimm_free_dpa(ndd, res); } -static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus, - struct nd_mapping *nd_mapping) -{ - struct nvdimm *nvdimm = nd_mapping->nvdimm; - int rc; - - rc = device_for_each_child(&nvdimm_bus->dev, nvdimm, - __reserve_free_pmem); - if (rc) - release_free_pmem(nvdimm_bus, nd_mapping); - return rc; -} - /** * grow_dpa_allocation - for each dimm allocate n bytes for @label_id * @nd_region: the set of dimms to allocate @n more bytes from @@ -845,37 +665,14 @@ static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus, static int grow_dpa_allocation(struct nd_region *nd_region, struct nd_label_id *label_id, resource_size_t n) { - struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); - bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; int i; for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; resource_size_t rem = n; - int rc, j; - - /* - * In the BLK case try once with all unallocated PMEM - * reserved, and once without - */ - for (j = is_pmem; j < 2; j++) { - bool blk_only = j == 0; - - if (blk_only) { - rc = reserve_free_pmem(nvdimm_bus, nd_mapping); - if (rc) - return rc; - } - rem = scan_allocate(nd_region, nd_mapping, - label_id, rem); - if (blk_only) - release_free_pmem(nvdimm_bus, nd_mapping); - - /* try again and allow encroachments into PMEM */ - if (rem == 0) - break; - } + int rc; + rem = scan_allocate(nd_region, nd_mapping, label_id, rem); dev_WARN_ONCE(&nd_region->dev, rem, "allocation underrun: %#llx of %#llx bytes\n", (unsigned long long) n - rem, @@ -932,7 +729,8 @@ static void nd_namespace_pmem_set_resource(struct nd_region *nd_region, res->end = res->start + size - 1; } -static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where) +static bool uuid_not_set(const uuid_t *uuid, struct device *dev, + const char *where) { if (!uuid) { dev_dbg(dev, "%s: uuid not set\n", where); @@ -951,7 +749,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) struct nd_label_id label_id; u32 flags = 0, remainder; int rc, i, id = -1; - u8 *uuid = NULL; + uuid_t *uuid = NULL; if (dev->driver || ndns->claim) return -EBUSY; @@ -961,12 +759,6 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) uuid = nspm->uuid; id = nspm->id; - } else if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - uuid = nsblk->uuid; - flags = NSLABEL_FLAG_LOCAL; - id = nsblk->id; } /* @@ -980,10 +772,10 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) return -ENXIO; } - div_u64_rem(val, PAGE_SIZE * nd_region->ndr_mappings, &remainder); + div_u64_rem(val, nd_region->align, &remainder); if (remainder) { dev_dbg(dev, "%llu is not %ldK aligned\n", val, - (PAGE_SIZE * nd_region->ndr_mappings) / SZ_1K); + nd_region->align / SZ_1K); return -EINVAL; } @@ -993,8 +785,8 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) ndd = to_ndd(nd_mapping); 
/* - * All dimms in an interleave set, or the base dimm for a blk - * region, need to be enabled for the size to be changed. + * All dimms in an interleave set, need to be enabled + * for the size to be changed. */ if (!ndd) return -ENXIO; @@ -1044,40 +836,31 @@ static ssize_t size_store(struct device *dev, { struct nd_region *nd_region = to_nd_region(dev->parent); unsigned long long val; - u8 **uuid = NULL; int rc; rc = kstrtoull(buf, 0, &val); if (rc) return rc; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __size_store(dev, val); if (rc >= 0) rc = nd_namespace_label_update(nd_region, dev); - if (is_namespace_pmem(dev)) { + /* setting size zero == 'delete namespace' */ + if (rc == 0 && val == 0 && is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); - uuid = &nspm->uuid; - } else if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - uuid = &nsblk->uuid; - } - - if (rc == 0 && val == 0 && uuid) { - /* setting size zero == 'delete namespace' */ - kfree(*uuid); - *uuid = NULL; + kfree(nspm->uuid); + nspm->uuid = NULL; } dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc < 0 ? rc : len; } @@ -1090,8 +873,6 @@ resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns) struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); return resource_size(&nspm->nsio.res); - } else if (is_namespace_blk(dev)) { - return nd_namespace_blk_size(to_nd_namespace_blk(dev)); } else if (is_namespace_io(dev)) { struct nd_namespace_io *nsio = to_nd_namespace_io(dev); @@ -1141,24 +922,20 @@ static ssize_t size_show(struct device *dev, } static DEVICE_ATTR(size, 0444, size_show, size_store); -static u8 *namespace_to_uuid(struct device *dev) +static uuid_t *namespace_to_uuid(struct device *dev) { if (is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); return nspm->uuid; - } else if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - return nsblk->uuid; - } else - return ERR_PTR(-ENXIO); + } + return ERR_PTR(-ENXIO); } -static ssize_t uuid_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, + char *buf) { - u8 *uuid = namespace_to_uuid(dev); + uuid_t *uuid = namespace_to_uuid(dev); if (IS_ERR(uuid)) return PTR_ERR(uuid); @@ -1175,9 +952,9 @@ static ssize_t uuid_show(struct device *dev, * @old_uuid: reference to the uuid storage location in the namespace object */ static int namespace_update_uuid(struct nd_region *nd_region, - struct device *dev, u8 *new_uuid, u8 **old_uuid) + struct device *dev, uuid_t *new_uuid, + uuid_t **old_uuid) { - u32 flags = is_namespace_blk(dev) ? 
NSLABEL_FLAG_LOCAL : 0; struct nd_label_id old_label_id; struct nd_label_id new_label_id; int i; @@ -1208,8 +985,8 @@ static int namespace_update_uuid(struct nd_region *nd_region, return -EBUSY; } - nd_label_gen_id(&old_label_id, *old_uuid, flags); - nd_label_gen_id(&new_label_id, new_uuid, flags); + nd_label_gen_id(&old_label_id, *old_uuid, 0); + nd_label_gen_id(&new_label_id, new_uuid, 0); for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); @@ -1225,11 +1002,13 @@ static int namespace_update_uuid(struct nd_region *nd_region, list_for_each_entry(label_ent, &nd_mapping->labels, list) { struct nd_namespace_label *nd_label = label_ent->label; struct nd_label_id label_id; + uuid_t uuid; if (!nd_label) continue; - nd_label_gen_id(&label_id, nd_label->uuid, - __le32_to_cpu(nd_label->flags)); + nsl_get_uuid(ndd, nd_label, &uuid); + nd_label_gen_id(&label_id, &uuid, + nsl_get_flags(ndd, nd_label)); if (strcmp(old_label_id.id, label_id.id) == 0) set_bit(ND_LABEL_REAP, &label_ent->flags); } @@ -1245,22 +1024,18 @@ static ssize_t uuid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct nd_region *nd_region = to_nd_region(dev->parent); - u8 *uuid = NULL; + uuid_t *uuid = NULL; + uuid_t **ns_uuid; ssize_t rc = 0; - u8 **ns_uuid; if (is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); ns_uuid = &nspm->uuid; - } else if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - ns_uuid = &nsblk->uuid; } else return -ENXIO; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); if (to_ndns(dev)->claim) @@ -1276,7 +1051,7 @@ static ssize_t uuid_store(struct device *dev, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc < 0 ? 
rc : len; } @@ -1303,23 +1078,13 @@ static ssize_t resource_show(struct device *dev, return -ENXIO; return sprintf(buf, "%#llx\n", (unsigned long long) res->start); } -static DEVICE_ATTR(resource, 0400, resource_show, NULL); - -static const unsigned long blk_lbasize_supported[] = { 512, 520, 528, - 4096, 4104, 4160, 4224, 0 }; +static DEVICE_ATTR_ADMIN_RO(resource); static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 }; static ssize_t sector_size_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - return nd_size_select_show(nsblk->lbasize, - blk_lbasize_supported, buf); - } - if (is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); @@ -1337,12 +1102,7 @@ static ssize_t sector_size_store(struct device *dev, unsigned long *lbasize; ssize_t rc = 0; - if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - lbasize = &nsblk->lbasize; - supported = blk_lbasize_supported; - } else if (is_namespace_pmem(dev)) { + if (is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); lbasize = &nspm->lbasize; @@ -1350,7 +1110,7 @@ static ssize_t sector_size_store(struct device *dev, } else return -ENXIO; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); if (to_ndns(dev)->claim) rc = -EBUSY; @@ -1361,7 +1121,7 @@ static ssize_t sector_size_store(struct device *dev, dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote", buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc ? rc : len; } @@ -1372,8 +1132,8 @@ static ssize_t dpa_extents_show(struct device *dev, { struct nd_region *nd_region = to_nd_region(dev->parent); struct nd_label_id label_id; + uuid_t *uuid = NULL; int count = 0, i; - u8 *uuid = NULL; u32 flags = 0; nvdimm_bus_lock(dev); @@ -1382,11 +1142,6 @@ static ssize_t dpa_extents_show(struct device *dev, uuid = nspm->uuid; flags = 0; - } else if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - uuid = nsblk->uuid; - flags = NSLABEL_FLAG_LOCAL; } if (!uuid) @@ -1476,9 +1231,9 @@ static ssize_t holder_show(struct device *dev, struct nd_namespace_common *ndns = to_ndns(dev); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : ""); - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -1515,7 +1270,7 @@ static ssize_t holder_class_store(struct device *dev, struct nd_region *nd_region = to_nd_region(dev->parent); int rc; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __holder_class_store(dev, buf); @@ -1523,7 +1278,7 @@ static ssize_t holder_class_store(struct device *dev, rc = nd_namespace_label_update(nd_region, dev); dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc < 0 ? 
rc : len; } @@ -1534,7 +1289,7 @@ static ssize_t holder_class_show(struct device *dev, struct nd_namespace_common *ndns = to_ndns(dev); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); if (ndns->claim_class == NVDIMM_CCLASS_NONE) rc = sprintf(buf, "\n"); else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) || @@ -1546,7 +1301,7 @@ static ssize_t holder_class_show(struct device *dev, rc = sprintf(buf, "dax\n"); else rc = sprintf(buf, "<unknown>\n"); - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -1560,7 +1315,7 @@ static ssize_t mode_show(struct device *dev, char *mode; ssize_t rc; - nd_device_lock(dev); + device_lock(dev); claim = ndns->claim; if (claim && is_nd_btt(claim)) mode = "safe"; @@ -1573,7 +1328,7 @@ static ssize_t mode_show(struct device *dev, else mode = "raw"; rc = sprintf(buf, "%s\n", mode); - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -1619,21 +1374,18 @@ static umode_t namespace_visible(struct kobject *kobj, { struct device *dev = container_of(kobj, struct device, kobj); - if (a == &dev_attr_resource.attr && is_namespace_blk(dev)) - return 0; - - if (is_namespace_pmem(dev) || is_namespace_blk(dev)) { + if (is_namespace_pmem(dev)) { if (a == &dev_attr_size.attr) return 0644; return a->mode; } - if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr - || a == &dev_attr_holder.attr - || a == &dev_attr_holder_class.attr - || a == &dev_attr_force_raw.attr - || a == &dev_attr_mode.attr) + /* base is_namespace_io() attributes */ + if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr || + a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr || + a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr || + a == &dev_attr_resource.attr) return a->mode; return 0; @@ -1663,22 +1415,11 @@ static const struct device_type namespace_pmem_device_type = { .groups = nd_namespace_attribute_groups, }; -static const struct device_type namespace_blk_device_type = { - .name = "nd_namespace_blk", - .release = namespace_blk_release, - .groups = nd_namespace_attribute_groups, -}; - static bool is_namespace_pmem(const struct device *dev) { return dev ? dev->type == &namespace_pmem_device_type : false; } -static bool is_namespace_blk(const struct device *dev) -{ - return dev ? dev->type == &namespace_blk_device_type : false; -} - static bool is_namespace_io(const struct device *dev) { return dev ? dev->type == &namespace_io_device_type : false; @@ -1707,8 +1448,8 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) * Flush any in-progess probes / removals in the driver * for the raw personality of this namespace. */ - nd_device_lock(&ndns->dev); - nd_device_unlock(&ndns->dev); + device_lock(&ndns->dev); + device_unlock(&ndns->dev); if (ndns->dev.driver) { dev_dbg(&ndns->dev, "is active, can't bind %s\n", dev_name(dev)); @@ -1739,24 +1480,28 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) return ERR_PTR(-ENODEV); } + /* + * Note, alignment validation for fsdax and devdax mode + * namespaces happens in nd_pfn_validate() where infoblock + * padding parameters can be applied. 
+ */ + if (pmem_should_map_pages(dev)) { + struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); + struct resource *res = &nsio->res; + + if (!IS_ALIGNED(res->start | (res->end + 1), + memremap_compat_align())) { + dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res); + return ERR_PTR(-EOPNOTSUPP); + } + } + if (is_namespace_pmem(&ndns->dev)) { struct nd_namespace_pmem *nspm; nspm = to_nd_namespace_pmem(&ndns->dev); if (uuid_not_set(nspm->uuid, &ndns->dev, __func__)) return ERR_PTR(-ENODEV); - } else if (is_namespace_blk(&ndns->dev)) { - struct nd_namespace_blk *nsblk; - - nsblk = to_nd_namespace_blk(&ndns->dev); - if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__)) - return ERR_PTR(-ENODEV); - if (!nsblk->lbasize) { - dev_dbg(&ndns->dev, "sector size not set\n"); - return ERR_PTR(-ENODEV); - } - if (!nd_namespace_blk_validate(nsblk)) - return ERR_PTR(-ENODEV); } return ndns; @@ -1766,16 +1511,12 @@ EXPORT_SYMBOL(nvdimm_namespace_common_probe); int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns, resource_size_t size) { - if (is_namespace_blk(&ndns->dev)) - return 0; return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size); } EXPORT_SYMBOL_GPL(devm_namespace_enable); void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns) { - if (is_namespace_blk(&ndns->dev)) - return; devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev)); } EXPORT_SYMBOL_GPL(devm_namespace_disable); @@ -1809,8 +1550,8 @@ static struct device **create_namespace_io(struct nd_region *nd_region) return devs; } -static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid, - u64 cookie, u16 pos) +static bool has_uuid_at_pos(struct nd_region *nd_region, const uuid_t *uuid, + u64 cookie, u16 pos) { struct nd_namespace_label *found = NULL; int i; @@ -1824,36 +1565,28 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid, list_for_each_entry(label_ent, &nd_mapping->labels, list) { struct nd_namespace_label *nd_label = label_ent->label; - u16 position, nlabel; - u64 isetcookie; + u16 position; if (!nd_label) continue; - isetcookie = __le64_to_cpu(nd_label->isetcookie); - position = __le16_to_cpu(nd_label->position); - nlabel = __le16_to_cpu(nd_label->nlabel); + position = nsl_get_position(ndd, nd_label); - if (isetcookie != cookie) + if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) continue; - if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0) + if (!nsl_uuid_equal(ndd, nd_label, uuid)) continue; - if (namespace_label_has(ndd, type_guid) - && !guid_equal(&nd_set->type_guid, - &nd_label->type_guid)) { - dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", - &nd_set->type_guid, - &nd_label->type_guid); + if (!nsl_validate_type_guid(ndd, nd_label, + &nd_set->type_guid)) continue; - } if (found_uuid) { dev_dbg(ndd->dev, "duplicate entry for uuid\n"); return false; } found_uuid = true; - if (nlabel != nd_region->ndr_mappings) + if (!nsl_validate_nlabel(nd_region, ndd, nd_label)) continue; if (position != pos) continue; @@ -1866,7 +1599,7 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid, return found != NULL; } -static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) +static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id) { int i; @@ -1885,7 +1618,7 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) nd_label = label_ent->label; if (!nd_label) continue; - if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0) + if (nsl_uuid_equal(ndd, nd_label, 
pmem_id)) break; nd_label = NULL; } @@ -1901,14 +1634,15 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) */ hw_start = nd_mapping->start; hw_end = hw_start + nd_mapping->size; - pmem_start = __le64_to_cpu(nd_label->dpa); - pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize); + pmem_start = nsl_get_dpa(ndd, nd_label); + pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label); if (pmem_start >= hw_start && pmem_start < hw_end && pmem_end <= hw_end && pmem_end > hw_start) /* pass */; else { dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n", - dev_name(ndd->dev), nd_label->uuid); + dev_name(ndd->dev), + nsl_uuid_raw(ndd, nd_label)); return -EINVAL; } @@ -1925,17 +1659,20 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) * @nd_label: target pmem namespace label to evaluate */ static struct device *create_namespace_pmem(struct nd_region *nd_region, - struct nd_namespace_index *nsindex, - struct nd_namespace_label *nd_label) + struct nd_mapping *nd_mapping, + struct nd_namespace_label *nd_label) { + struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); + struct nd_namespace_index *nsindex = + to_namespace_index(ndd, ndd->ns_current); u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex); u64 altcookie = nd_region_interleave_set_altcookie(nd_region); struct nd_label_ent *label_ent; struct nd_namespace_pmem *nspm; - struct nd_mapping *nd_mapping; resource_size_t size = 0; struct resource *res; struct device *dev; + uuid_t uuid; int rc = 0; u16 i; @@ -1944,14 +1681,14 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, return ERR_PTR(-ENXIO); } - if (__le64_to_cpu(nd_label->isetcookie) != cookie) { + if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) { dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n", - nd_label->uuid); - if (__le64_to_cpu(nd_label->isetcookie) != altcookie) + nsl_uuid_raw(ndd, nd_label)); + if (!nsl_validate_isetcookie(ndd, nd_label, altcookie)) return ERR_PTR(-EAGAIN); dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n", - nd_label->uuid); + nsl_uuid_raw(ndd, nd_label)); } nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); @@ -1967,9 +1704,10 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, res->flags = IORESOURCE_MEM; for (i = 0; i < nd_region->ndr_mappings; i++) { - if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i)) + nsl_get_uuid(ndd, nd_label, &uuid); + if (has_uuid_at_pos(nd_region, &uuid, cookie, i)) continue; - if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i)) + if (has_uuid_at_pos(nd_region, &uuid, altcookie, i)) continue; break; } @@ -1983,7 +1721,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, * find a dimm with two instances of the same uuid. */ dev_err(&nd_region->dev, "%s missing label for %pUb\n", - nvdimm_name(nvdimm), nd_label->uuid); + nvdimm_name(nvdimm), nsl_uuid_raw(ndd, nd_label)); rc = -EINVAL; goto err; } @@ -1991,12 +1729,10 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, /* * Fix up each mapping's 'labels' to have the validated pmem label for * that position at labels[0], and NULL at labels[1]. In the process, - * check that the namespace aligns with interleave-set. We know - * that it does not overlap with any blk namespaces by virtue of - * the dimm being enabled (i.e. nd_label_reserve_dpa() - * succeeded). + * check that the namespace aligns with interleave-set. 
*/ - rc = select_pmem_id(nd_region, nd_label->uuid); + nsl_get_uuid(ndd, nd_label, &uuid); + rc = select_pmem_id(nd_region, &uuid); if (rc) goto err; @@ -2015,20 +1751,18 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, continue; } - size += __le64_to_cpu(label0->rawsize); - if (__le16_to_cpu(label0->position) != 0) + ndd = to_ndd(nd_mapping); + size += nsl_get_rawsize(ndd, label0); + if (nsl_get_position(ndd, label0) != 0) continue; WARN_ON(nspm->alt_name || nspm->uuid); - nspm->alt_name = kmemdup((void __force *) label0->name, - NSLABEL_NAME_LEN, GFP_KERNEL); - nspm->uuid = kmemdup((void __force *) label0->uuid, - NSLABEL_UUID_LEN, GFP_KERNEL); - nspm->lbasize = __le64_to_cpu(label0->lbasize); - ndd = to_ndd(nd_mapping); - if (namespace_label_has(ndd, abstraction_guid)) - nspm->nsio.common.claim_class - = to_nvdimm_cclass(&label0->abstraction_guid); - + nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0), + NSLABEL_NAME_LEN, GFP_KERNEL); + nsl_get_uuid(ndd, label0, &uuid); + nspm->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL); + nspm->lbasize = nsl_get_lbasize(ndd, label0); + nspm->nsio.common.claim_class = + nsl_get_claim_class(ndd, label0); } if (!nspm->alt_name || !nspm->uuid) { @@ -2055,54 +1789,6 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, return ERR_PTR(rc); } -struct resource *nsblk_add_resource(struct nd_region *nd_region, - struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk, - resource_size_t start) -{ - struct nd_label_id label_id; - struct resource *res; - - nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL); - res = krealloc(nsblk->res, - sizeof(void *) * (nsblk->num_resources + 1), - GFP_KERNEL); - if (!res) - return NULL; - nsblk->res = (struct resource **) res; - for_each_dpa_resource(ndd, res) - if (strcmp(res->name, label_id.id) == 0 - && res->start == start) { - nsblk->res[nsblk->num_resources++] = res; - return res; - } - return NULL; -} - -static struct device *nd_namespace_blk_create(struct nd_region *nd_region) -{ - struct nd_namespace_blk *nsblk; - struct device *dev; - - if (!is_nd_blk(&nd_region->dev)) - return NULL; - - nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); - if (!nsblk) - return NULL; - - dev = &nsblk->common.dev; - dev->type = &namespace_blk_device_type; - nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL); - if (nsblk->id < 0) { - kfree(nsblk); - return NULL; - } - dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id); - dev->parent = &nd_region->dev; - - return &nsblk->common.dev; -} - static struct device *nd_namespace_pmem_create(struct nd_region *nd_region) { struct nd_namespace_pmem *nspm; @@ -2134,6 +1820,8 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region) return dev; } +static struct lock_class_key nvdimm_namespace_key; + void nd_region_create_ns_seed(struct nd_region *nd_region) { WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); @@ -2141,20 +1829,20 @@ void nd_region_create_ns_seed(struct nd_region *nd_region) if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO) return; - if (is_nd_blk(&nd_region->dev)) - nd_region->ns_seed = nd_namespace_blk_create(nd_region); - else - nd_region->ns_seed = nd_namespace_pmem_create(nd_region); + nd_region->ns_seed = nd_namespace_pmem_create(nd_region); /* * Seed creation failures are not fatal, provisioning is simply * disabled until memory becomes available */ if (!nd_region->ns_seed) - dev_err(&nd_region->dev, "failed to create %s namespace\n", - is_nd_blk(&nd_region->dev) 
? "blk" : "pmem"); - else + dev_err(&nd_region->dev, "failed to create namespace\n"); + else { + device_initialize(nd_region->ns_seed); + lockdep_set_class(&nd_region->ns_seed->mutex, + &nvdimm_namespace_key); nd_device_register(nd_region->ns_seed); + } } void nd_region_create_dax_seed(struct nd_region *nd_region) @@ -2202,114 +1890,32 @@ static int add_namespace_resource(struct nd_region *nd_region, int i; for (i = 0; i < count; i++) { - u8 *uuid = namespace_to_uuid(devs[i]); - struct resource *res; + uuid_t *uuid = namespace_to_uuid(devs[i]); - if (IS_ERR_OR_NULL(uuid)) { + if (IS_ERR(uuid)) { WARN_ON(1); continue; } - if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0) + if (!nsl_uuid_equal(ndd, nd_label, uuid)) continue; - if (is_namespace_blk(devs[i])) { - res = nsblk_add_resource(nd_region, ndd, - to_nd_namespace_blk(devs[i]), - __le64_to_cpu(nd_label->dpa)); - if (!res) - return -ENXIO; - nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count); - } else { - dev_err(&nd_region->dev, - "error: conflicting extents for uuid: %pUb\n", - nd_label->uuid); - return -ENXIO; - } - break; + dev_err(&nd_region->dev, + "error: conflicting extents for uuid: %pUb\n", uuid); + return -ENXIO; } return i; } -static struct device *create_namespace_blk(struct nd_region *nd_region, - struct nd_namespace_label *nd_label, int count) -{ - - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - struct nd_interleave_set *nd_set = nd_region->nd_set; - struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - struct nd_namespace_blk *nsblk; - char name[NSLABEL_NAME_LEN]; - struct device *dev = NULL; - struct resource *res; - - if (namespace_label_has(ndd, type_guid)) { - if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) { - dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", - &nd_set->type_guid, - &nd_label->type_guid); - return ERR_PTR(-EAGAIN); - } - - if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) { - dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", - nd_set->cookie2, - __le64_to_cpu(nd_label->isetcookie)); - return ERR_PTR(-EAGAIN); - } - } - - nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); - if (!nsblk) - return ERR_PTR(-ENOMEM); - dev = &nsblk->common.dev; - dev->type = &namespace_blk_device_type; - dev->parent = &nd_region->dev; - nsblk->id = -1; - nsblk->lbasize = __le64_to_cpu(nd_label->lbasize); - nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, - GFP_KERNEL); - if (namespace_label_has(ndd, abstraction_guid)) - nsblk->common.claim_class - = to_nvdimm_cclass(&nd_label->abstraction_guid); - if (!nsblk->uuid) - goto blk_err; - memcpy(name, nd_label->name, NSLABEL_NAME_LEN); - if (name[0]) { - nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, - GFP_KERNEL); - if (!nsblk->alt_name) - goto blk_err; - } - res = nsblk_add_resource(nd_region, ndd, nsblk, - __le64_to_cpu(nd_label->dpa)); - if (!res) - goto blk_err; - nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count); - return dev; - blk_err: - namespace_blk_release(dev); - return ERR_PTR(-ENXIO); -} - static int cmp_dpa(const void *a, const void *b) { const struct device *dev_a = *(const struct device **) a; const struct device *dev_b = *(const struct device **) b; - struct nd_namespace_blk *nsblk_a, *nsblk_b; struct nd_namespace_pmem *nspm_a, *nspm_b; if (is_namespace_io(dev_a)) return 0; - if (is_namespace_blk(dev_a)) { - nsblk_a = to_nd_namespace_blk(dev_a); - nsblk_b = to_nd_namespace_blk(dev_b); - - return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start, - sizeof(resource_size_t)); - } - nspm_a = 
to_nd_namespace_pmem(dev_a); nspm_b = to_nd_namespace_pmem(dev_b); @@ -2323,27 +1929,21 @@ static struct device **scan_labels(struct nd_region *nd_region) struct device *dev, **devs = NULL; struct nd_label_ent *label_ent, *e; struct nd_mapping *nd_mapping = &nd_region->mapping[0]; + struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1; /* "safe" because create_namespace_pmem() might list_move() label_ent */ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { struct nd_namespace_label *nd_label = label_ent->label; struct device **__devs; - u32 flags; if (!nd_label) continue; - flags = __le32_to_cpu(nd_label->flags); - if (is_nd_blk(&nd_region->dev) - == !!(flags & NSLABEL_FLAG_LOCAL)) - /* pass, region matches label type */; - else - continue; /* skip labels that describe extents outside of the region */ - if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start || - __le64_to_cpu(nd_label->dpa) > map_end) - continue; + if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start || + nsl_get_dpa(ndd, nd_label) > map_end) + continue; i = add_namespace_resource(nd_region, nd_label, devs, count); if (i < 0) @@ -2357,16 +1957,7 @@ static struct device **scan_labels(struct nd_region *nd_region) kfree(devs); devs = __devs; - if (is_nd_blk(&nd_region->dev)) - dev = create_namespace_blk(nd_region, nd_label, count); - else { - struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - struct nd_namespace_index *nsindex; - - nsindex = to_namespace_index(ndd, ndd->ns_current); - dev = create_namespace_pmem(nd_region, nsindex, nd_label); - } - + dev = create_namespace_pmem(nd_region, nd_mapping, nd_label); if (IS_ERR(dev)) { switch (PTR_ERR(dev)) { case -EAGAIN: @@ -2383,35 +1974,25 @@ static struct device **scan_labels(struct nd_region *nd_region) } - dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n", - count, is_nd_blk(&nd_region->dev) - ? "blk" : "pmem", count == 1 ? "" : "s"); + dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count, + count == 1 ? "" : "s"); if (count == 0) { + struct nd_namespace_pmem *nspm; + /* Publish a zero-sized namespace for userspace to configure. 
*/ nd_mapping_free_labels(nd_mapping); devs = kcalloc(2, sizeof(dev), GFP_KERNEL); if (!devs) goto err; - if (is_nd_blk(&nd_region->dev)) { - struct nd_namespace_blk *nsblk; - - nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); - if (!nsblk) - goto err; - dev = &nsblk->common.dev; - dev->type = &namespace_blk_device_type; - } else { - struct nd_namespace_pmem *nspm; - nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); - if (!nspm) - goto err; - dev = &nspm->nsio.common.dev; - dev->type = &namespace_pmem_device_type; - nd_namespace_pmem_set_resource(nd_region, nspm, 0); - } + nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); + if (!nspm) + goto err; + dev = &nspm->nsio.common.dev; + dev->type = &namespace_pmem_device_type; + nd_namespace_pmem_set_resource(nd_region, nspm, 0); dev->parent = &nd_region->dev; devs[count++] = dev; } else if (is_memory(&nd_region->dev)) { @@ -2446,10 +2027,7 @@ static struct device **scan_labels(struct nd_region *nd_region) err: if (devs) { for (i = 0; devs[i]; i++) - if (is_nd_blk(&nd_region->dev)) - namespace_blk_release(devs[i]); - else - namespace_pmem_release(devs[i]); + namespace_pmem_release(devs[i]); kfree(devs); } return NULL; @@ -2505,7 +2083,7 @@ static void deactivate_labels(void *region) static int init_active_labels(struct nd_region *nd_region) { - int i; + int i, rc = 0; for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; @@ -2521,16 +2099,17 @@ static int init_active_labels(struct nd_region *nd_region) if (!ndd) { if (test_bit(NDD_LOCKED, &nvdimm->flags)) /* fail, label data may be unreadable */; - else if (test_bit(NDD_ALIASING, &nvdimm->flags)) + else if (test_bit(NDD_LABELING, &nvdimm->flags)) /* fail, labels needed to disambiguate dpa */; else - return 0; + continue; dev_err(&nd_region->dev, "%s: is %s, failing probe\n", dev_name(&nd_mapping->nvdimm->dev), test_bit(NDD_LOCKED, &nvdimm->flags) ? 
"locked" : "disabled"); - return -ENXIO; + rc = -ENXIO; + goto out; } nd_mapping->ndd = ndd; atomic_inc(&nvdimm->busy); @@ -2547,12 +2126,6 @@ static int init_active_labels(struct nd_region *nd_region) if (!label_ent) break; label = nd_label_active(ndd, j); - if (test_bit(NDD_NOBLK, &nvdimm->flags)) { - u32 flags = __le32_to_cpu(label->flags); - - flags &= ~NSLABEL_FLAG_LOCAL; - label->flags = __cpu_to_le32(flags); - } label_ent->label = label; mutex_lock(&nd_mapping->lock); @@ -2564,13 +2137,17 @@ static int init_active_labels(struct nd_region *nd_region) break; } - if (i < nd_region->ndr_mappings) { + if (i < nd_region->ndr_mappings) + rc = -ENOMEM; + +out: + if (rc) { deactivate_labels(nd_region); - return -ENOMEM; + return rc; } return devm_add_action_or_reset(&nd_region->dev, deactivate_labels, - nd_region); + nd_region); } int nd_region_register_namespaces(struct nd_region *nd_region, int *err) @@ -2592,7 +2169,6 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err) devs = create_namespace_io(nd_region); break; case ND_DEVICE_NAMESPACE_PMEM: - case ND_DEVICE_NAMESPACE_BLK: devs = create_namespaces(nd_region); break; default: @@ -2607,19 +2183,12 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err) struct device *dev = devs[i]; int id; - if (type == ND_DEVICE_NAMESPACE_BLK) { - struct nd_namespace_blk *nsblk; - - nsblk = to_nd_namespace_blk(dev); - id = ida_simple_get(&nd_region->ns_ida, 0, 0, - GFP_KERNEL); - nsblk->id = id; - } else if (type == ND_DEVICE_NAMESPACE_PMEM) { + if (type == ND_DEVICE_NAMESPACE_PMEM) { struct nd_namespace_pmem *nspm; nspm = to_nd_namespace_pmem(dev); id = ida_simple_get(&nd_region->ns_ida, 0, 0, - GFP_KERNEL); + GFP_KERNEL); nspm->id = id; } else id = i; @@ -2627,6 +2196,8 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err) if (id < 0) break; dev_set_name(dev, "namespace%d.%d", nd_region->id, id); + device_initialize(dev); + lockdep_set_class(&dev->mutex, &nvdimm_namespace_key); nd_device_register(dev); } if (i) |