author    Dan Williams <dan.j.williams@intel.com>	2019-03-11 12:20:30 -0700
committer Dan Williams <dan.j.williams@intel.com>	2019-03-11 12:20:30 -0700
commit    6fd96ff557963de8e62842a0dc360a6e3610d2bb (patch)
tree      90ea3e549269fbe6a2333986d42c2cd8c2df8d9d /drivers/nvdimm
parent    Merge branch 'for-5.1/libnvdimm' into libnvdimm-for-next (diff)
parent    libnvdimm/pfn: Remove dax_label_reserve (diff)
Merge branch 'for-5.1/libnvdimm-start-pad' into libnvdimm-for-next
Merge the initial lead-in cleanups and fixes that resulted from the effort to resolve bugs in the section-alignment padding implementation in the nvdimm core. The back half of this topic is abandoned in favor of implementing sub-section hotplug support.
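The functional center of the series is the replacement of the bare SZ_8K info-block reservation with a page-aligned info_block_reserve() helper. As a minimal standalone sketch of why that alignment matters, the program below redoes the arithmetic in userspace, assuming a 64K page size (the case where a bare 8K reserve rounds down to zero pages); SZ_8K, PAGE_SIZE, and ALIGN() here are stand-ins, not the kernel definitions:

/*
 * Sketch of the info_block_reserve() arithmetic with userspace
 * stand-ins. Assumes a 64K PAGE_SIZE to show the case a bare SZ_8K
 * reserve mishandles: 8K / 64K rounds down to zero whole pages.
 */
#include <stdio.h>

#define SZ_8K		(8UL * 1024)
#define PAGE_SIZE	(64UL * 1024)	/* assumed, e.g. arm64 64K pages */

/* Round x up to the next multiple of a (a is a power of two). */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned long info_block_reserve(void)
{
	/* The 8K of info blocks, rounded up to whole pages. */
	return ALIGN(SZ_8K, PAGE_SIZE);
}

int main(void)
{
	printf("pfns reserved by PHYS_PFN(SZ_8K):      %lu\n",
			SZ_8K / PAGE_SIZE);
	printf("pfns reserved by info_block_reserve(): %lu\n",
			info_block_reserve() / PAGE_SIZE);
	return 0;
}

With 64K pages the first line prints 0 and the second prints 1, which appears to be the class of under-reservation the helper closes out.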
Diffstat (limited to 'drivers/nvdimm')
-rw-r--r--  drivers/nvdimm/namespace_devs.c |  4 ++++
-rw-r--r--  drivers/nvdimm/pfn_devs.c       | 24 ++++++++++++++----------
2 files changed, 18 insertions(+), 10 deletions(-)
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 17fb7f931f0c..7849bf1812c4 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
 bool pmem_should_map_pages(struct device *dev)
 {
 	struct nd_region *nd_region = to_nd_region(dev->parent);
+	struct nd_namespace_common *ndns = to_ndns(dev);
 	struct nd_namespace_io *nsio;
 
 	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
@@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev)
 	if (is_nd_pfn(dev) || is_nd_btt(dev))
 		return false;
 
+	if (ndns->force_raw)
+		return false;
+
 	nsio = to_nd_namespace_io(dev);
 	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
 				IORESOURCE_SYSTEM_RAM,
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 6f22272e8d80..d271bd731af7 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -580,6 +580,11 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 }
 EXPORT_SYMBOL(nd_pfn_probe);
 
+static u32 info_block_reserve(void)
+{
+	return ALIGN(SZ_8K, PAGE_SIZE);
+}
+
 /*
  * We hotplug memory at section granularity, pad the reserved area from
  * the previous section base to the namespace base address.
@@ -593,7 +598,7 @@ static unsigned long init_altmap_base(resource_size_t base)
 
 static unsigned long init_altmap_reserve(resource_size_t base)
 {
-	unsigned long reserve = PHYS_PFN(SZ_8K);
+	unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
 	unsigned long base_pfn = PHYS_PFN(base);
 
 	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
@@ -608,6 +613,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 	u64 offset = le64_to_cpu(pfn_sb->dataoff);
 	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
 	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	u32 reserve = info_block_reserve();
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	resource_size_t base = nsio->res.start + start_pad;
@@ -621,7 +627,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 	res->end -= end_trunc;
 
 	if (nd_pfn->mode == PFN_MODE_RAM) {
-		if (offset < SZ_8K)
+		if (offset < reserve)
 			return -EINVAL;
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
 		pgmap->altmap_valid = false;
@@ -634,7 +640,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 					le64_to_cpu(nd_pfn->pfn_sb->npfns),
 					nd_pfn->npfns);
 		memcpy(altmap, &__altmap, sizeof(*altmap));
-		altmap->free = PHYS_PFN(offset - SZ_8K);
+		altmap->free = PHYS_PFN(offset - reserve);
 		altmap->alloc = 0;
 		pgmap->altmap_valid = true;
 	} else
@@ -678,18 +684,17 @@ static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
 	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
 				IORES_DESC_NONE) == REGION_MIXED
 			|| !IS_ALIGNED(end, nd_pfn->align)
-			|| nd_region_conflict(nd_region, start, size + adjust))
+			|| nd_region_conflict(nd_region, start, size))
 		*end_trunc = end - phys_pmem_align_down(nd_pfn, end);
 }
 
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
-	u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	u32 start_pad, end_trunc, reserve = info_block_reserve();
 	resource_size_t start, size;
 	struct nd_region *nd_region;
-	u32 start_pad, end_trunc;
 	struct nd_pfn_sb *pfn_sb;
 	unsigned long npfns;
 	phys_addr_t offset;
@@ -734,7 +739,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	 */
 	start = nsio->res.start + start_pad;
 	size = resource_size(&nsio->res);
-	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
+	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
 			/ PAGE_SIZE);
 	if (nd_pfn->mode == PFN_MODE_PMEM) {
 		/*
@@ -742,11 +747,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 		 * when populating the vmemmap. This *should* be equal to
 		 * PMD_SIZE for most architectures.
 		 */
-		offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
+		offset = ALIGN(start + reserve + 64 * npfns,
 				max(nd_pfn->align, PMD_SIZE)) - start;
 	} else if (nd_pfn->mode == PFN_MODE_RAM)
-		offset = ALIGN(start + SZ_8K + dax_label_reserve,
-				nd_pfn->align) - start;
+		offset = ALIGN(start + reserve, nd_pfn->align) - start;
 	else
 		return -ENXIO;
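
For a sense of the magnitudes these expressions produce, here is a standalone sketch of the PFN_MODE_PMEM offset and the init_altmap_reserve() arithmetic. Everything below is a userspace stand-in under assumed values (4K pages, 2M alignment, 128M sparsemem sections, a hypothetical 16G namespace based at 4G + 72M), and the kernel's PFN_SECTION_ALIGN_UP rounding of npfns is omitted for brevity:

/*
 * Userspace sketch of the metadata arithmetic above. All constants
 * and macros are assumptions standing in for the kernel versions.
 */
#include <stdio.h>

#define SZ_8K		(8ULL << 10)
#define PAGE_SIZE	(4ULL << 10)
#define PMD_SIZE	(2ULL << 20)
#define SECTION_SIZE	(128ULL << 20)

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned long long start = (4ULL << 30) + (72ULL << 20); /* assumed base */
	unsigned long long size = 16ULL << 30;	/* hypothetical 16G namespace */
	unsigned long long align = PMD_SIZE;	/* nd_pfn->align */
	unsigned long long reserve = ALIGN(SZ_8K, PAGE_SIZE);

	/* Pages the device must describe (start_pad/end_trunc = 0 here). */
	unsigned long long npfns = (size - reserve) / PAGE_SIZE;

	/* PFN_MODE_PMEM: info block reserve plus one 64-byte struct page
	 * per pfn, rounded up to max(align, PMD_SIZE). */
	unsigned long long offset =
		ALIGN(start + reserve + 64 * npfns, MAX(align, PMD_SIZE)) - start;

	/* init_altmap_reserve() analogue: info block pfns plus the pad
	 * from the 128M section base up to the namespace base. */
	unsigned long long base_pfn = start / PAGE_SIZE;
	unsigned long long sec_base_pfn = ALIGN_DOWN(start, SECTION_SIZE) / PAGE_SIZE;
	unsigned long long altmap_reserve =
		reserve / PAGE_SIZE + (base_pfn - sec_base_pfn);

	printf("offset         = %llu MiB\n", offset >> 20);
	printf("altmap reserve = %llu pfns\n", altmap_reserve);
	return 0;
}

Run as written this prints an offset of 258 MiB (capacity consumed by struct page metadata before data begins) and an altmap reserve of 18434 pfns: 2 pfns of info block plus the 72M pad back to the section base, which is exactly the padding the merge text says this topic branch set out to rework.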