Commit 98206f34 authored by Dan Williams, committed by Greg Kroah-Hartman

libnvdimm, pfn: Pad pfn namespaces relative to other regions

commit ae86cbfef3818300f1972e52f67a93211acb0e24 upstream.

Commit cfe30b87 "libnvdimm, pmem: adjust for section collisions with
'System RAM'" enabled Linux to work around occasions where platform
firmware arranges for "System RAM" and "Persistent Memory" to collide
within a single section boundary. Unfortunately, as reported in this
issue [1], platform firmware can inflict the same collision between
persistent memory regions.
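
For scale, memory hotplug operates on section granularity (typically 128 MiB on x86_64), so two regions whose shared boundary is not section aligned end up straddling the same section. A standalone userspace illustration of that arithmetic, with made-up addresses (not part of the patch):

/*
 * Illustration only (hypothetical addresses, assumed 128 MiB section
 * size): two persistent memory regions whose shared boundary is not
 * section aligned land in the same memory section.
 */
#include <stdio.h>
#include <stdint.h>

#define SECTION_SIZE (128ULL << 20)	/* assumed x86_64 section size */
#define SECTION_ALIGN_DOWN(x) ((x) & ~(SECTION_SIZE - 1))

int main(void)
{
	uint64_t region0_end   = 0x3c4000000ULL;	/* hypothetical */
	uint64_t region1_start = 0x3c4000000ULL;	/* hypothetical */

	if (SECTION_ALIGN_DOWN(region0_end - 1) ==
	    SECTION_ALIGN_DOWN(region1_start))
		printf("regions collide within one memory section\n");
	return 0;
}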

The approach of interrogating iomem_resource does not work in this
case because platform firmware may merge multiple regions into a single
iomem_resource range. Instead provide a method to interrogate regions
that share the same parent bus.
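
A rough userspace model of that check (illustrative names and data only, not the kernel API; the patch's actual implementation is nd_region_conflict() in the last hunk below): walk every sibling region, skip the region being probed, and report a conflict if the candidate physical range overlaps any of them.

/* Userspace sketch of the sibling-region overlap check; all data is
 * hypothetical and the function names are illustrative. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct region {
	uint64_t start;
	uint64_t size;
};

/* Nonzero if [start, start + size) overlaps any sibling other than 'self'. */
static int overlaps_sibling(const struct region *r, size_t n, size_t self,
			    uint64_t start, uint64_t size)
{
	uint64_t end = start + size;

	for (size_t i = 0; i < n; i++) {
		uint64_t r_start = r[i].start, r_end = r[i].start + r[i].size;

		if (i == self)
			continue;
		if (start >= r_start && start < r_end)
			return 1;
		if (end > r_start && end <= r_end)
			return 1;
	}
	return 0;
}

int main(void)
{
	/* hypothetical layout: region 1 begins exactly where region 0 ends */
	struct region regions[] = {
		{ 0x340000000ULL, 0x84000000ULL },
		{ 0x3c4000000ULL, 0x82000000ULL },
	};

	/* probe region 1's range extended down to a 128 MiB section boundary */
	printf("conflict: %d\n",
	       overlaps_sibling(regions, 2, 1, 0x3c0000000ULL, 0x86000000ULL));
	return 0;
}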

This is a stop-gap until the core-MM can grow support for hotplug on
sub-section boundaries.

[1]: https://github.com/pmem/ndctl/issues/76

Fixes: cfe30b87 ("libnvdimm, pmem: adjust for section collisions with...")
Cc: <stable@vger.kernel.org>
Reported-by: Patrick Geary <patrickg@supermicro.com>
Tested-by: Patrick Geary <patrickg@supermicro.com>
Reviewed-by: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent: beb68a72
@@ -112,6 +112,8 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
 		struct nd_mapping *nd_mapping, resource_size_t *overlap);
 resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
 resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
+int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
+		resource_size_t size);
 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
 		struct nd_label_id *label_id);
 int alias_dpa_busy(struct device *dev, void *data);
@@ -590,14 +590,47 @@ static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
 			ALIGN_DOWN(phys, nd_pfn->align));
 }
 
+/*
+ * Check if pmem collides with 'System RAM', or other regions when
+ * section aligned.  Trim it accordingly.
+ */
+static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
+{
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
+	const resource_size_t start = nsio->res.start;
+	const resource_size_t end = start + resource_size(&nsio->res);
+	resource_size_t adjust, size;
+
+	*start_pad = 0;
+	*end_trunc = 0;
+
+	adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
+	size = resource_size(&nsio->res) + adjust;
+	if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED
+			|| nd_region_conflict(nd_region, start - adjust, size))
+		*start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+
+	/* Now check that end of the range does not collide. */
+	adjust = PHYS_SECTION_ALIGN_UP(end) - end;
+	size = resource_size(&nsio->res) + adjust;
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED
+			|| !IS_ALIGNED(end, nd_pfn->align)
+			|| nd_region_conflict(nd_region, start, size + adjust))
+		*end_trunc = end - phys_pmem_align_down(nd_pfn, end);
+}
+
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
 	u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
-	u32 start_pad = 0, end_trunc = 0;
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	resource_size_t start, size;
-	struct nd_namespace_io *nsio;
 	struct nd_region *nd_region;
+	u32 start_pad, end_trunc;
 	struct nd_pfn_sb *pfn_sb;
 	unsigned long npfns;
 	phys_addr_t offset;
@@ -629,30 +662,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 
 	memset(pfn_sb, 0, sizeof(*pfn_sb));
 
-	/*
-	 * Check if pmem collides with 'System RAM' when section aligned and
-	 * trim it accordingly
-	 */
-	nsio = to_nd_namespace_io(&ndns->dev);
-	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
-	size = resource_size(&nsio->res);
-	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-				IORES_DESC_NONE) == REGION_MIXED) {
-		start = nsio->res.start;
-		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
-	}
-
-	start = nsio->res.start;
-	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
-	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-				IORES_DESC_NONE) == REGION_MIXED
-			|| !IS_ALIGNED(start + resource_size(&nsio->res),
-				nd_pfn->align)) {
-		size = resource_size(&nsio->res);
-		end_trunc = start + size - phys_pmem_align_down(nd_pfn,
-				start + size);
-	}
-
+	trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
 	if (start_pad + end_trunc)
 		dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
 				dev_name(&ndns->dev), start_pad + end_trunc);
@@ -663,7 +673,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	 * implementation will limit the pfns advertised through
 	 * ->direct_access() to those that are included in the memmap.
 	 */
-	start += start_pad;
+	start = nsio->res.start + start_pad;
 	size = resource_size(&nsio->res);
 	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
 			/ PAGE_SIZE);
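
A quick hand check of the padding arithmetic in trim_pfn_device() above, with an assumed 128 MiB section size, hypothetical namespace addresses, and phys_pmem_align_down() simplified to a plain section align-down:

/* Hand check of the start_pad / end_trunc arithmetic; the section size
 * and addresses are assumptions, not taken from the patch. */
#include <stdio.h>
#include <stdint.h>

#define SECTION_SIZE (128ULL << 20)	/* assumed */
#define SECT_ALIGN_DOWN(x) ((x) & ~(SECTION_SIZE - 1))
#define SECT_ALIGN_UP(x)   SECT_ALIGN_DOWN((x) + SECTION_SIZE - 1)

int main(void)
{
	uint64_t start = 0x3c4000000ULL;	/* hypothetical namespace start */
	uint64_t size  = 0x82000000ULL;		/* hypothetical namespace size */
	uint64_t end   = start + size;

	/* applied only when the section-extended probes report a conflict */
	uint64_t start_pad = SECT_ALIGN_UP(start) - start;
	uint64_t end_trunc = end - SECT_ALIGN_DOWN(end);

	printf("start_pad = %llu MiB, end_trunc = %llu MiB\n",
	       (unsigned long long)(start_pad >> 20),
	       (unsigned long long)(end_trunc >> 20));
	return 0;
}
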
@@ -1184,6 +1184,47 @@ int nvdimm_has_cache(struct nd_region *nd_region)
 }
 EXPORT_SYMBOL_GPL(nvdimm_has_cache);
 
+struct conflict_context {
+	struct nd_region *nd_region;
+	resource_size_t start, size;
+};
+
+static int region_conflict(struct device *dev, void *data)
+{
+	struct nd_region *nd_region;
+	struct conflict_context *ctx = data;
+	resource_size_t res_end, region_end, region_start;
+
+	if (!is_memory(dev))
+		return 0;
+
+	nd_region = to_nd_region(dev);
+	if (nd_region == ctx->nd_region)
+		return 0;
+
+	res_end = ctx->start + ctx->size;
+	region_start = nd_region->ndr_start;
+	region_end = region_start + nd_region->ndr_size;
+	if (ctx->start >= region_start && ctx->start < region_end)
+		return -EBUSY;
+	if (res_end > region_start && res_end <= region_end)
+		return -EBUSY;
+	return 0;
+}
+
+int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
+		resource_size_t size)
+{
+	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+	struct conflict_context ctx = {
+		.nd_region = nd_region,
+		.start = start,
+		.size = size,
+	};
+
+	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
+}
+
 void __exit nd_region_devs_exit(void)
 {
 	ida_destroy(&region_ida);
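
The helper added above relies on device_for_each_child(): the driver core invokes region_conflict() for each child device of the nvdimm bus and stops at the first nonzero return value, so nd_region_conflict() returns -EBUSY as soon as any sibling region overlaps the probed range and 0 otherwise. trim_pfn_device() only tests for a nonzero result, treating any overlap as a reason to pad the namespace start or truncate its end.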