提交 c2f3011e 编写于 作者: D Dan Williams 提交者: Linus Torvalds

device-dax: add an allocation interface for device-dax instances

In preparation for a facility that enables dax regions to be sub-divided,
introduce infrastructure to track and allocate region capacity.

The new dax_region/available_size attribute is only enabled for volatile
hmem devices, not pmem devices that are defined by nvdimm namespace
boundaries.  This is per Jeff's feedback the last time dynamic device-dax
capacity allocation support was discussed.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Brice Goglin <Brice.Goglin@inria.fr>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jia He <justin.he@arm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hulk Robot <hulkci@huawei.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Jason Yan <yanaijie@huawei.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/linux-nvdimm/x49shpp3zn8.fsf@segfault.boston.devel.redhat.com
Link: https://lkml.kernel.org/r/159643101035.4062302.6785857915652647857.stgit@dwillia2-desk3.amr.corp.intel.com
Link: https://lkml.kernel.org/r/160106112801.30709.14601438735305335071.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 0513bd5b
...@@ -130,6 +130,11 @@ ATTRIBUTE_GROUPS(dax_drv); ...@@ -130,6 +130,11 @@ ATTRIBUTE_GROUPS(dax_drv);
static int dax_bus_match(struct device *dev, struct device_driver *drv); static int dax_bus_match(struct device *dev, struct device_driver *drv);
/* True when the region's layout is fixed (IORESOURCE_DAX_STATIC set). */
static bool is_static(struct dax_region *dax_region)
{
	return !!(dax_region->res.flags & IORESOURCE_DAX_STATIC);
}
static struct bus_type dax_bus_type = { static struct bus_type dax_bus_type = {
.name = "dax", .name = "dax",
.uevent = dax_bus_uevent, .uevent = dax_bus_uevent,
...@@ -185,7 +190,48 @@ static ssize_t align_show(struct device *dev, ...@@ -185,7 +190,48 @@ static ssize_t align_show(struct device *dev,
} }
static DEVICE_ATTR_RO(align); static DEVICE_ATTR_RO(align);
/* Walk every child resource (i.e. per-instance allocation) of @dax_region. */
#define for_each_dax_region_resource(dax_region, res) \
	for (res = (dax_region)->res.child; res; res = res->sibling)

/*
 * Return the unallocated capacity of @dax_region in bytes: the total
 * region size minus the sum of all child resources already claimed by
 * device-dax instances.
 *
 * Caller must hold the region's device lock so the child list cannot
 * change underneath the walk (asserted below).
 */
static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
{
	resource_size_t size = resource_size(&dax_region->res);
	struct resource *res;

	device_lock_assert(dax_region->dev);

	for_each_dax_region_resource(dax_region, res)
		size -= resource_size(res);
	return size;
}
/*
 * sysfs show handler for dax_region/available_size: report the region's
 * unallocated capacity in bytes. The device lock is taken around the
 * computation to satisfy dax_region_avail_size()'s locking contract.
 */
static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	unsigned long long size;

	device_lock(dev);
	size = dax_region_avail_size(dax_region);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(available_size);
/*
 * is_visible() hook for the dax_region attribute group: hide
 * available_size on static regions (e.g. those defined by nvdimm
 * namespace boundaries), since their capacity cannot be sub-divided
 * dynamically. Returning 0 suppresses the attribute; otherwise the
 * attribute's own mode is used.
 */
static umode_t dax_region_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct dax_region *dax_region = dev_get_drvdata(dev);

	if (is_static(dax_region) && a == &dev_attr_available_size.attr)
		return 0;
	return a->mode;
}
static struct attribute *dax_region_attributes[] = { static struct attribute *dax_region_attributes[] = {
&dev_attr_available_size.attr,
&dev_attr_region_size.attr, &dev_attr_region_size.attr,
&dev_attr_align.attr, &dev_attr_align.attr,
&dev_attr_id.attr, &dev_attr_id.attr,
...@@ -195,6 +241,7 @@ static struct attribute *dax_region_attributes[] = { ...@@ -195,6 +241,7 @@ static struct attribute *dax_region_attributes[] = {
static const struct attribute_group dax_region_attribute_group = { static const struct attribute_group dax_region_attribute_group = {
.name = "dax_region", .name = "dax_region",
.attrs = dax_region_attributes, .attrs = dax_region_attributes,
.is_visible = dax_region_visible,
}; };
static const struct attribute_group *dax_region_attribute_groups[] = { static const struct attribute_group *dax_region_attribute_groups[] = {
...@@ -226,7 +273,8 @@ static void dax_region_unregister(void *region) ...@@ -226,7 +273,8 @@ static void dax_region_unregister(void *region)
} }
struct dax_region *alloc_dax_region(struct device *parent, int region_id, struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, int target_node, unsigned int align) struct resource *res, int target_node, unsigned int align,
unsigned long flags)
{ {
struct dax_region *dax_region; struct dax_region *dax_region;
...@@ -249,12 +297,17 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id, ...@@ -249,12 +297,17 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
return NULL; return NULL;
dev_set_drvdata(parent, dax_region); dev_set_drvdata(parent, dax_region);
memcpy(&dax_region->res, res, sizeof(*res));
kref_init(&dax_region->kref); kref_init(&dax_region->kref);
dax_region->id = region_id; dax_region->id = region_id;
dax_region->align = align; dax_region->align = align;
dax_region->dev = parent; dax_region->dev = parent;
dax_region->target_node = target_node; dax_region->target_node = target_node;
dax_region->res = (struct resource) {
.start = res->start,
.end = res->end,
.flags = IORESOURCE_MEM | flags,
};
if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) { if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
kfree(dax_region); kfree(dax_region);
return NULL; return NULL;
...@@ -267,6 +320,32 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id, ...@@ -267,6 +320,32 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
} }
EXPORT_SYMBOL_GPL(alloc_dax_region); EXPORT_SYMBOL_GPL(alloc_dax_region);
/*
 * Reserve @size bytes from the start of @dev_dax's parent region and
 * record the resulting physical address range in dev_dax->range.
 *
 * Caller must hold the region's device lock (asserted below).
 *
 * Returns 0 on success, -ENOMEM if the region already has a child
 * allocation (only one allocation per region is supported for now) or
 * if the resource reservation itself fails.
 */
static int alloc_dev_dax_range(struct dev_dax *dev_dax, resource_size_t size)
{
	struct dax_region *dax_region = dev_dax->region;
	struct resource *res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	struct resource *alloc;

	device_lock_assert(dax_region->dev);

	/* TODO: handle multiple allocations per region */
	if (res->child)
		return -ENOMEM;

	/* name the reservation after the device so it shows up in the tree */
	alloc = __request_region(res, res->start, size, dev_name(dev), 0);
	if (!alloc)
		return -ENOMEM;

	dev_dax->range = (struct range) {
		.start = alloc->start,
		.end = alloc->end,
	};

	return 0;
}
static ssize_t size_show(struct device *dev, static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf) struct device_attribute *attr, char *buf)
{ {
...@@ -361,6 +440,15 @@ void kill_dev_dax(struct dev_dax *dev_dax) ...@@ -361,6 +440,15 @@ void kill_dev_dax(struct dev_dax *dev_dax)
} }
EXPORT_SYMBOL_GPL(kill_dev_dax); EXPORT_SYMBOL_GPL(kill_dev_dax);
/*
 * Release the physical address range @dev_dax previously claimed from
 * its parent region via alloc_dev_dax_range().
 *
 * Caller must hold the region's device lock (asserted below).
 */
static void free_dev_dax_range(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;
	struct range *range = &dev_dax->range;

	device_lock_assert(dax_region->dev);
	__release_region(&dax_region->res, range->start, range_len(range));
}
static void dev_dax_release(struct device *dev) static void dev_dax_release(struct device *dev)
{ {
struct dev_dax *dev_dax = to_dev_dax(dev); struct dev_dax *dev_dax = to_dev_dax(dev);
...@@ -385,6 +473,7 @@ static void unregister_dev_dax(void *dev) ...@@ -385,6 +473,7 @@ static void unregister_dev_dax(void *dev)
dev_dbg(dev, "%s\n", __func__); dev_dbg(dev, "%s\n", __func__);
kill_dev_dax(dev_dax); kill_dev_dax(dev_dax);
free_dev_dax_range(dev_dax);
device_del(dev); device_del(dev);
put_device(dev); put_device(dev);
} }
...@@ -397,7 +486,7 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data) ...@@ -397,7 +486,7 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
struct dev_dax *dev_dax; struct dev_dax *dev_dax;
struct inode *inode; struct inode *inode;
struct device *dev; struct device *dev;
int rc = -ENOMEM; int rc;
if (data->id < 0) if (data->id < 0)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
...@@ -406,11 +495,25 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data) ...@@ -406,11 +495,25 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
if (!dev_dax) if (!dev_dax)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
dev_dax->region = dax_region;
dev = &dev_dax->dev;
device_initialize(dev);
dev_set_name(dev, "dax%d.%d", dax_region->id, data->id);
rc = alloc_dev_dax_range(dev_dax, data->size);
if (rc)
goto err_range;
if (data->pgmap) { if (data->pgmap) {
dev_WARN_ONCE(parent, !is_static(dax_region),
"custom dev_pagemap requires a static dax_region\n");
dev_dax->pgmap = kmemdup(data->pgmap, dev_dax->pgmap = kmemdup(data->pgmap,
sizeof(struct dev_pagemap), GFP_KERNEL); sizeof(struct dev_pagemap), GFP_KERNEL);
if (!dev_dax->pgmap) if (!dev_dax->pgmap) {
rc = -ENOMEM;
goto err_pgmap; goto err_pgmap;
}
} }
/* /*
...@@ -427,12 +530,7 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data) ...@@ -427,12 +530,7 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
kill_dax(dax_dev); kill_dax(dax_dev);
/* from here on we're committed to teardown via dev_dax_release() */ /* from here on we're committed to teardown via dev_dax_release() */
dev = &dev_dax->dev;
device_initialize(dev);
dev_dax->dax_dev = dax_dev; dev_dax->dax_dev = dax_dev;
dev_dax->region = dax_region;
dev_dax->range = data->range;
dev_dax->target_node = dax_region->target_node; dev_dax->target_node = dax_region->target_node;
kref_get(&dax_region->kref); kref_get(&dax_region->kref);
...@@ -444,7 +542,6 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data) ...@@ -444,7 +542,6 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
dev->class = dax_class; dev->class = dax_class;
dev->parent = parent; dev->parent = parent;
dev->type = &dev_dax_type; dev->type = &dev_dax_type;
dev_set_name(dev, "dax%d.%d", dax_region->id, data->id);
rc = device_add(dev); rc = device_add(dev);
if (rc) { if (rc) {
...@@ -458,9 +555,12 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data) ...@@ -458,9 +555,12 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
return ERR_PTR(rc); return ERR_PTR(rc);
return dev_dax; return dev_dax;
err_alloc_dax: err_alloc_dax:
kfree(dev_dax->pgmap); kfree(dev_dax->pgmap);
err_pgmap: err_pgmap:
free_dev_dax_range(dev_dax);
err_range:
kfree(dev_dax); kfree(dev_dax);
return ERR_PTR(rc); return ERR_PTR(rc);
......
...@@ -10,8 +10,11 @@ struct resource; ...@@ -10,8 +10,11 @@ struct resource;
struct dax_device; struct dax_device;
struct dax_region; struct dax_region;
void dax_region_put(struct dax_region *dax_region); void dax_region_put(struct dax_region *dax_region);
#define IORESOURCE_DAX_STATIC (1UL << 0)
struct dax_region *alloc_dax_region(struct device *parent, int region_id, struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, int target_node, unsigned int align); struct resource *res, int target_node, unsigned int align,
unsigned long flags);
enum dev_dax_subsys { enum dev_dax_subsys {
DEV_DAX_BUS = 0, /* zeroed dev_dax_data picks this by default */ DEV_DAX_BUS = 0, /* zeroed dev_dax_data picks this by default */
...@@ -22,7 +25,7 @@ struct dev_dax_data { ...@@ -22,7 +25,7 @@ struct dev_dax_data {
struct dax_region *dax_region; struct dax_region *dax_region;
struct dev_pagemap *pgmap; struct dev_pagemap *pgmap;
enum dev_dax_subsys subsys; enum dev_dax_subsys subsys;
struct range range; resource_size_t size;
int id; int id;
}; };
......
...@@ -22,7 +22,7 @@ void dax_bus_exit(void); ...@@ -22,7 +22,7 @@ void dax_bus_exit(void);
* @kref: to pin while other agents have a need to do lookups * @kref: to pin while other agents have a need to do lookups
* @dev: parent device backing this region * @dev: parent device backing this region
* @align: allocation and mapping alignment for child dax devices * @align: allocation and mapping alignment for child dax devices
* @res: physical address range of the region * @res: resource tree to track instance allocations
*/ */
struct dax_region { struct dax_region {
int id; int id;
......
...@@ -20,17 +20,14 @@ static int dax_hmem_probe(struct platform_device *pdev) ...@@ -20,17 +20,14 @@ static int dax_hmem_probe(struct platform_device *pdev)
mri = dev->platform_data; mri = dev->platform_data;
dax_region = alloc_dax_region(dev, pdev->id, res, mri->target_node, dax_region = alloc_dax_region(dev, pdev->id, res, mri->target_node,
PMD_SIZE); PMD_SIZE, 0);
if (!dax_region) if (!dax_region)
return -ENOMEM; return -ENOMEM;
data = (struct dev_dax_data) { data = (struct dev_dax_data) {
.dax_region = dax_region, .dax_region = dax_region,
.id = 0, .id = 0,
.range = { .size = resource_size(res),
.start = res->start,
.end = res->end,
},
}; };
dev_dax = devm_create_dev_dax(&data); dev_dax = devm_create_dev_dax(&data);
if (IS_ERR(dev_dax)) if (IS_ERR(dev_dax))
......
...@@ -54,7 +54,8 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys) ...@@ -54,7 +54,8 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
memcpy(&res, &pgmap.res, sizeof(res)); memcpy(&res, &pgmap.res, sizeof(res));
res.start += offset; res.start += offset;
dax_region = alloc_dax_region(dev, region_id, &res, dax_region = alloc_dax_region(dev, region_id, &res,
nd_region->target_node, le32_to_cpu(pfn_sb->align)); nd_region->target_node, le32_to_cpu(pfn_sb->align),
IORESOURCE_DAX_STATIC);
if (!dax_region) if (!dax_region)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
...@@ -63,10 +64,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys) ...@@ -63,10 +64,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
.id = id, .id = id,
.pgmap = &pgmap, .pgmap = &pgmap,
.subsys = subsys, .subsys = subsys,
.range = { .size = resource_size(&res),
.start = res.start,
.end = res.end,
},
}; };
dev_dax = devm_create_dev_dax(&data); dev_dax = devm_create_dev_dax(&data);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册