/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/pmem.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < nvdimm->num_flush; i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd->flush_wpq[dimm][j] & PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd->flush_wpq[dimm][i] = flush_page
			+ (res->start & ~PAGE_MASK);
	}

	return 0;
}
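
/*
 * The net result is a per-dimm table of mapped flush hint addresses,
 * ndrd->flush_wpq[dimm][hint].  Hints that fall in the same page share
 * one ioremap'd mapping, with each table entry retaining its own offset
 * within the page.  nd_region_activate() below derives flush_mask from
 * the smallest non-zero hint count across the mapped dimms, rounded
 * down to a power of two, so nvdimm_flush() can select a hint with a
 * simple 'idx & flush_mask'.
 */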

int nd_region_activate(struct nd_region *nd_region)
{
	int i, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	ndrd->flush_mask = (1 << ilog2(num_flush)) - 1;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	return 0;
}

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and the bit number an nvdimm_bus uses to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
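
	/*
	 * BLK capacity can alias the capacity of a PMEM mapping, so the
	 * usable space depends on the largest BLK overlap observed on
	 * any mapping.  Whenever a mapping reports a bigger overlap than
	 * the running maximum, restart the scan so that all mappings are
	 * accounted against the new value.
	 */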

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev)) {
			available += nd_blk_available_dpa(nd_mapping);
		}
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
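
/*
 * An illustrative caller pattern (a sketch, not part of this file): a
 * BTT or BLK I/O path brackets its media access with a lane, e.g.:
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	(issue I/O through the BLK data window, or update the BTT log
 *	slot, associated with this lane)
 *
 *	nd_region_release_lane(nd_region, lane);
 */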

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
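
/*
 * Illustrative sketch (not part of this file) of how a bus provider
 * might register a single-dimm pmem region; 'res', 'nvdimm', and
 * 'provider_region_attribute_groups' are hypothetical provider data:
 *
 *	struct nd_mapping mapping = {
 *		.nvdimm = nvdimm,
 *		.start = res->start,
 *		.size = resource_size(res),
 *	};
 *	struct nd_region_desc ndr_desc = {
 *		.res = res,
 *		.nd_mapping = &mapping,
 *		.num_mappings = 1,
 *		.attr_groups = provider_region_attribute_groups,
 *		.numa_node = NUMA_NO_NODE,
 *	};
 *
 *	if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
 *		return -ENXIO;
 */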

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush.  Note that we've already arranged for pmem
	 * writes to avoid the cache via arch_memcpy_to_pmem().  The
	 * final wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd->flush_wpq[i][0])
			writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]);
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
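
/*
 * Typical usage (a sketch, assuming a pmem-style caller): make writes
 * durable by bypassing the cpu cache and then triggering the flush
 * hints:
 *
 *	memcpy_to_pmem(dst, src, len);
 *	nvdimm_flush(nd_region);
 */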

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i;

	/* no nvdimm == flushing capability unknown */
	if (nd_region->ndr_mappings == 0)
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++)
		/* flush hints present, flushing required */
		if (ndrd->flush_wpq[i][0])
			return 1;

	/*
	 * The platform defines dimm devices without hints; assume a
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
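
/*
 * Illustrative caller-side check (a sketch; 'has_hints' is a
 * hypothetical driver flag):
 *
 *	int rc = nvdimm_has_flush(nd_region);
 *
 *	if (rc < 0)
 *		return rc;	(capability unknown)
 *	has_hints = rc;		(if set, call nvdimm_flush() on sync)
 */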

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}