/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

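/*
 * Map the flush hint registers for one DIMM into the region's flush
 * table.  Hints that land in an already-mapped page reuse that mapping
 * so no page is ioremapped twice.
 */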
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out duplicate entries so that redundant flushes are not
	 * issued.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well as an input to the
 * MODALIAS for namespace devices, and the bit number an nvdimm_bus uses to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_ALIASING, &nvdimm->flags))
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	nvdimm_flush(nd_region);

	return len;
}
static DEVICE_ATTR_RW(deep_flush);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
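	/*
	 * Find the largest BLK-aliased overlap and, whenever a larger one
	 * is discovered, restart so that every mapping is accounted
	 * against the same overlap value.
	 */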
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return badblocks_show(&nd_region->bb, buf, 0);
}

static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	NULL,
};

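/*
 * Hide attributes that do not apply to this region type.  deep_flush is
 * writable only when flush hints are present (nvdimm_has_flush() == 1),
 * read-only when writes need no flushing, and hidden otherwise.
 */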
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr)
		return 0;

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;
	}
	if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
			&& probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
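
/*
 * Example (sketch, not from this file): an I/O path brackets its
 * per-lane resource usage with acquire/release:
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... access the BLK data window or BTT log slot for this lane ...
 *	nd_region_release_lane(nd_region, lane);
 */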

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
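
/*
 * Example (sketch, names hypothetical): a bus provider describes a
 * region and registers it:
 *
 *	struct nd_region_desc ndr_desc;
 *
 *	memset(&ndr_desc, 0, sizeof(ndr_desc));
 *	ndr_desc.res = &res;
 *	ndr_desc.attr_groups = provider_region_attribute_groups;
 *	ndr_desc.numa_node = NUMA_NO_NODE;
 *	nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
 */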

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush.  Note that we've already arranged for pmem
	 * writes to avoid the cache via memcpy_flushcache().  The final
	 * wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
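
/*
 * Example (sketch, not from this file): a pmem driver copies data to
 * media with cache-bypassing stores and then flushes posted writes:
 *
 *	memcpy_flushcache(pmem_addr, mem, len);
 *	nvdimm_flush(nd_region);
 */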

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm == flushing capability unknown */
	if (nd_region->ndr_mappings == 0)
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without hints; assume a
	 * platform persistence mechanism like ADR.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
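
/*
 * Example (sketch): callers may gate explicit flushing on the reported
 * capability:
 *
 *	if (nvdimm_has_flush(nd_region) > 0)
 *		nvdimm_flush(nd_region);
 */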

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}