/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

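/*
 * Map the flush hint ("wpq") registers published by @nvdimm into the
 * region's per-dimm hint table.  Hints that fall in the same page
 * share a single ioremap'd mapping.
 */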
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

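	/*
	 * Size each dimm's hint table to the largest power of two not
	 * exceeding the smallest hint count across the mappings, so
	 * ndrd_get_flush_wpq() can index hints with a shift and mask.
	 */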
	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates across mappings so the
	 * same flush hint page is not written more than once per flush.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
	if (!nd_region)
		return NULL;
	return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and bit number for a nvdimm_bus to match
 * namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_ALIASING, &nvdimm->flags))
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_memory(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	rc = nvdimm_flush(nd_region, NULL);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(deep_flush);
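/*
 * Illustrative usage from userspace (the region name is an example):
 *
 *	# cat /sys/bus/nd/devices/region0/deep_flush
 *	# echo 1 > /sys/bus/nd/devices/region0/deep_flush
 *
 * Reading reports whether flush hints are present; writing "1" forces
 * an nvdimm_flush() of the region.
 */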

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are no labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
						nsindex));
		}
	}
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	if (rc)
		return rc;
	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

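/*
 * Sum the free dimm-physical-address space in the region.  BLK
 * allocations can alias PMEM capacity, so when a larger BLK overlap is
 * discovered mid-scan the accounting restarts ("retry") with that
 * overlap applied to every mapping.
 */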
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_memory(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
	resource_size_t available = 0;
	int i;

	if (is_memory(&nd_region->dev))
		available = PHYS_ADDR_MAX;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		if (is_memory(&nd_region->dev))
			available = min(available,
					nd_pmem_max_contiguous_dpa(nd_region,
								   nd_mapping));
		else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}
	if (is_memory(&nd_region->dev))
		return available * nd_region->ndr_mappings;
	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_allocatable_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver)
		rc = badblocks_show(&nd_region->bb, buf, 0);
	else
		rc = -ENXIO;
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
		return sprintf(buf, "cpu_cache\n");
	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_max_available_extent.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	&dev_attr_persistence_domain.attr,
	NULL,
};

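/*
 * Sysfs visibility policy for region attributes: hide attributes that
 * do not apply to the region type (e.g. pfn/dax seeds and badblocks on
 * non-memory regions) and gate deep_flush, set_cookie and
 * available_size on the region's capabilities.
 */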
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr) {
		if (is_memory(dev))
			return 0400;
		else
			return 0;
	}

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a == &dev_attr_persistence_domain.attr) {
		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
			return 0;
		return a->mode;
	}

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

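/*
 * Pick the interleave set cookie that matches the namespace label
 * specification in use: a v1.1 label index selects cookie1, anything
 * else (including no index) selects cookie2.
 */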
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (!nd_set)
		return 0;

	if (nsindex && __le16_to_cpu(nsindex->major) == 1
			&& __le16_to_cpu(nsindex->minor) == 1)
		return nd_set->cookie1;
	return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && is_nd_region(dev)) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}
	}
	if (dev->parent && is_nd_region(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

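	/*
	 * Format: <dimm-device>,<dpa-start>,<size>,<position>, e.g.
	 * "nmem0,0,268435456,0" (example values only).
	 */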
	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_dbg(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
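/*
 * Illustrative lane usage (a sketch, not taken from any particular
 * driver):
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... issue I/O through the per-lane BLK window or BTT log slot ...
 *	nd_region_release_lane(nd_region, lane);
 */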

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

        for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		nd_region->mapping[i].position = mapping->position;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	nd_region->target_node = ndr_desc->target_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	dev->of_node = ndr_desc->of_node;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	if (ndr_desc->flush)
		nd_region->flush = ndr_desc->flush;
	else
		nd_region->flush = NULL;

	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

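/*
 * nvdimm_flush() dispatches to a provider-specific flush callback when
 * the region registered one, and otherwise falls back to
 * generic_nvdimm_flush() below.
 */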
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
	int rc = 0;

	if (!nd_region->flush)
		rc = generic_nvdimm_flush(nd_region);
	else {
		if (nd_region->flush(nd_region, bio))
			rc = -EIO;
	}

	return rc;
}
/**
 * generic_nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
int generic_nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush.  Note that we've already arranged for pmem
	 * writes to avoid the cache via memcpy_flushcache().  The final
	 * wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_flush);

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm or pmem api == flushing capability unknown */
	if (nd_region->ndr_mappings == 0
			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without hints; assume a
	 * platform persistence mechanism like ADR.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

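/*
 * True when CPU caches sit in front of the persistence domain for this
 * pmem region, i.e. the driver must still write back caches to make
 * data durable.
 */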
int nvdimm_has_cache(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

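/*
 * nd_region_conflict() walks every memory region on the bus and
 * returns -EBUSY if the proposed [start, start + size) range collides
 * with a region other than @nd_region.
 */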
struct conflict_context {
	struct nd_region *nd_region;
	resource_size_t start, size;
};

static int region_conflict(struct device *dev, void *data)
{
	struct nd_region *nd_region;
	struct conflict_context *ctx = data;
	resource_size_t res_end, region_end, region_start;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region == ctx->nd_region)
		return 0;

	res_end = ctx->start + ctx->size;
	region_start = nd_region->ndr_start;
	region_end = region_start + nd_region->ndr_size;
	if (ctx->start >= region_start && ctx->start < region_end)
		return -EBUSY;
	if (res_end > region_start && res_end <= region_end)
		return -EBUSY;
	return 0;
}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct conflict_context ctx = {
		.nd_region = nd_region,
		.start = start,
		.size = size,
	};

	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}