/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"

struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	u64			pfn_flags;
	void __pmem		*virt_addr;
	/* immutable base size of the namespace */
	size_t			size;
	/* trim size when namespace capacity has been section aligned */
	u32			pfn_pad;
	struct badblocks	bb;
};

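/* report whether any sector in the byte range [sector, sector + len / 512) is on the bad-block list */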
static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}

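/*
 * Ask the nvdimm core to clear poison in the given byte range, drop any
 * sectors that were actually cleared from the bad-block list, and
 * invalidate cached copies of the range.
 */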
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = disk_to_dev(pmem->pmem_disk);
	sector_t sector;
	long cleared;

	sector = (offset - pmem->data_offset) / 512;
	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);

	if (cleared > 0 && cleared / 512) {
		dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
				__func__, (unsigned long long) sector,
				cleared / 512, cleared / 512 > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared / 512);
	}
	invalidate_pmem(pmem->virt_addr + offset, len);
}

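/*
 * Copy a single bio_vec between @page and pmem: reads that overlap
 * known-bad blocks fail with -EIO, writes to them are followed by a
 * poison clear and re-write (see the comment in the write path below).
 */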
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (rw == READ) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
		if (unlikely(bad_pmem)) {
			pmem_clear_poison(pmem, pmem_off, len);
			memcpy_to_pmem(pmem_addr, mem + off, len);
		}
	}

	kunmap_atomic(mem);
	return rc;
}

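/* bio submission path: handle each segment synchronously via pmem_do_bvec() */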
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}

static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	return pmem->size - pmem->pfn_pad - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

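/*
 * Allocate a pmem_device and map the namespace, either with struct pages
 * (devm_memremap_pages()) when pmem_should_map_pages() allows it, or with
 * a plain devm_memremap() of the physical range.
 */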
static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
{
	struct pmem_device *pmem;
	struct request_queue *q;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return ERR_PTR(-ENOMEM);

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
			dev_name(dev))) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
		return ERR_PTR(-EBUSY);
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return ERR_PTR(-ENOMEM);

	pmem->pfn_flags = PFN_DEV;
	if (pmem_should_map_pages(dev)) {
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
				pmem->phys_addr, pmem->size,
				ARCH_MEMREMAP_PMEM);

	if (IS_ERR(pmem->virt_addr)) {
		blk_cleanup_queue(q);
		return (void __force *) pmem->virt_addr;
	}

	pmem->pmem_queue = q;
	return pmem;
}

static void pmem_detach_disk(struct pmem_device *pmem)
{
	if (!pmem->pmem_disk)
		return;

	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}

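/*
 * Set up the request queue and gendisk, size the capacity minus any pfn
 * metadata offset / padding, and seed the disk's bad-block list from the
 * address range that backs user data.
 */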
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	int nid = dev_to_node(dev);
	struct resource bb_res;
	struct gendisk *disk;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk_node(0, nid);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	pmem->pmem_disk = disk;
	devm_exit_badblocks(dev, &pmem->bb);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	bb_res.start = nsio->res.start + pmem->data_offset;
	bb_res.end = nsio->res.end;
	if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
		bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
	}
	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
			&bb_res);
	disk->bb = &pmem->bb;
	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}

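/*
 * Byte-granularity access to the raw namespace; installed as
 * ndns->rw_bytes in nd_pmem_probe() for metadata (e.g. btt / pfn info
 * block) reads and writes.
 */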
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
			return -EIO;
		return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	} else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}

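/*
 * Create (or validate an existing) pfn info block: account for section
 * alignment padding, reserve room for the memmap in PFN_MODE_PMEM, and
 * write the superblock out through the namespace at offset SZ_4K.
 */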
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = 0, end_trunc = 0;
	resource_size_t start, size;
	struct nd_namespace_io *nsio;
	struct nd_region *nd_region;
	struct nd_pfn_sb *pfn_sb;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc == -ENODEV)
		/* no info block, do init */;
	else
		return rc;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		return -ENXIO;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));

	/*
	 * Check if pmem collides with 'System RAM' when section aligned and
	 * trim it accordingly
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
	size = resource_size(&nsio->res);
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED) {

		start = nsio->res.start;
		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
	}

	start = nsio->res.start;
	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED) {
		size = resource_size(&nsio->res);
		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
	}

	if (start_pad + end_trunc)
		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
				dev_name(&ndns->dev), start_pad + end_trunc);

	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	start += start_pad;
	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
	if (nd_pfn->mode == PFN_MODE_PMEM)
		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
			- start;
	else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
	else
		return -ENXIO;

	if (offset + start_pad + end_trunc >= pmem->size) {
		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
				dev_name(&ndns->dev));
		return -ENXIO;
	}

	npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	pfn_sb->version_minor = cpu_to_le16(1);
	pfn_sb->start_pad = cpu_to_le32(start_pad);
	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
}

static void nvdimm_namespace_detach_pfn(struct nd_pfn *nd_pfn)
{
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);
}

/*
 * We hotplug memory at section granularity, pad the reserved area from
 * the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
	unsigned long base_pfn = PHYS_PFN(base);

	return PFN_SECTION_ALIGN_DOWN(base_pfn);
}

static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long reserve = PHYS_PFN(SZ_8K);
	unsigned long base_pfn = PHYS_PFN(base);

	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}

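/*
 * Re-map the namespace with struct pages via devm_memremap_pages(),
 * replacing the plain memremap() set up in pmem_alloc(), and use the pfn
 * info block to place the memmap (altmap) in pmem itself for PFN_MODE_PMEM.
 */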
static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
{
	int rc;
	struct resource res;
	struct request_queue *q;
	struct pmem_device *pmem;
	struct vmem_altmap *altmap;
	struct device *dev = &nd_pfn->dev;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
	};

	pmem = dev_get_drvdata(dev);
	pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
	pmem->pfn_pad = start_pad + end_trunc;
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (pmem->data_offset < SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ PAGE_SIZE;
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		altmap = & __altmap;
		altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
		altmap->alloc = 0;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	q = pmem->pmem_queue;
	memcpy(&res, &nsio->res, sizeof(res));
	res.start += start_pad;
	res.end -= end_trunc;
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
			&q->q_usage_counter, altmap);
	pmem->pfn_flags |= PFN_MAP;
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(nd_pfn);
	return rc;

}

static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;
	/* we need a valid pfn_sb before we can init a vmem_altmap */
	return __nvdimm_namespace_attach_pfn(nd_pfn);
}

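/*
 * Probe: allocate and map the namespace, then either hand it off to a
 * btt / pfn personality or attach it directly as a raw pmem disk.
 */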
static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);

	if (is_nd_btt(dev)) {
		/* btt allocates its own request_queue */
		blk_cleanup_queue(pmem->pmem_queue);
		pmem->pmem_queue = NULL;
		return nvdimm_namespace_attach_btt(ndns);
	}

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

	if (nd_btt_probe(dev, ndns, pmem) == 0
			|| nd_pfn_probe(dev, ndns, pmem) == 0) {
		/*
		 * We'll come back as either btt-pmem, or pfn-pmem, so
		 * drop the queue allocation for now.
		 */
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(to_nd_pfn(dev));
	else
		pmem_detach_disk(pmem);

	return 0;
}

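/*
 * On NVDIMM_REVALIDATE_POISON, recompute the data range for the active
 * personality (btt, pfn, or raw namespace) and repopulate the bad-block
 * list from the region's poison list.
 */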
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct pmem_device *pmem = dev_get_drvdata(dev);
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		ndns = nd_pfn->ndns;
		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");