/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

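/*
 * Ask the nvdimm bus to clear media errors ("poison") in the given
 * range, drop the sectors that were successfully cleared from the
 * badblocks list, and invalidate any cached copies of the stale data.
 */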
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = pmem->bb.dev;
	sector_t sector;
	long cleared;

	sector = (offset - pmem->data_offset) / 512;
	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);

	if (cleared > 0 && cleared / 512) {
		dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
				__func__, (unsigned long long) sector,
				cleared / 512, cleared / 512 > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared / 512);
	}
	invalidate_pmem(pmem->virt_addr + offset, len);
}

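/*
 * Transfer @len bytes at @off within @page to or from pmem at @sector.
 * Reads of known-poisoned ranges fail with -EIO; writes attempt to
 * clear the poison.
 */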
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (rw == READ) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
		if (unlikely(bad_pmem)) {
			pmem_clear_poison(pmem, pmem_off, len);
			memcpy_to_pmem(pmem_addr, mem + off, len);
		}
	}

	kunmap_atomic(mem);
	return rc;
}

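/*
 * bio submission path: service each segment synchronously, then make
 * writes durable with wmb_pmem() before signaling completion.
 */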
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

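/* ->rw_page entry point: synchronously transfer a single page */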
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}

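/*
 * DAX entry point: translate @sector to a kernel virtual address and
 * pfn, and return how many bytes past that offset may be accessed.
 */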
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, pfn_t *pfn, long size)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return size;
	return pmem->size - pmem->pfn_pad - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

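/* devm action: tear down the request queue at device release */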
static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

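/* devm action: unregister and drop the gendisk at device release */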
static void pmem_release_disk(void *disk)
{
	del_gendisk(disk);
	put_disk(disk);
}

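/*
 * Map the namespace into the kernel address space (optionally with a
 * struct page memmap), set up the request queue, and register the
 * pmem block device.
 */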
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct vmem_altmap __altmap, *altmap = NULL;
	struct resource *res = &nsio->res;
	struct nd_pfn *nd_pfn = NULL;
	int nid = dev_to_node(dev);
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct resource pfn_res;
	struct request_queue *q;
	struct gendisk *disk;
	void *addr;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
		if (IS_ERR(altmap))
			return PTR_ERR(altmap);
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
				altmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
		pmem->pfn_flags |= PFN_MAP;
		res = &pfn_res; /* for badblocks populate */
		res->start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		addr = devm_memremap_pages(dev, &nsio->res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be dead before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = (void __pmem *) addr;

	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;

	disk->fops		= &pmem_fops;
	disk->queue		= q;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res);
	disk->bb = &pmem->bb;
	add_disk(disk);

	if (devm_add_action_or_reset(dev, pmem_release_disk, disk))
		return -ENOMEM;

	revalidate_disk(disk);

	return 0;
}

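/*
 * Probe: hand the namespace to the personality (btt, pfn, dax) that
 * claims it, or attach it as a raw pmem disk.
 */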
static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

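/* teardown is devm-managed; only an attached BTT needs explicit detach */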
static int nd_pmem_remove(struct device *dev)
{
	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	return 0;
}

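/*
 * NVDIMM_REVALIDATE_POISON: re-run badblocks accounting over the
 * namespace, adjusting for any btt/pfn metadata offsets.
 */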
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct pmem_device *pmem = dev_get_drvdata(dev);
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		ndns = nd_pfn->ndns;
		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);

static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");