/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/nd.h>
#include "nd.h"

/*
 * Per-device state for one persistent-memory block device, created by
 * pmem_alloc() and torn down by pmem_free().
 */
struct pmem_device {
	struct request_queue	*pmem_queue;	/* bio-based queue, no elevator */
	struct gendisk		*pmem_disk;	/* the /dev/pmem%d disk */

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;	/* start of the backing region */
	void			*virt_addr;	/* uncached kernel mapping of it */
	size_t			size;		/* region length in bytes */
};

/* Dynamic block-device major allocated in pmem_init(), shared by all disks. */
static int pmem_major;

/*
 * Copy one segment between @page and the pmem backing store.
 *
 * @sector selects the on-media location (512-byte units); @off and @len
 * bound the fragment of @page involved.  @rw is READ or WRITE.
 * The dcache flush keeps user mappings of @page coherent with the copy.
 */
static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *pmem_addr = pmem->virt_addr + (sector << 9);
	void *mem = kmap_atomic(page);

	if (rw == READ) {
		memcpy(mem + off, pmem_addr, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		memcpy(pmem_addr, mem + off, len);
	}

	kunmap_atomic(mem);
}

/*
 * bio-based I/O entry point.  Every segment is copied synchronously by the
 * CPU, so the bio is completed before this function returns.
 */
static void pmem_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	struct bvec_iter iter;
	struct bio_vec bvec;
	sector_t sector;
	int rw;

	/* Fail I/O that would run past the end of the device. */
	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) {
		bio_endio(bio, -EIO);
		return;
	}

	/* Discards are not advertised, so one arriving here is a bug. */
	BUG_ON(bio->bi_rw & REQ_DISCARD);

	rw = bio_data_dir(bio);
	sector = bio->bi_iter.bi_sector;
	bio_for_each_segment(bvec, bio, iter) {
		pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset,
			     rw, sector);
		sector += bvec.bv_len >> 9;
	}

	bio_endio(bio, 0);
}

/*
 * ->rw_page fast path: synchronous single-page read/write.
 * The copy cannot fail, so completion is signalled immediately with
 * status 0 via page_endio().
 */
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct gendisk *disk = bdev->bd_disk;

	pmem_do_bvec(disk->private_data, page, PAGE_CACHE_SIZE, 0, rw, sector);
	page_endio(page, rw & WRITE, 0);

	return 0;
}

/*
 * DAX entry point: translate @sector into a kernel virtual address and a
 * pfn inside the device's single contiguous mapping.
 *
 * Returns the number of bytes available from that offset to the end of
 * the device, or -ENODEV if no pmem device is attached.
 */
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
			      void **kaddr, unsigned long *pfn, long size)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	size_t offset;

	if (!pmem)
		return -ENODEV;

	offset = sector << 9;
	*kaddr = pmem->virt_addr + offset;
	*pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;

	return pmem->size - offset;
}

/*
 * Block-device entry points: reads/writes are served via ->rw_page and the
 * make_request function; ->direct_access enables DAX.
 */
static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
};

static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
122 123 124
{
	struct pmem_device *pmem;
	struct gendisk *disk;
125
	int err;
126 127 128 129 130 131 132 133 134 135 136

	err = -ENOMEM;
	pmem = kzalloc(sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		goto out;

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);

	err = -EINVAL;
	if (!request_mem_region(pmem->phys_addr, pmem->size, "pmem")) {
137 138
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158
		goto out_free_dev;
	}

	/*
	 * Map the memory as non-cachable, as we can't write back the contents
	 * of the CPU caches in case of a crash.
	 */
	err = -ENOMEM;
	pmem->virt_addr = ioremap_nocache(pmem->phys_addr, pmem->size);
	if (!pmem->virt_addr)
		goto out_release_region;

	pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL);
	if (!pmem->pmem_queue)
		goto out_unmap;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_max_hw_sectors(pmem->pmem_queue, 1024);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);

159
	disk = alloc_disk(0);
160 161 162 163
	if (!disk)
		goto out_free_queue;

	disk->major		= pmem_major;
164
	disk->first_minor	= 0;
165 166 167 168
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
169
	sprintf(disk->disk_name, "pmem%d", id);
170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199
	disk->driverfs_dev = dev;
	set_capacity(disk, pmem->size >> 9);
	pmem->pmem_disk = disk;

	add_disk(disk);

	return pmem;

out_free_queue:
	blk_cleanup_queue(pmem->pmem_queue);
out_unmap:
	iounmap(pmem->virt_addr);
out_release_region:
	release_mem_region(pmem->phys_addr, pmem->size);
out_free_dev:
	kfree(pmem);
out:
	return ERR_PTR(err);
}

/*
 * Tear down a device created by pmem_alloc(), in strict reverse order of
 * setup: unregister the disk, drop its reference, destroy the queue, then
 * unmap and release the memory region before freeing the state.
 */
static void pmem_free(struct pmem_device *pmem)
{
	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
	iounmap(pmem->virt_addr);
	release_mem_region(pmem->phys_addr, pmem->size);
	kfree(pmem);
}

static int nd_pmem_probe(struct device *dev)
201
{
202 203
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
204 205
	struct pmem_device *pmem;

206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222
	if (resource_size(&nsio->res) < ND_MIN_NAMESPACE_SIZE) {
		resource_size_t size = resource_size(&nsio->res);

		dev_dbg(dev, "%s: size: %pa, too small must be at least %#x\n",
				__func__, &size, ND_MIN_NAMESPACE_SIZE);
		return -ENODEV;
	}

	if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_PMEM) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		if (!nspm->uuid) {
			dev_dbg(dev, "%s: uuid not set\n", __func__);
			return -ENODEV;
		}
	}

223
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
224 225 226
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

227
	dev_set_drvdata(dev, pmem);
228 229 230 231

	return 0;
}

/*
 * Bus remove: tear down the pmem_device stored as drvdata by
 * nd_pmem_probe().  Always succeeds.
 */
static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	pmem_free(pmem);
	return 0;
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.drv = {
		.name = "nd_pmem",
248
	},
249
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
250 251 252 253 254 255 256 257 258 259
};

/*
 * Module init: grab a dynamic block major first, then register the
 * libnvdimm bus driver; the major is released again if driver
 * registration fails.
 */
static int __init pmem_init(void)
{
	int error;

	pmem_major = register_blkdev(0, "pmem");
	if (pmem_major < 0)
		return pmem_major;

	error = nd_driver_register(&nd_pmem_driver);
	if (error) {
		unregister_blkdev(pmem_major, "pmem");
		return error;
	}

	return 0;
}
module_init(pmem_init);

static void pmem_exit(void)
{
272
	driver_unregister(&nd_pmem_driver.drv);
273 274 275 276 277 278
	unregister_blkdev(pmem_major, "pmem");
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");