nd.h
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#ifndef __LINUX_ND_H__
#define __LINUX_ND_H__
#include <linux/fs.h>
#include <linux/ndctl.h>
#include <linux/device.h>
#include <linux/badblocks.h>

enum nvdimm_event {
	NVDIMM_REVALIDATE_POISON,
};

struct nd_device_driver {
	struct device_driver drv;
	unsigned long type;
	int (*probe)(struct device *dev);
	int (*remove)(struct device *dev);
	void (*shutdown)(struct device *dev);
	void (*notify)(struct device *dev, enum nvdimm_event event);
};

static inline struct nd_device_driver *to_nd_device_driver(
		struct device_driver *drv)
{
	return container_of(drv, struct nd_device_driver, drv);
};
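
/*
 * Example (illustrative sketch, not a definition from this header): an nd bus
 * personality driver fills in struct nd_device_driver and registers it with
 * the nd_driver_register() helper declared at the end of this file.  The
 * "example_ndns" name and the trivial probe/remove bodies are hypothetical;
 * ND_DRIVER_NAMESPACE_PMEM is assumed to come from <linux/ndctl.h>.
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		dev_dbg(dev, "bound to %s\n", dev_name(dev));
 *		return 0;
 *	}
 *
 *	static int example_remove(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct nd_device_driver example_driver = {
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.type = ND_DRIVER_NAMESPACE_PMEM,
 *		.drv = {
 *			.name = "example_ndns",
 *		},
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return nd_driver_register(&example_driver);
 *	}
 *	module_init(example_init);
 */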

/**
 * struct nd_namespace_common - core infrastructure of a namespace
 * @force_raw: ignore other personalities for the namespace (e.g. btt)
 * @dev: device model node
 * @claim: when set, another personality has taken ownership of the namespace
 * @rw_bytes: access the raw namespace capacity with byte-aligned transfers
 */
struct nd_namespace_common {
	int force_raw;
	struct device dev;
	struct device *claim;
	int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
			void *buf, size_t size, int rw);
};

static inline struct nd_namespace_common *to_ndns(struct device *dev)
{
	return container_of(dev, struct nd_namespace_common, dev);
}

/**
 * struct nd_namespace_io - device representation of a persistent memory range
 * @dev: namespace device created by the nd region driver
 * @res: struct resource conversion of a NFIT SPA table
 * @size: cached resource_size(@res) for fast path size checks
 * @addr: virtual address to access the namespace range
 * @bb: badblocks list for the namespace range
 */
struct nd_namespace_io {
	struct nd_namespace_common common;
	struct resource res;
	resource_size_t size;
	void __pmem *addr;
	struct badblocks bb;
};

/**
 * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory
 * @nsio: device and system physical address range to drive
 * @alt_name: namespace name supplied in the dimm label
 * @uuid: namespace uuid supplied in the dimm label
 */
struct nd_namespace_pmem {
	struct nd_namespace_io nsio;
	char *alt_name;
	u8 *uuid;
};

/**
 * struct nd_namespace_blk - namespace for dimm-bounded persistent memory
 * @alt_name: namespace name supplied in the dimm label
 * @uuid: namespace uuid supplied in the dimm label
 * @id: ida allocated id
 * @lbasize: blk namespaces have a native sector size when btt not present
 * @size: sum of all the resource ranges allocated to this namespace
 * @num_resources: number of dpa extents to claim
 * @res: discontiguous dpa extents for given dimm
 */
struct nd_namespace_blk {
	struct nd_namespace_common common;
	char *alt_name;
	u8 *uuid;
	int id;
	unsigned long lbasize;
	resource_size_t size;
	int num_resources;
	struct resource **res;
};

static inline struct nd_namespace_io *to_nd_namespace_io(struct device *dev)
{
	return container_of(dev, struct nd_namespace_io, common.dev);
}

static inline struct nd_namespace_pmem *to_nd_namespace_pmem(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	return container_of(nsio, struct nd_namespace_pmem, nsio);
}

static inline struct nd_namespace_blk *to_nd_namespace_blk(struct device *dev)
{
	return container_of(dev, struct nd_namespace_blk, common.dev);
}
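
/*
 * Example (hedged sketch): a driver whose ->type only matches pmem/io
 * namespaces can convert the probed device with the helpers above and reach
 * the physical range described by @res.  The probe body is hypothetical.
 *
 *	static int example_io_probe(struct device *dev)
 *	{
 *		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
 *
 *		dev_dbg(dev, "namespace: %pa bytes at %pa\n",
 *				&nsio->size, &nsio->res.start);
 *		return 0;
 *	}
 */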

/**
 * nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
 * @ndns: device to read
 * @offset: namespace-relative starting offset
 * @buf: buffer to fill
 * @size: transfer length
 *
 * @buf is up-to-date upon return from this routine.
 */
static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size)
{
	return ndns->rw_bytes(ndns, offset, buf, size, READ);
}

/**
 * nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace
 * @ndns: device to write
 * @offset: namespace-relative starting offset
 * @buf: buffer to drain
 * @size: transfer length
 *
 * NVDIMM namespaces do not implement sectors internally.  Depending on
 * the @ndns, the contents of @buf may be in cpu cache, platform buffers,
 * or on backing memory media upon return from this routine.  Flushing
 * to media is handled internal to the @ndns driver, if at all.
 */
static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size)
{
	return ndns->rw_bytes(ndns, offset, buf, size, WRITE);
}
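
/*
 * Example (illustrative only): a personality driver can keep a small metadata
 * block at a fixed namespace offset and update it through the byte-granularity
 * helpers above.  The struct layout, the SZ_4K offset and the function name
 * are hypothetical.
 *
 *	struct example_info_block {
 *		u64 seq;
 *		u64 flags;
 *	};
 *
 *	static int example_bump_seq(struct nd_namespace_common *ndns)
 *	{
 *		struct example_info_block info;
 *		int rc;
 *
 *		rc = nvdimm_read_bytes(ndns, SZ_4K, &info, sizeof(info));
 *		if (rc)
 *			return rc;
 *		info.seq++;
 *		return nvdimm_write_bytes(ndns, SZ_4K, &info, sizeof(info));
 *	}
 */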

#define MODULE_ALIAS_ND_DEVICE(type) \
	MODULE_ALIAS("nd:t" __stringify(type) "*")
#define ND_DEVICE_MODALIAS_FMT "nd:t%d"

struct nd_region;
void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
		struct module *module, const char *mod_name);
#define nd_driver_register(driver) \
	__nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
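
/*
 * Example (sketch): a module built around nd_driver_register() typically also
 * declares a modalias so it can be autoloaded when a matching nd device is
 * created.  The device type below is illustrative; ND_DEVICE_NAMESPACE_PMEM
 * is assumed to come from <linux/ndctl.h>.
 *
 *	MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
 */
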
#endif /* __LINUX_ND_H__ */