/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

/* Flag for synchronous flush */
#define DAXDEV_F_SYNC (1UL << 0)

typedef unsigned long dax_entry_t;

struct iomap_ops;
struct dax_device;
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
};
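
/*
 * Example: a minimal sketch of how a block driver might populate these
 * operations. The pmem_* names and struct pmem_device are illustrative
 * (loosely modeled on drivers/nvdimm/pmem.c), not part of this API:
 *
 *	static long pmem_dax_direct_access(struct dax_device *dax_dev,
 *			pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
 *	{
 *		struct pmem_device *pmem = dax_get_private(dax_dev);
 *
 *		return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
 *	}
 *
 *	static const struct dax_operations pmem_dax_ops = {
 *		.direct_access = pmem_dax_direct_access,
 *		.dax_supported = generic_fsdax_supported,
 *		.copy_from_iter = pmem_copy_from_iter,
 *		.copy_to_iter = pmem_copy_to_iter,
 *	};
 */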

extern struct attribute_group dax_attribute_group;

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool __dax_synchronous(struct dax_device *dax_dev);
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return __dax_synchronous(dax_dev);
}
void __set_dax_synchronous(struct dax_device *dax_dev);
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
	__set_dax_synchronous(dax_dev);
}
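
/*
 * Devices whose writes are made persistent at CPU cache granularity
 * (e.g. traditional persistent memory) pass DAXDEV_F_SYNC to
 * alloc_dax(); devices that need an explicit flush request (e.g.
 * virtio-pmem) pass 0 and report as not dax_synchronous(). A hedged
 * sketch of the synchronous case:
 *
 *	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops,
 *			DAXDEV_F_SYNC);
 */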
/*
 * Check if the given mapping is supported by the file / underlying device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
					     struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	return dax_synchronous(dax_dev);
}
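
/*
 * A filesystem would typically call this from its ->mmap() handler to
 * reject MAP_SYNC mappings the device cannot honor. A sketch along the
 * lines of the ext4 usage (the foo_* name and sbi->s_daxdev are
 * assumptions for illustration):
 *
 *	static int foo_file_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		if (!daxdev_mapping_supported(vma, sbi->s_daxdev))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */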
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
				struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
#endif

struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	return __bdev_dax_supported(bdev, blocksize);
}

bool __generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors);
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
			sectors);
}
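
/*
 * Filesystems typically call bdev_dax_supported() at mount time before
 * honoring a "dax" mount option; a sketch (test_opt() as in ext4 is an
 * assumption):
 *
 *	if (test_opt(sb, DAX) && !bdev_dax_supported(sb->s_bdev, blocksize))
 *		return -EINVAL;
 *
 * generic_fsdax_supported() is the helper most block drivers plug into
 * dax_operations.dax_supported to back that check.
 */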

static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}

struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);
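
/*
 * A DAX-aware filesystem points its ->writepages() at this helper so
 * that dirty mapping entries are flushed to media; a sketch modeled on
 * ext2_dax_writepages() (the foo_* name and sbi->s_daxdev are assumed):
 *
 *	static int foo_dax_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
 *	}
 */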

struct page *dax_layout_busy_page(struct address_space *mapping);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
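
/*
 * The memory-failure path takes this lock before inspecting
 * page->mapping; a zero cookie means the entry could not be locked:
 *
 *	cookie = dax_lock_page(page);
 *	if (!cookie)
 *		goto out;
 *	...
 *	dax_unlock_page(page, cookie);
 */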
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
		int blocksize)
{
	return false;
}

static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return false;
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}

static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline dax_entry_t dax_lock_page(struct page *page)
{
	if (IS_DAX(page->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif

int dax_read_lock(void);
void dax_read_unlock(int id);
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
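
/*
 * Callers must hold the dax read lock while dereferencing the returned
 * kaddr/pfn so the device cannot be unregistered underneath them; the
 * usual pattern:
 *
 *	id = dax_read_lock();
 *	avail = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, &pfn);
 *	if (avail > 0)
 *		... use kaddr / pfn ...
 *	dax_read_unlock(id);
 */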
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
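
/*
 * ->read_iter()/->write_iter() for a DAX inode funnel into this helper
 * with the filesystem's iomap_ops; a sketch (foo_iomap_ops is assumed):
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		...
 *		return dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *	}
 */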
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
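
/*
 * For MAP_SYNC faults the two calls pair up: dax_iomap_fault() returns
 * VM_FAULT_NEEDDSYNC when the PTE may only be installed after metadata
 * is made durable, and the filesystem then completes the fault with
 * dax_finish_sync_fault(). A sketch of the xfs/ext4 pattern
 * (foo_iomap_ops is assumed):
 *
 *	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &foo_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 */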
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index);

#ifdef CONFIG_FS_DAX
int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length);
#else
static inline int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
#endif

static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

#endif