/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H
#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>
struct page;
struct pipe_inode_info;

/* Kernel-space analogue of struct iovec: one (base, length) buffer. */
struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

/*
 * Iterator flavours, stored in iov_iter->type.  They are distinct bits
 * (not consecutive values) so they can be tested with a mask; the
 * direction bits (READ/WRITE) are OR'd into ->type as well — see
 * iov_iter_rw().
 */
enum {
	ITER_IOVEC = 0,
	ITER_KVEC = 2,
	ITER_BVEC = 4,
	ITER_PIPE = 8,
};

/*
 * Generic data-stream iterator.  Which member of the first union is
 * live is selected by the ITER_* bits in @type; @count is the total
 * number of bytes remaining and @iov_offset the bytes already consumed
 * from the current segment.
 */
struct iov_iter {
	int type;		/* ITER_* flavour | direction bits */
	size_t iov_offset;	/* offset into the current segment */
	size_t count;		/* bytes remaining */
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;	/* segment count (non-pipe flavours) */
		struct {
			/* pipe flavour: current and starting buffer index */
			int idx;
			int start_idx;
		};
	};
};

/*
 * Total number of bytes covered by an iovec array.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths
 * can overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	const struct iovec *cur = iov;
	const struct iovec *end = iov + nr_segs;
	size_t total = 0;

	while (cur < end)
		total += (cur++)->iov_len;
	return total;
}

67 68 69 70 71 72 73 74 75 76
static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

/*
 * Iterate @iter one struct iovec segment at a time, binding each segment
 * to @iov.  The leading if skips flavours whose segments are not iovecs
 * (ITER_BVEC / ITER_PIPE).  NOTE(review): like any unguarded macro 'if',
 * this binds to a following 'else' — don't use it directly before one.
 */
#define iov_for_each(iov, iter, start)				\
	if (!((start).type & (ITER_BVEC | ITER_PIPE)))		\
	for (iter = (start);					\
	     (iter).count &&					\
	     ((iov = iov_iter_iovec(&(iter))), 1);		\
	     iov_iter_advance(&(iter), (iov).iov_len))

unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

/*
 * Bounds-checked copy from kernel memory at @addr into the iterator.
 * If the hardened-usercopy size check fails, nothing is copied and the
 * full @bytes is returned; otherwise forwards to _copy_to_iter().
 */
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (likely(check_copy_size(addr, bytes, true)))
		return _copy_to_iter(addr, bytes, i);
	return bytes;
}

/*
 * Bounds-checked copy from the iterator into kernel memory at @addr.
 * If the hardened-usercopy size check fails, nothing is copied and the
 * full @bytes is returned; otherwise forwards to _copy_from_iter().
 */
static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (likely(check_copy_size(addr, bytes, false)))
		return _copy_from_iter(addr, bytes, i);
	return bytes;
}

/*
 * All-or-nothing variant of copy_from_iter(): returns false without
 * copying when the size check fails, otherwise forwards to
 * _copy_from_iter_full().
 */
static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (likely(check_copy_size(addr, bytes, false)))
		return _copy_from_iter_full(addr, bytes, i);
	return false;
}

/*
 * Like copy_from_iter(), but delegates to the nocache copy path.
 * A failed size check copies nothing and returns the full @bytes.
 */
static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (likely(check_copy_size(addr, bytes, false)))
		return _copy_from_iter_nocache(addr, bytes, i);
	return bytes;
}

/*
 * All-or-nothing nocache variant: returns false without copying when
 * the size check fails, otherwise forwards to
 * _copy_from_iter_full_nocache().
 */
static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (likely(check_copy_size(addr, bytes, false)))
		return _copy_from_iter_full_nocache(addr, bytes, i);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
/* No arch support: fall back to the nocache copy path. */
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
161
{
162 163 164 165
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return bytes;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
166
}
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, int direction, struct pipe_inode_info *pipe,
			size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

187
static inline size_t iov_iter_count(const struct iov_iter *i)
188 189 190 191
{
	return i->count;
}

192
static inline bool iter_is_iovec(const struct iov_iter *i)
A
Al Viro 已提交
193
{
A
Al Viro 已提交
194
	return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
A
Al Viro 已提交
195 196
}

/*
 * Get one of READ or WRITE out of iter->type without any other flags OR'd
 * in with it.
 *
 * The ?: is just for type safety: the dead first arm forces @i to be
 * compatible with struct iov_iter *.
 */
#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))
/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's upper limit for such.  Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
A
Al Viro 已提交
212
{
213 214 215 216 217 218
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignement is by definition greater than all
	 * values of size_t, including old i->count.
	 */
A
Al Viro 已提交
219 220 221 222
	if (i->count > count)
		i->count = count;
}

A
Al Viro 已提交
223 224 225 226 227 228 229 230
/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);

#ifdef CONFIG_COMPAT
struct compat_iovec;
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);

#endif