/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>

#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory and no faults
 * on store should happen.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */

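/*
 * Illustrative sketch of the NOTE above (struct foo, karg and uptr are
 * hypothetical, not part of this header):
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uptr, sizeof(karg)))
 *		return -EFAULT;
 *
 * On a short copy, copy_from_user() zero-pads the tail of karg before
 * returning the number of bytes left.  Had __copy_from_user() or
 * __copy_from_user_inatomic() been used instead, that tail would be left
 * unmodified, which is why their return values must always be checked.
 */
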
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned so that
 * the access does not result in a page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(from, n))) {
		kasan_check_write(to, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
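
/*
 * Example of a typical copy_{from,to}_user() round trip in a (hypothetical)
 * ioctl or syscall handler -- a sketch only; struct foo_args, uarg and
 * foo_do_something() are made-up names:
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 *	args.result = foo_do_something(args.value);
 *	if (copy_to_user(uarg, &args, sizeof(args)))
 *		return -EFAULT;
 *	return 0;
 */
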
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n) && access_ok(from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}

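/*
 * Sketch of the pagefault_disable()/pagefault_enable() pattern around an
 * atomic user access (dst, uaddr and len are hypothetical; uaddr must already
 * have passed access_ok()):
 *
 *	unsigned long left;
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(dst, uaddr, len);
 *	pagefault_enable();
 *
 * Between the two calls a fault on uaddr is not handled by faulting the page
 * in; the copy simply returns how many bytes were left, and the caller can
 * fall back to a sleeping copy_from_user() outside the atomic section.
 */
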
/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT it behaves like a NOP, so the fault handler would not
 * actually be disabled; in_atomic() will report different values depending on
 * CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

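/*
 * Typical check at the top of an architecture's page fault handler (a sketch;
 * the no_context label is hypothetical and stands for the exception-fixup
 * path):
 *
 *	if (unlikely(faulthandler_disabled() || !current->mm))
 *		goto no_context;
 */
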
#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool probe_kernel_read_allowed(const void *unsafe_src, size_t size,
		bool strict);

extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long probe_kernel_read_strict(void *dst, const void *src, size_t size);
extern long probe_user_read(void *dst, const void __user *src, size_t size);

extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);

extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval)		\
	probe_kernel_read(&retval, addr, sizeof(retval))

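/*
 * Example (a sketch; ptr is an arbitrary, possibly unmapped kernel pointer):
 *
 *	unsigned long val;
 *
 *	if (probe_kernel_address(ptr, val))
 *		return -EFAULT;
 *
 * On success val holds the value read from ptr; on failure the read is
 * aborted without an oops.
 */
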
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
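
/*
 * Sketch of the unsafe_*() access pattern (uptr, val and the efault label are
 * hypothetical); batching several accesses between begin/end amortizes the
 * cost of opening the user access window:
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, &uptr->a, efault);
 *	unsafe_put_user(val + 1, &uptr->b, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */
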
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif		/* __LINUX_UACCESS_H__ */