/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), 'to' always points to kernel memory and no
 * faults on store should happen.  Interpretation of 'from' is affected by
 * set_fs().  For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to the architecture whether it wants to
 * bother with that.  They should not be used directly; they are used to
 * implement the 6 functions (copy_{to,from}_user(),
 * __copy_{to,from}_user{,_inatomic}()) that are used instead.  Out of
 * those, the __... ones are inlined.  Plain copy_{to,from}_user() might or
 * might not be inlined.  If you want them inlined, have asm/uaccess.h
 * define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
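
/*
 * Illustrative sketch (not part of this header's API; kbuf, ubuf and size
 * are made-up names): how a caller observes the short-copy contract above.
 * If the copy returns N, the first size - N destination bytes hold the
 * fetched data and the tail is left untouched:
 *
 *	unsigned long left = raw_copy_from_user(kbuf, ubuf, size);
 *
 * Here kbuf[0 .. size - left - 1] holds user data, while the remaining
 * bytes are unmodified - and, per the NOTE above, *not* zero-padded.
 */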

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't take a page fault and sleep.
 */
static __always_inline unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
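
/*
 * Usage sketch (illustrative; uptr, kbuf and size are made-up names).
 * The __ variants skip access_ok(), so the caller must perform that
 * check itself:
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, size))
 *		return -EFAULT;
 *	if (__copy_to_user(uptr, kbuf, size))
 *		return -EFAULT;
 */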

#ifdef INLINE_COPY_FROM_USER
static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n))) {
		kasan_check_write(to, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
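
/*
 * Illustrative note: an architecture that wants the plain copy helpers
 * inlined (see the comment at the top of this file) would add to its
 * asm/uaccess.h:
 *
 *	#define INLINE_COPY_FROM_USER
 *	#define INLINE_COPY_TO_USER
 */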

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
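
/*
 * Typical call pattern (illustrative; karg and uarg are made-up names):
 * both copy_{to,from}_user() return the number of bytes left uncopied,
 * so most callers treat any non-zero result as -EFAULT:
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */
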
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
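
/*
 * Usage sketch (illustrative; dst, src, size and ret are made-up names):
 * bracketing a user access with pagefault_disable()/pagefault_enable()
 * makes it non-sleeping, e.g. while an atomic mapping is held:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, src, size);
 *	pagefault_enable();
 *
 * A non-zero ret means the access faulted; the caller can retry with a
 * sleeping copy_from_user() from a context that may fault.
 */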

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
#define pagefault_disabled() (current->pagefault_disabled != 0)

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
 * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */

/*
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
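
/*
 * Example (illustrative; ptr and val are made-up names): read a word
 * through a possibly-invalid kernel pointer without risking an oops:
 *
 *	unsigned long val;
 *
 *	if (probe_kernel_read(&val, ptr, sizeof(val)))
 *		return -EFAULT;
 */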

/*
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);

extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
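
/*
 * Example (illustrative; addr is a made-up name): copy a NUL-terminated
 * string from an unsafe kernel address; a negative return means the
 * access faulted:
 *
 *	char buf[64];
 *
 *	if (strncpy_from_unsafe(buf, addr, sizeof(buf)) < 0)
 *		return -EFAULT;
 */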

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval)		\
	probe_kernel_read(&retval, addr, sizeof(retval))
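
/*
 * Example (illustrative; ip and insn are made-up names): unlike
 * probe_kernel_read(), the size is inferred from the result variable:
 *
 *	unsigned char insn;
 *
 *	if (probe_kernel_address(ip, insn))
 *		return -EFAULT;
 */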

#ifndef user_access_begin
/*
 * The type (VERIFY_READ vs VERIFY_WRITE) argument of access_ok() is not
 * used at all.  Upstream commit 96d4f267e40f9509e8a66e2b39e8b95655617693
 * ("Remove 'type' argument from access_ok() function") dropped the
 * argument entirely, but this tree has not picked up that refactoring yet.
 * Just pass VERIFY_WRITE here to keep the style consistent.
 */
#define user_access_begin(ptr, len) access_ok(VERIFY_WRITE, ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
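
/*
 * Intended pattern (sketch; uptr, val and the Efault label are the
 * caller's own): one access_ok() via user_access_begin(), a run of
 * unsafe accesses, and user_access_end() on every exit path:
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uptr, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */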

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif		/* __LINUX_UACCESS_H__ */