/*
2
 *  arch/arm/include/asm/uaccess.h
L
Linus Torvalds 已提交
3 4 5 6 7 8 9 10 11 12 13
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
14 15
#include <linux/string.h>
#include <linux/thread_info.h>
L
Linus Torvalds 已提交
16 17 18
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/domain.h>
19
#include <asm/unified.h>
20
#include <asm/compiler.h>
L
Linus Torvalds 已提交
21

22
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
23 24 25 26 27 28
#include <asm-generic/uaccess-unaligned.h>
#else
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
#endif

L
Linus Torvalds 已提交
29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51
#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

52 53 54 55 56 57 58 59
/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertantly
 * perform such accesses (eg, via list poison values) which could then
 * be exploited for priviledge escalation.
 */
/*
 * Temporarily permit kernel accesses to userspace memory.
 *
 * Returns an opaque cookie to be handed back to uaccess_restore()
 * when the access is complete.  With CONFIG_CPU_SW_DOMAIN_PAN the
 * cookie is the previous domain access register value; otherwise it
 * is simply 0 and the call is a no-op.
 */
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int prev_domain = get_domain();

	/* Open the user domain up so that user accesses are permitted */
	set_domain((prev_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return prev_domain;
#else
	return 0;
#endif
}

/*
 * Revert uaccess_save_and_enable(): write back the domain access
 * state captured in @flags.  No-op unless CONFIG_CPU_SW_DOMAIN_PAN.
 */
static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}

R
Russell King 已提交
81 82 83 84 85 86 87
/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

L
Linus Torvalds 已提交
88 89 90 91 92
/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS	0x00000000
#define get_ds()	(KERNEL_DS)
R
Russell King 已提交
93 94 95 96

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
L
Linus Torvalds 已提交
97 98
#define get_fs()	(current_thread_info()->addr_limit)

R
Russell King 已提交
99
/*
 * Set the current task's address space limit.  Also adjusts the
 * DOMAIN_KERNEL access rights: a zero limit (KERNEL_DS) makes the
 * kernel domain a manager (access checks bypassed), any other limit
 * makes it a client.
 */
static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

M
Michael S. Tsirkin 已提交
105
#define segment_eq(a, b)	((a) == (b))
L
Linus Torvalds 已提交
106 107 108 109 110 111 112 113 114 115

#define __addr_ok(addr) ({ \
	unsigned long flag; \
	__asm__("cmp %2, %0; movlo %0, #0" \
		: "=&r" (flag) \
		: "0" (current_thread_info()->addr_limit), "r" (addr) \
		: "cc"); \
	(flag == 0); })

/* We use 33-bit arithmetic here... */
M
Michael S. Tsirkin 已提交
116
#define __range_ok(addr, size) ({ \
117
	unsigned long flag, roksum; \
L
Linus Torvalds 已提交
118 119
	__chk_user_ptr(addr);	\
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
120
		: "=&r" (flag), "=&r" (roksum) \
L
Linus Torvalds 已提交
121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
139
extern int __get_user_32t_8(void *);
140
extern int __get_user_8(void *);
141 142 143
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);
L
Linus Torvalds 已提交
144

145 146 147 148 149 150 151
/*
 * Clobber lists for the out-of-line __get_user_* helpers, indexed by
 * the size suffix.  The 2-byte helper additionally clobbers ip when
 * CONFIG_CPU_USE_DOMAINS is enabled.
 */
#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"

/*
 * Call the out-of-line helper __get_user_<__s>.  The ABI is fixed:
 * pointer in r0 (also returns the error code in r0), address limit in
 * r1, fetched value in r2; __asmeq() guards verify the register
 * bindings the compiler chose actually match.
 */
#define __get_user_x(__r2, __p, __e, __l, __s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)		        \
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif


M
Michael S. Tsirkin 已提交
190
/*
 * __get_user_check(x, p) - checked single-value fetch from user space.
 *
 * Marshals the arguments into the fixed registers expected by the
 * out-of-line helpers (pointer in r0, inclusive address limit in r1,
 * value returned in r2) and dispatches on the size of the pointed-to
 * object.  User access is enabled around the call.  Evaluates to the
 * helper's error code (r0) and stores the converted value into @x.
 *
 * When the target variable @x is wider (>= 8 bytes) than the source
 * object, the *_64t variants are used; when an 8-byte source is read
 * into a narrower @x, the *_32t variant narrows it — both exist to
 * place the significant word correctly on big-endian (see above).
 */
#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
		register typeof(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

M
Michael S. Tsirkin 已提交
230
/*
 * get_user(x, p) - checked read of a simple value from user space.
 * May fault and therefore may sleep (might_fault() documents this).
 * Evaluates to 0 on success or the error code from the out-of-line
 * helper; the helpers zero the result register on failure so no
 * kernel data can leak into @x.
 */
#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	 })

R
Russell King 已提交
236 237 238 239 240
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

M
Michael S. Tsirkin 已提交
241
/*
 * Call the out-of-line helper __put_user_<__s>.  Fixed ABI: pointer in
 * r0 (error code returned in r0), value in r2(/r3), address limit in
 * r1; __asmeq() verifies the compiler honoured the register bindings.
 */
#define __put_user_x(__r2, __p, __e, __l, __s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__put_user_" #__s				\
		: "=&r" (__e)						\
		: "0" (__p), "r" (__r2), "r" (__l)			\
		: "ip", "lr", "cc")

M
Michael S. Tsirkin 已提交
250
/*
 * __put_user_check(x, p) - checked single-value store to user space.
 *
 * Binds the value to r2, the pointer to r0 and the inclusive address
 * limit to r1, then dispatches to the out-of-line __put_user_* helper
 * for the object size, with user access enabled around the call.
 * Evaluates to the helper's error code (0 on success).
 *
 * @p is evaluated once into __tmp_p before the fixed-register
 * variables are set up, so side effects in the pointer expression
 * cannot interfere with the register bindings.
 */
#define __put_user_check(x, p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		const typeof(*(p)) __user *__tmp_p = (p);		\
		register const typeof(*(p)) __r2 asm("r2") = (x);	\
		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			__put_user_x(__r2, __p, __e, __l, 1);		\
			break;						\
		case 2:							\
			__put_user_x(__r2, __p, __e, __l, 2);		\
			break;						\
		case 4:							\
			__put_user_x(__r2, __p, __e, __l, 4);		\
			break;						\
		case 8:							\
			__put_user_x(__r2, __p, __e, __l, 8);		\
			break;						\
		default: __e = __put_user_bad(); break;			\
		}							\
		uaccess_restore(__ua_flags);				\
		__e;							\
	})

M
Michael S. Tsirkin 已提交
278
/*
 * put_user(x, p) - checked write of a simple value to user space.
 * May fault and therefore may sleep (might_fault() documents this).
 * Evaluates to 0 on success or the helper's error code.
 */
#define put_user(x, p)							\
	({								\
		might_fault();						\
		__put_user_check(x, p);					\
	 })

R
Russell King 已提交
284 285 286 287 288 289 290
#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS			KERNEL_DS

M
Michael S. Tsirkin 已提交
291 292 293
#define segment_eq(a, b)		(1)
#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)
R
Russell King 已提交
294 295 296 297 298 299
#define get_fs()		(KERNEL_DS)

/* Single address space (no MMU): there is no limit to update. */
static inline void set_fs(mm_segment_t fs)
{
}

M
Michael S. Tsirkin 已提交
300 301
#define get_user(x, p)	__get_user(x, p)
#define put_user(x, p)	__put_user(x, p)
R
Russell King 已提交
302 303 304

#endif /* CONFIG_MMU */

M
Michael S. Tsirkin 已提交
305
#define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)
R
Russell King 已提交
306

307
#define user_addr_max() \
308
	(segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
309

R
Russell King 已提交
310 311 312 313 314 315 316 317 318
/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
M
Michael S. Tsirkin 已提交
319
/*
 * __get_user(x, ptr) - unchecked fetch: no access_ok() validation, the
 * caller must have performed it.  Evaluates to 0 or the fault error.
 */
#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

/*
 * __get_user_error(x, ptr, err) - as __get_user(), but accumulates the
 * error into @err (left unchanged on success) and yields no value.
 */
#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), err);				\
	(void) 0;							\
})

/*
 * Common body: enable user access, read 1/2/4 bytes via the inline
 * exception-table asm, restore access, then convert into @x.  On a
 * fault the asm fixup zeroes __gu_val, so @x never carries stale
 * kernel data.  8-byte reads are not supported here (__get_user_bad).
 */
#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

350
/*
 * Inline user-mode load with exception-table fixup.  Label 1 is the
 * faulting load (TUSER() selects the T-variant/unprivileged form where
 * applicable); on a fault the entry in __ex_table redirects to label 3,
 * which sets @err to -EFAULT, zeroes the result (so no kernel data
 * leaks) and resumes at label 2.
 */
#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

#define __get_user_asm_byte(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrb)

L
Linus Torvalds 已提交
371
#ifndef __ARMEB__
M
Michael S. Tsirkin 已提交
372
#define __get_user_asm_half(x, __gu_addr, err)			\
L
Linus Torvalds 已提交
373 374 375 376 377 378 379
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
M
Michael S. Tsirkin 已提交
380
#define __get_user_asm_half(x, __gu_addr, err)			\
L
Linus Torvalds 已提交
381 382 383 384 385 386 387 388
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

M
Michael S. Tsirkin 已提交
389
#define __get_user_asm_word(x, addr, err)			\
390
	__get_user_asm(x, addr, err, ldr)
L
Linus Torvalds 已提交
391

M
Michael S. Tsirkin 已提交
392
/*
 * __put_user(x, ptr) - unchecked store: no access_ok() validation, the
 * caller must have performed it.  Evaluates to 0 or the fault error.
 */
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

/*
 * __put_user_error(x, ptr, err) - as __put_user(), but accumulates the
 * error into @err (left unchanged on success) and yields no value.
 */
#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), err);				\
	(void) 0;							\
})

/*
 * Common body: snapshot the value, enable user access, store 1/2/4/8
 * bytes via the inline exception-table asm, then restore access.  On a
 * fault @err is set to -EFAULT by the asm fixup.
 */
#define __put_user_err(x, ptr, err)					\
do {									\
	unsigned long __pu_addr = (unsigned long)(ptr);			\
	unsigned int __ua_flags;					\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
	case 4: __put_user_asm_word(__pu_val, __pu_addr, err);	break;	\
	case 8:	__put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
	default: __put_user_bad();					\
	}								\
	uaccess_restore(__ua_flags);					\
} while (0)

423
/*
 * Inline user-mode store with exception-table fixup.  Label 1 is the
 * faulting store (TUSER() selects the unprivileged form where
 * applicable); on a fault the __ex_table entry redirects to label 3,
 * which sets @err to -EFAULT and resumes at label 2.  Unlike the load
 * variant there is no result register to sanitize.
 */
#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strb)

L
Linus Torvalds 已提交
443
#ifndef __ARMEB__
M
Michael S. Tsirkin 已提交
444
#define __put_user_asm_half(x, __pu_addr, err)			\
L
Linus Torvalds 已提交
445
({								\
446
	unsigned long __temp = (__force unsigned long)(x);	\
L
Linus Torvalds 已提交
447 448 449 450
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
M
Michael S. Tsirkin 已提交
451
#define __put_user_asm_half(x, __pu_addr, err)			\
L
Linus Torvalds 已提交
452
({								\
453
	unsigned long __temp = (__force unsigned long)(x);	\
L
Linus Torvalds 已提交
454 455 456 457 458
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif

M
Michael S. Tsirkin 已提交
459
#define __put_user_asm_word(x, __pu_addr, err)			\
460
	__put_user_asm(x, __pu_addr, err, str)
L
Linus Torvalds 已提交
461 462 463 464 465 466 467 468 469

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

M
Michael S. Tsirkin 已提交
470
#define __put_user_asm_dword(x, __pu_addr, err)			\
L
Linus Torvalds 已提交
471
	__asm__ __volatile__(					\
472 473 474 475
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1]\n"	) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1, #4]\n"	) \
L
Linus Torvalds 已提交
476
	"3:\n"							\
477
	"	.pushsection .text.fixup,\"ax\"\n"		\
L
Linus Torvalds 已提交
478 479 480
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
481 482
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
L
Linus Torvalds 已提交
483 484 485
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
486
	"	.popsection"					\
L
Linus Torvalds 已提交
487 488 489 490
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

491

R
Russell King 已提交
492
#ifdef CONFIG_MMU
493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

/*
 * Unchecked copy from user space: wraps the out-of-line
 * arm_copy_from_user() with user access enabled around it.
 * Returns the number of bytes that could NOT be copied.
 */
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int flags = uaccess_save_and_enable();
	unsigned long remaining = arm_copy_from_user(to, from, n);

	uaccess_restore(flags);
	return remaining;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

/*
 * Unchecked copy to user space: wraps the out-of-line
 * arm_copy_to_user() with user access enabled around it.
 * Returns the number of bytes that could NOT be copied.
 */
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned int flags = uaccess_save_and_enable();
	unsigned long remaining = arm_copy_to_user(to, from, n);

	uaccess_restore(flags);
	return remaining;
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

/*
 * Unchecked zeroing of user memory: wraps the out-of-line
 * arm_clear_user() with user access enabled around it.
 * Returns the number of bytes that could NOT be cleared.
 */
static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int flags = uaccess_save_and_enable();
	unsigned long remaining = arm_clear_user(addr, n);

	uaccess_restore(flags);
	return remaining;
}

R
Russell King 已提交
533
#else
M
Michael S. Tsirkin 已提交
534 535 536
#define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
R
Russell King 已提交
537 538
#endif

539
/*
 * Checked copy from user space.  If the source range fails access_ok()
 * the destination is fully zeroed instead — "security hole - plug it" —
 * so callers that ignore the return value cannot observe stale kernel
 * data.  Returns the number of bytes not copied (0 on success).
 */
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (!access_ok(VERIFY_READ, from, n)) {
		/* security hole - plug it */
		memset(to, 0, n);
		return n;
	}
	return __copy_from_user(to, from, n);
}

548
/*
 * Checked copy to user space.  If the destination range fails
 * access_ok() nothing is written and all @n bytes are reported as
 * uncopied.  Returns the number of bytes not copied (0 on success).
 */
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;
	return __copy_to_user(to, from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

558
/*
 * Checked zeroing of user memory.  If the range fails access_ok()
 * nothing is written and all @n bytes are reported as uncleared.
 * Returns the number of bytes not cleared (0 on success).
 */
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;
	return __clear_user(to, n);
}

565
/* These are from lib/ code, and use __get_user() and friends */
566
extern long strncpy_from_user(char *dest, const char __user *src, long count);
L
Linus Torvalds 已提交
567

568 569
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
L
Linus Torvalds 已提交
570 571

#endif /* _ASMARM_UACCESS_H */