/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm-generic/uaccess.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#define __UA_LIMIT	0x80000000UL

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

#define __UA_LIMIT	(- TASK_SIZE)

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)


/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
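
/*
 * Worked example of the rules above (illustrative numbers only), assuming
 * a 32-bit kernel where the user limit is 0x80000000: addr = 0x7ffff000
 * with size = 0x2000 gives addr + size = 0x80001000, which has the high
 * bit set, so the access is rejected; with size = 0x800, addr, size and
 * addr + size all stay below 0x80000000, so the access is allowed.
 */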

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)					\
	(((signed long)((mask) & ((addr) | ((addr) + (size)) | __ua_size(size)))) == 0)

#define access_ok(type, addr, size)					\
	likely(__access_ok((unsigned long)(addr), (size),__access_mask))
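
/*
 * Illustrative sketch only (hypothetical helper, not part of this API):
 * validate a user buffer once before later raw accesses with the
 * unchecked __get_user()/__copy_*() variants defined below.
 *
 *	static inline int example_check_buf(void __user *ubuf, size_t len)
 *	{
 *		if (!access_ok(VERIFY_WRITE, ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */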

/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr)	\
	__put_user_check((x),(ptr),sizeof(*(ptr)))
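
/*
 * Usage sketch (hypothetical caller, e.g. an ioctl handler handing one
 * int back to user space; put_user() itself returns 0 or -EFAULT):
 *
 *	static inline int example_put(int __user *uaddr)
 *	{
 *		int value = 42;
 *
 *		return put_user(value, uaddr);
 *	}
 */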

/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
	__get_user_check((x),(ptr),sizeof(*(ptr)))
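
/*
 * Usage sketch (hypothetical): fetch one int from user space; the
 * access_ok() check is done internally by get_user().
 *
 *	static inline int example_get(const int __user *uaddr, int *out)
 *	{
 *		if (get_user(*out, uaddr))
 *			return -EFAULT;
 *		return 0;
 *	}
 */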

/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
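
/*
 * Usage sketch for the unchecked variants (hypothetical struct and
 * helper): one access_ok() call covers the whole object, after which the
 * individual fields are fetched with __get_user() without re-checking
 * the pointer.
 *
 *	struct example_req { int a; int b; };
 *
 *	static inline int example_fetch(struct example_req __user *ureq,
 *					struct example_req *kreq)
 *	{
 *		if (!access_ok(VERIFY_READ, ureq, sizeof(*ureq)))
 *			return -EFAULT;
 *		if (__get_user(kreq->a, &ureq->a) ||
 *		    __get_user(kreq->b, &ureq->b))
 *			return -EFAULT;
 *		return 0;
 *	}
 */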

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_user_asm(val, "lb", ptr); break;			\
	case 2: __get_user_asm(val, "lh", ptr); break;			\
	case 4: __get_user_asm(val, "lw", ptr); break;			\
	case 8: __GET_USER_DW(val, ptr); break;				\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)

#define __get_user_nocheck(x,ptr,size)					\
({									\
	long __gu_err;							\
									\
	__get_user_common((x), size, ptr);				\
	__gu_err;							\
})

#define __get_user_check(x,ptr,size)					\
({									\
	long __gu_err = -EFAULT;					\
	const void __user * __gu_ptr = (ptr);				\
									\
	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))		\
		__get_user_common((x), size, __gu_ptr);			\
									\
	__gu_err;							\
})

#define __get_user_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(val)) __gu_tmp;				\
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_asm_ll32(val, addr)					\
{									\
        unsigned long long __gu_tmp;					\
									\
	__asm__ __volatile__(						\
	"1:	lw	%1, (%3)				\n"	\
	"2:	lw	%D1, 4(%3)				\n"	\
	"	move	%0, $0					\n"	\
	"3:	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
	(val) = __gu_tmp;						\
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
#endif

#define __put_user_nocheck(x,ptr,size)					\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	long __pu_err = 0;						\
									\
	__pu_val = (x);							\
	switch (size) {							\
	case 1: __put_user_asm("sb", ptr); break;			\
	case 2: __put_user_asm("sh", ptr); break;			\
	case 4: __put_user_asm("sw", ptr); break;			\
	case 8: __PUT_USER_DW(ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
	__pu_err;							\
})

#define __put_user_check(x,ptr,size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	long __pu_err = -EFAULT;					\
									\
	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\
		switch (size) {						\
		case 1: __put_user_asm("sb", __pu_addr); break;		\
		case 2: __put_user_asm("sh", __pu_addr); break;		\
		case 4: __put_user_asm("sw", __pu_addr); break;		\
		case 8: __PUT_USER_DW(__pu_addr); break;		\
		default: __put_user_unknown(); break;			\
		}							\
	}								\
	__pu_err;							\
})

#define __put_user_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3		# __put_user_asm\n"	\
	"2:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_user_asm_ll32(ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_asm_ll32	\n"	\
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unknown(void);

/*
 * We're generating jumps to subroutines which may be outside the range of
 * the jal jump instruction, so modules load the target address into a
 * register and use jalr instead.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t" 				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to,from,n)				\
({									\
	register void __user *__cu_to_r __asm__ ("$4");			\
	register const void *__cu_from_r __asm__ ("$5");		\
	register long __cu_len_r __asm__ ("$6");			\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(__copy_user)					\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  "memory");							\
	__cu_len_r;							\
})

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to,from,n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
	__cu_len;							\
})

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to,from,n)						\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len))			\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
		                                 __cu_len);		\
	__cu_len;							\
})
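
/*
 * Usage sketch (hypothetical): copy a kernel buffer out to user space,
 * as a read() or ioctl handler would.
 *
 *	static inline int example_copy_out(void __user *ubuf,
 *					   const void *kbuf, size_t len)
 *	{
 *		if (copy_to_user(ubuf, kbuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */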

#define __invoke_copy_from_user(to,from,n)				\
({									\
	register void *__cu_to_r __asm__ ("$4");			\
	register const void __user *__cu_from_r __asm__ ("$5");		\
	register long __cu_len_r __asm__ ("$6");			\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user)					\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  "memory");							\
	__cu_len_r;							\
})

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to,from,n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
	                                   __cu_len);			\
	__cu_len;							\
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to,from,n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_READ, __cu_from, __cu_len))		\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
		                                   __cu_len);		\
	__cu_len;							\
})
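
/*
 * Usage sketch (hypothetical): pull a fixed-size argument block in from
 * user space, as a write() or ioctl handler would.
 *
 *	static inline int example_copy_in(void *kbuf,
 *					  const void __user *ubuf, size_t len)
 *	{
 *		if (copy_from_user(kbuf, ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */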

#define __copy_in_user(to, from, n)	__copy_from_user(to, from, n)

#define copy_in_user(to,from,n)						\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&	\
	           access_ok(VERIFY_WRITE, __cu_to, __cu_len)))		\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
		                                   __cu_len);		\
	__cu_len;							\
})

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}

#define clear_user(addr,n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
		((unsigned long)(__cl_addr)), __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
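
/*
 * Usage sketch (hypothetical): zero the tail of a user buffer, e.g. when
 * less data was produced than the caller asked for.
 *
 *	static inline int example_zero_tail(char __user *ubuf,
 *					    size_t copied, size_t len)
 *	{
 *		if (copied < len && clear_user(ubuf + copied, len - copied))
 *			return -EFAULT;
 *		return 0;
 *	}
 */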

/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
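
/*
 * Usage sketch (hypothetical; the destination buffer is assumed to hold
 * at least 32 bytes): copy a NUL-terminated name in from user space.
 *
 *	static inline int example_get_name(char *name,
 *					   const char __user *uname)
 *	{
 *		long len = strncpy_from_user(name, uname, 32);
 *
 *		if (len < 0)
 *			return len;
 *		if (len == 32)
 *			return -ENAMETOOLONG;
 *		return 0;
 *	}
 */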

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}

/*
 * strnlen_user: - Get the size of a string in user space, with a limit.
 * @s: The string to measure.
 * @n: The maximum number of bytes to examine.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space, examining at
 * most @n bytes.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
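
/*
 * Usage sketch (hypothetical): bound a user string before copying it; a
 * return value of 0 means the access faulted, otherwise the count
 * includes the terminating NUL.
 *
 *	static inline long example_user_strlen(const char __user *ustr,
 *					       long limit)
 *	{
 *		long len = strnlen_user(ustr, limit);
 *
 *		if (!len)
 *			return -EFAULT;
 *		return len;
 *	}
 */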

struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);

#endif /* _ASM_UACCESS_H */