/*
 * PowerPC user-space memory access helpers (uaccess).
 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
9
#include <asm/asm-compat.h>
10
#include <asm/ppc_asm.h>
S
Stephen Rothwell 已提交
11
#include <asm/processor.h>
12
#include <asm/page.h>
S
Stephen Rothwell 已提交
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

/* KERNEL_DS imposes no limit: every address passes __access_ok(). */
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses.
 * addr and size are checked independently, so addr + size may
 * nominally exceed the limit; any such access lands in the gap
 * and faults instead of reaching kernel memory.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

/*
 * 32-bit form: written as (size - 1) <= (seg - addr) so the unsigned
 * arithmetic cannot wrap; size == 0 is special-cased because 0 - 1
 * would wrap to the maximum unsigned value.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) &&		\
	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

/* 'type' (VERIFY_READ / VERIFY_WRITE) is accepted but unused here. */
#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))

/*
 * The exception table consists of pairs of relative addresses: the first is
 * the address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out what
 * to do.
 *
 * All the routines below use bits of fixup code that are out of line with the
 * main instruction path.  This means when everything is well, we don't even
 * have to jump over them.  Further, they do not intrude on our cache or tlb
 * entries.
 */

79 80
#define ARCH_HAS_RELATIVE_EXTABLE

/*
 * Relative exception table entry: each field holds a 32-bit offset
 * from the field's own address — 'insn' locates the instruction that
 * may fault, 'fixup' locates the recovery code (see extable_fixup()).
 */
struct exception_table_entry {
	int insn;
	int fixup;
};

86 87 88 89 90
/*
 * Resolve a relative extable entry to the absolute address of its
 * fixup code: the stored offset is relative to the field itself.
 */
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	unsigned long field_addr = (unsigned long)&x->fixup;

	return field_addr + x->fixup;
}

S
Stephen Rothwell 已提交
91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

/*
 * The _inatomic variants expand to the _nosleep forms, which never call
 * might_fault(); the caller is responsible for address validation.
 */
#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

/* The regular routines are used for unaligned accesses as well. */
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/* Invalid-size tripwire referenced by __put_user_size()'s default case. */
extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * Fault handling: if the store at label 1 faults, the exception table
 * entry redirects execution to label 3 in .fixup, which loads -EFAULT
 * into the error operand and branches back to label 2 (just past the
 * access).  On success err is left untouched (it enters as operand "0").
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	  __put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
/*
 * 64-bit store on 32-bit: two word stores.  "%1+1" names the second
 * register of the 64-bit register pair; both stores get extable entries.
 */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 4b)				\
		EX_TABLE(2b, 4b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

/* Dispatch on access size; sets retval to 0 or -EFAULT. */
#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	  case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	  case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	  case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	  case 8: __put_user_asm2(x, ptr, retval); break;	\
	  default: __put_user_bad();				\
	}							\
} while (0)

/*
 * Store without access_ok(); might_fault() only when the destination
 * is not a kernel address.  Evaluates to 0 or -EFAULT.
 */
#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

/*
 * Checked store (backs put_user()): validates the range first and
 * returns -EFAULT without touching memory when access_ok() fails.
 */
#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	might_fault();							\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

/*
 * Atomic-context store: no access_ok(), no might_fault().
 */
#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})


S
Stephen Rothwell 已提交
209 210 211 212
/* Invalid-size tripwire referenced by __get_user_size(). */
extern long __get_user_bad(void);

/*
 * Single user load with fault fixup: on a fault at label 1 the extable
 * entry jumps to label 3, which sets err to -EFAULT, zeroes the value
 * register, and resumes at label 2.
 */
#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
/*
 * 64-bit load on 32-bit: two word loads into a register pair ("%1+1"
 * is the second register).  The fixup zeroes both halves.  "=&r" marks
 * the destination early-clobber so it cannot alias the address register.
 */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

/*
 * Dispatch on access size; sets retval to 0 or -EFAULT.  A size larger
 * than the destination triggers the __get_user_bad() tripwire.
 */
#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval);  break;	\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

/*
 * Load without access_ok(); might_fault() only for non-kernel addresses.
 * Evaluates to 0 or -EFAULT; on fault, x is zeroed by the asm fixup.
 * NOTE(review): the final cast lacks the __force the _check/_nosleep
 * variants use — likely a sparse-annotation inconsistency; confirm.
 */
#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_fault();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

/*
 * Checked load (backs get_user()): __gu_val starts at 0 so x reads as
 * zero when access_ok() rejects the range and the load never happens.
 */
#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long  __gu_val = 0;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	might_fault();							\
	if (access_ok(VERIFY_READ, __gu_addr, (size)))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})

/*
 * Atomic-context load: no access_ok(), no might_fault().
 */
#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})


S
Stephen Rothwell 已提交
297 298 299 300 301 302
/* more complex routines */

/*
 * Assembly bulk-copy primitive; returns the number of bytes left
 * uncopied (0 on full success), as the callers below rely on.
 */
extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifndef __powerpc64__
303

304
static inline unsigned long copy_from_user(void *to,
305
		const void __user *from, unsigned long n)
S
Stephen Rothwell 已提交
306
{
A
Al Viro 已提交
307
	if (likely(access_ok(VERIFY_READ, from, n))) {
308
		check_object_size(to, n, false);
S
Stephen Rothwell 已提交
309
		return __copy_tofrom_user((__force void __user *)to, from, n);
310
	}
A
Al Viro 已提交
311
	memset(to, 0, n);
S
Stephen Rothwell 已提交
312 313 314
	return n;
}

315
/*
 * Copy n bytes from the kernel buffer 'from' to user space.
 * Returns the number of bytes that could not be copied (0 on success);
 * an invalid destination range leaves user memory untouched and
 * reports all n bytes as uncopied.
 */
static inline unsigned long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	unsigned long uncopied = n;

	if (access_ok(VERIFY_WRITE, to, n)) {
		check_object_size(from, n, true);
		uncopied = __copy_tofrom_user(to, (__force void __user *)from, n);
	}
	return uncopied;
}

#else /* __powerpc64__ */

327 328 329 330 331 332 333 334 335 336
/* user-to-user copy maps straight onto the asm primitive */
#define __copy_in_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

/* On 64-bit the checked copy routines are implemented out of line. */
extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
				  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
				  unsigned long n);

S
Stephen Rothwell 已提交
337 338
#endif /* __powerpc64__ */

339 340
/*
 * Unchecked copy from user space that never calls might_fault().
 * For small constant sizes (1/2/4/8) a single inline load is used;
 * otherwise — or if the inline fast path faults (ret != 0) — fall
 * back to the assembly __copy_tofrom_user().  Returns the number of
 * bytes not copied (0 on success).  The hardened-usercopy check only
 * covers the variable-size path.
 */
static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		/* ret stays 1 ("1 byte uncopied") if no case matches. */
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	check_object_size(to, n, false);

	return __copy_tofrom_user((__force void __user *)to, from, n);
}

368 369
/*
 * Unchecked copy to user space that never calls might_fault().
 * Mirror of __copy_from_user_inatomic(): inline store for small
 * constant sizes, assembly fallback otherwise or on fault.
 * Returns the number of bytes not copied (0 on success).
 */
static inline unsigned long __copy_to_user_inatomic(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		/* ret stays 1 ("1 byte uncopied") if no case matches. */
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	check_object_size(from, n, true);

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

397 398
/*
 * Unchecked copy from user space; annotated with might_fault(), so it
 * may sleep.  The caller must have validated the range via access_ok().
 */
static inline unsigned long __copy_from_user(void *to,
		const void __user *from, unsigned long size)
{
	might_fault();
	return __copy_from_user_inatomic(to, from, size);
}

404 405
/*
 * Unchecked copy to user space; annotated with might_fault(), so it
 * may sleep.  The caller must have validated the range via access_ok().
 */
static inline unsigned long __copy_to_user(void __user *to,
		const void *from, unsigned long size)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, size);
}

extern unsigned long __clear_user(void __user *addr, unsigned long size);

/*
 * Zero a block of user memory after validating the destination range.
 * Returns the number of bytes that could not be cleared (0 on success);
 * an invalid range reports all 'size' bytes as uncleared.
 */
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long uncleared = size;

	might_fault();
	if (access_ok(VERIFY_WRITE, addr, size))
		uncleared = __clear_user(addr, size);
	return uncleared;
}

421 422 423
/* Out-of-line string helpers; presumably the usual kernel semantics
 * (length or -EFAULT return) — implementations live elsewhere. */
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
S
Stephen Rothwell 已提交
424 425 426 427 428

#endif  /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif	/* _ARCH_POWERPC_UACCESS_H */