/*
 * uaccess.h: User space memory access functions.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/vac-ops.h>
#endif

#ifndef __ASSEMBLY__

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

/* Sparc is not segmented; however, we need to be able to fool access_ok()
 * when doing system calls from kernel mode legitimately.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { 0 })
#define USER_DS     ((mm_segment_t) { -1 })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	((current->thread.current_ds) = (val))

#define segment_eq(a,b)	((a).seg == (b).seg)

/* There is a conveniently unmapped page at PAGE_OFFSET - PAGE_SIZE, so that
 * this test can be fairly lightweight.
 * No one can read or write kernel space from userland by passing a large
 * size and an address near PAGE_OFFSET - a fault will foil any such attempt.
 */
#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
#define access_ok(type, addr, size)					\
	({ (void)(type); __access_ok((unsigned long)(addr), size); })
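
/* A minimal usage sketch (illustrative only, not part of this header) of
 * the legitimate "fooling" of access_ok() described above; do_transfer(),
 * kernel_buf and len are hypothetical:
 *
 *	mm_segment_t old_fs = get_fs();
 *	int err;
 *
 *	set_fs(KERNEL_DS);
 *	err = do_transfer((void __user *)kernel_buf, len);
 *	set_fs(old_fs);
 *
 * With KERNEL_DS, get_fs().seg is 0, so __access_ok() masks the address
 * down to 0 and always passes; restoring old_fs afterwards is essential.
 */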

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 *
 * There is a special way to cover a range of potentially faulting
 * insns (like twenty ldd/std's with no intervening other instructions):
 * you specify the address of the first insn and 0 as the fixup, and in
 * the next exception_table_entry you specify the last potentially
 * faulting insn + 1 and, as the fixup, the routine which should handle
 * the fault.  That fixup code will get
 * (faulting_insn_address - first_insn_in_the_range_address)/4
 * in %g2 (ie. the index of the faulting instruction in the range).
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};
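
/* An illustrative pair of range entries as described above, assuming
 * label 1: sits on the first insn of the range, label 2: immediately
 * follows the last one, and range_fixup is a hypothetical handler:
 *
 *	.section __ex_table,#alloc
 *	.align	4
 *	.word	1b, 0
 *	.word	2b, range_fixup
 *	.previous
 *
 * The first entry's zero fixup marks the start of a range; the handler
 * named in the second entry receives the faulting insn's index in %g2.
 */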

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_extables_range(unsigned long addr, unsigned long *g2);

extern void __ret_efault(void);

/* Uh, these should become the main single-value transfer routines...
 * They automatically use the right size if we just have the right
 * pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to pass pointers around, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x,ptr) ({ \
unsigned long __pu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })

#define get_user(x,ptr) ({ \
unsigned long __gu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
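
/* Usage sketch (a hypothetical ioctl-style handler; arg is a user
 * pointer): both macros evaluate to 0 on success and -EFAULT on a bad
 * address, and pick the 1/2/4/8 byte variant from the pointer type.
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int __user *)arg))
 *		return -EFAULT;
 */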

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr)))
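
/* When hitting the same user area repeatedly, a single access_ok() can
 * cover all of the unchecked accesses.  A sketch with hypothetical
 * uarray, n and sum:
 *
 *	if (!access_ok(VERIFY_READ, uarray, n * sizeof(u32)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++) {
 *		u32 tmp;
 *
 *		if (__get_user(tmp, &uarray[i]))
 *			return -EFAULT;
 *		sum += tmp;
 *	}
 */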

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct __user *)(x))

#define __put_user_check(x,addr,size) ({ \
register int __pu_ret; \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
case 4: __put_user_asm(x,,addr,__pu_ret); break; \
case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} } else { __pu_ret = -EFAULT; } __pu_ret; })

#define __put_user_nocheck(x,addr,size) ({ \
register int __pu_ret; \
switch (size) { \
case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
case 4: __put_user_asm(x,,addr,__pu_ret); break; \
case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} __pu_ret; })

#define __put_user_asm(x,size,addr,ret)					\
__asm__ __volatile__(							\
	"/* Put user asm, inline. */\n"					\
"1:\t"	"st"#size " %1, %2\n\t"						\
	"clr	%0\n"							\
"2:\n\n\t"								\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"b	2b\n\t"							\
	" mov	%3, %0\n\t"						\
        ".previous\n\n\t"						\
	".section __ex_table,#alloc\n\t"				\
	".align	4\n\t"							\
	".word	1b, 3b\n\t"						\
	".previous\n\n\t"						\
       : "=&r" (ret) : "r" (x), "m" (*__m(addr)),			\
	 "i" (-EFAULT))

extern int __put_user_bad(void);

#define __get_user_check(x,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; })

#define __get_user_check_ret(x,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} x = (type) __gu_val; } else return retval; })

#define __get_user_nocheck(x,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} x = (type) __gu_val; __gu_ret; })

#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} x = (type) __gu_val; })

#define __get_user_asm(x,size,addr,ret)					\
__asm__ __volatile__(							\
	"/* Get user asm, inline. */\n"					\
"1:\t"	"ld"#size " %2, %1\n\t"						\
	"clr	%0\n"							\
"2:\n\n\t"								\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"clr	%1\n\t"							\
	"b	2b\n\t"							\
	" mov	%3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,#alloc\n\t"				\
	".align	4\n\t"							\
	".word	1b, 3b\n\n\t"						\
	".previous\n\t"							\
       : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),			\
	 "i" (-EFAULT))

#define __get_user_asm_ret(x,size,addr,retval)				\
if (__builtin_constant_p(retval) && retval == -EFAULT)			\
__asm__ __volatile__(							\
	"/* Get user asm ret, inline. */\n"				\
"1:\t"	"ld"#size " %1, %0\n\n\t"					\
	".section __ex_table,#alloc\n\t"				\
	".align	4\n\t"							\
	".word	1b,__ret_efault\n\n\t"					\
	".previous\n\t"							\
       : "=&r" (x) : "m" (*__m(addr)));					\
else									\
__asm__ __volatile__(							\
	"/* Get user asm ret, inline. */\n"				\
"1:\t"	"ld"#size " %1, %0\n\n\t"					\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"ret\n\t"							\
	" restore %%g0, %2, %%o0\n\n\t"					\
	".previous\n\t"							\
	".section __ex_table,#alloc\n\t"				\
	".align	4\n\t"							\
	".word	1b, 3b\n\n\t"						\
	".previous\n\t"							\
       : "=&r" (x) : "m" (*__m(addr)), "i" (retval))

extern int __get_user_bad(void);

extern unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);

static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (n && __access_ok((unsigned long) to, n))
		return __copy_user(to, (__force void __user *) from, n);
	else
		return n;
}

static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user(to, (__force void __user *) from, n);
}
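
/* Note the return convention: the number of bytes *not* copied, so zero
 * means success.  A typical caller (kstat and ubuf are hypothetical)
 * converts any residue to -EFAULT:
 *
 *	if (copy_to_user(ubuf, &kstat, sizeof(kstat)))
 *		return -EFAULT;
 */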

extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
	__compiletime_error("copy_from_user() buffer size is not provably correct")
#else
	__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	if (unlikely(sz != -1 && sz < n)) {
		copy_from_user_overflow();
		return n;
	}

	if (n && __access_ok((unsigned long) from, n))
		return __copy_user((__force void __user *) to, from, n);
	else
		return n;
}

static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user((__force void __user *) to, from, n);
}
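
/* The read side follows the same convention, and with a fixed-size
 * destination the __compiletime_object_size() check in copy_from_user()
 * can reject provably-too-small buffers at build time.  Sketch with a
 * hypothetical struct req and user pointer ubuf:
 *
 *	struct req req;
 *
 *	if (copy_from_user(&req, ubuf, sizeof(req)))
 *		return -EFAULT;
 */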

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	__asm__ __volatile__ (
		".section __ex_table,#alloc\n\t"
		".align 4\n\t"
		".word 1f,3\n\t"
		".previous\n\t"
		"mov %2, %%o1\n"
		"1:\n\t"
		"call __bzero\n\t"
		" mov %1, %%o0\n\t"
		"mov %%o0, %0\n"
		: "=r" (ret) : "r" (addr), "r" (size) :
		"o0", "o1", "o2", "o3", "o4", "o5", "o7",
		"g1", "g2", "g3", "g4", "g5", "g7", "cc");

	return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long n)
{
	if (n && __access_ok((unsigned long) addr, n))
		return __clear_user(addr, n);
	else
		return n;
}
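
/* clear_user() follows the copy_*_user() convention: it returns the
 * number of bytes left unzeroed, zero on success.  Sketch (uptr and len
 * are hypothetical):
 *
 *	if (clear_user(uptr, len))
 *		return -EFAULT;
 */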

extern long __strncpy_from_user(char *dest, const char __user *src, long count);

static inline long strncpy_from_user(char *dest, const char __user *src, long count)
{
	if (__access_ok((unsigned long) src, count))
		return __strncpy_from_user(dest, src, count);
	else
		return -EFAULT;
}
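
/* Unlike the copy routines, strncpy_from_user() returns the length of
 * the copied string (or -EFAULT), so callers test for a negative value.
 * Sketch with a hypothetical user pointer uname:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *
 *	if (len < 0)
 *		return len;
 *	name[len] = '\0';
 */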

extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);

static inline long strlen_user(const char __user *str)
{
	if (!access_ok(VERIFY_READ, str, 0))
		return 0;
	else
		return __strlen_user(str);
}

static inline long strnlen_user(const char __user *str, long len)
{
	if (!access_ok(VERIFY_READ, str, 0))
		return 0;
	else
		return __strnlen_user(str, len);
}
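
/* In the usual kernel convention, both strlen routines count the
 * terminating NUL and return 0 on a faulting access, so 0 is the error
 * case.  Sketch (uname is a hypothetical user pointer):
 *
 *	long len = strnlen_user(uname, PATH_MAX);
 *
 *	if (len == 0)
 *		return -EFAULT;
 */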

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */