#ifndef __i386_UACCESS_H
#define __i386_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned
 * so that we don't take a page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */

static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to,
					1, ret, 1);
			return ret;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to,
					2, ret, 2);
			return ret;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to,
					4, ret, 4);
			return ret;
		}
	}
	return __copy_to_user_ll(to, from, n);
}
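
/*
 * Illustrative usage sketch (not part of the original header): the expected
 * calling pattern for the _inatomic variant - validate with access_ok()
 * first and make sure a fault cannot sleep, e.g. by disabling page faults.
 * The helper name below is hypothetical.
 */
#if 0	/* usage sketch only, never compiled */
static int example_put_u32_atomic(u32 __user *dst, u32 val)
{
	unsigned long left;

	if (!access_ok(VERIFY_WRITE, dst, sizeof(*dst)))
		return -EFAULT;

	pagefault_disable();
	left = __copy_to_user_inatomic(dst, &val, sizeof(val));
	pagefault_enable();

	return left ? -EFAULT : 0;
}
#endif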

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_sleep();
	return __copy_to_user_inatomic(to, from, n);
}
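
/*
 * Illustrative sketch (not part of the original header): __copy_to_user()
 * omits the access_ok() check that copy_to_user() performs, so the caller
 * does it once up front.  The struct and helper below are hypothetical.
 */
#if 0	/* usage sketch only, never compiled */
struct example_reply {
	u32 status;
	u32 value;
};

static int example_send_reply(struct example_reply __user *ubuf,
			      const struct example_reply *r)
{
	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*r)))
		return -EFAULT;

	/* may sleep; returns the number of bytes that could not be copied */
	return __copy_to_user(ubuf, r, sizeof(*r)) ? -EFAULT : 0;
}
#endif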

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/*
	 * Avoid zeroing the tail if the copy fails...
	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
	 * but as the zeroing behaviour is only significant when n is not
	 * constant, that shouldn't be a problem.
	 */
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nozero(to, from, n);
}
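
/*
 * Illustrative sketch (not part of the original header): a caller of the
 * _inatomic variant is assumed to have done access_ok() already and to have
 * page faults disabled; unlike __copy_from_user(), nothing is zero-padded
 * on a partial copy.  The helper below is hypothetical.
 */
#if 0	/* usage sketch only, never compiled */
static unsigned long example_read_buf_atomic(void *dst,
					     const void __user *src,
					     unsigned long len)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	return left;	/* 0 on success, bytes left uncopied on a fault */
}
#endif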

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for an explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_sleep();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}
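
/*
 * Illustrative sketch (not part of the original header): __copy_from_user()
 * pairs with an explicit access_ok() check and zero-fills whatever could not
 * be copied.  The helper below is hypothetical.
 */
#if 0	/* usage sketch only, never compiled */
static int example_fetch_args(void *kbuf, const void __user *ubuf,
			      unsigned long len)
{
	if (!access_ok(VERIFY_READ, ubuf, len))
		return -EFAULT;

	/* on a partial copy the tail of kbuf is padded with zero bytes */
	return __copy_from_user(kbuf, ubuf, len) ? -EFAULT : 0;
}
#endif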

#define ARCH_HAS_NOCACHE_UACCESS
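
/*
 * The _nocache variants below are intended for bulk copies whose kernel
 * destination is unlikely to be read again soon; where the CPU supports it
 * they try to avoid polluting the cache (e.g. with non-temporal stores).
 */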

static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_sleep();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}

static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}

unsigned long __must_check copy_to_user(void __user *to,
					const void *from, unsigned long n);
unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n);
long __must_check strncpy_from_user(char *dst, const char __user *src,
				    long count);
long __must_check __strncpy_from_user(char *dst,
				      const char __user *src, long count);

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, LONG_MAX)
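
/*
 * Illustrative sketch (not part of the original header): strnlen_user() with
 * an explicit bound is usually preferable; the PAGE_SIZE bound here is just a
 * hypothetical example value.
 */
#if 0	/* usage sketch only, never compiled */
static long example_user_string_len(const char __user *str)
{
	/* result includes the terminating NUL; 0 means the access faulted */
	return strnlen_user(str, PAGE_SIZE);
}
#endif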

long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#endif /* __i386_UACCESS_H */