#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
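
/*
 * Editor's sketch, not part of the original header: at runtime the
 * alternative_call_2() above behaves roughly like the dispatch below,
 * except that the chosen call target is patched in once at boot instead
 * of being branched on every call; static_cpu_has() is used purely for
 * illustration.
 *
 *	if (static_cpu_has(X86_FEATURE_ERMS))
 *		ret = copy_user_enhanced_fast_string(to, from, len);
 *	else if (static_cpu_has(X86_FEATURE_REP_GOOD))
 *		ret = copy_user_generic_string(to, from, len);
 *	else
 *		ret = copy_user_generic_unrolled(to, from, len);
 */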

__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_from_user_nocheck(dst, src, size);
}
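
/*
 * Editor's sketch, not part of the original header: the double-underscore
 * variants do not perform the access_ok() range check, so callers are
 * expected to have validated the user pointer first, e.g. (all names
 * below are illustrative):
 *
 *	struct foo st;
 *
 *	if (!access_ok(VERIFY_READ, ubuf, sizeof(st)))
 *		return -EFAULT;
 *	if (__copy_from_user(&st, ubuf, sizeof(st)))
 *		return -EFAULT;
 */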

static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	return __copy_to_user_nocheck(dst, src, size);
}
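
/*
 * Editor's note, not part of the original header: might_fault() annotates
 * that this path may take a page fault and sleep, so these variants must
 * not be called from atomic context; the *_inatomic variants further down
 * omit the annotation for that reason.
 */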

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	}
	case 2: {
		u16 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	}

	case 4: {
		u32 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	}
	case 8: {
		u64 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return __copy_to_user_nocheck(dst, src, size);
}

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
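
/*
 * Editor's sketch, not part of the original header: the _nocache variants
 * copy with non-temporal stores so that a large one-shot copy does not
 * displace useful cache lines. Judging from the two callers above, the
 * zerorest argument appears to request that the remainder of the
 * destination be zeroed when the copy faults partway (an assumption, not
 * verified against the assembly implementation):
 *
 *	left = __copy_user_nocache(dst, src, size, 1);	// zero tail on fault
 *	left = __copy_user_nocache(dst, src, size, 0);	// leave tail as-is
 */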

unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */