#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
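
/*
 * Note: the constraints above pin the arguments to the SysV argument
 * registers ("=D" = %rdi = to, "=S" = %rsi = from, "=d" = %rdx = len)
 * and return the uncopied byte count in %eax ("=a" (ret)), so all three
 * variants share one calling convention and the alternatives machinery
 * can patch the call target in place at boot.
 */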

__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
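
/*
 * A note on the __get_user_asm()/__put_user_asm() calls below (the macro
 * bodies live in <asm/uaccess.h>): the four trailing arguments are the
 * mov size suffix, the register-name modifier, the GCC constraint for
 * the value being moved, and the value left in "ret" if the access
 * faults. Since these helpers return the number of bytes NOT copied,
 * that last argument is the byte count still outstanding, not an errno.
 */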

static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
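
/*
 * Note: in the 10- and 16-byte cases above, the first 8-byte access
 * passes the *total* size (10 or 16) as its fault value, because a fault
 * on the first word means nothing was copied; only the second access
 * narrows the fault value to its own width (2 or 8).
 */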

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}
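
/*
 * Hypothetical usage sketch (caller names invented for illustration):
 * the double-underscore variants assume the range was validated first,
 * e.g.
 *
 *	struct foo kfoo;
 *
 *	if (!access_ok(VERIFY_READ, ubuf, sizeof(kfoo)))
 *		return -EFAULT;
 *	if (__copy_from_user(&kfoo, ubuf, sizeof(kfoo)))
 *		return -EFAULT;
 *
 * A non-zero return value is the number of bytes left uncopied.
 */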

static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
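
/*
 * Note: the bare asm("":::"memory") between the two halves of the 10-
 * and 16-byte stores is a compiler-only barrier; it emits no instruction
 * but stops the compiler from reordering accesses across the first
 * store. The quirky 4[(u16 *)src] spelling is ordinary C array indexing,
 * identical to ((u16 *)src)[4], i.e. an 8-byte offset.
 */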

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}
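
/*
 * Note the KASAN asymmetry: copying *to* user space reads the kernel
 * buffer (kasan_check_read), while copying *from* user space writes it
 * (kasan_check_write).
 */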

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	}
	case 2: {
		u16 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	}

	case 4: {
		u32 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	}
	case 8: {
		u64 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
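
/*
 * Because both pointers are user addresses, each fixed-size case above
 * bounces the value through a kernel temporary (tmp): a faulting load
 * leaves ret non-zero and skips the store, so the return value again
 * counts the bytes left uncopied.
 */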

static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}
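
/*
 * The _inatomic variants deliberately omit might_fault(): they are for
 * callers running with page faults disabled, which must handle a
 * non-zero (bytes-not-copied) return themselves. The KASAN checks stay,
 * as they never sleep.
 */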

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}
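
/*
 * The nocache helpers copy with non-temporal stores where possible so a
 * large copy does not flush the cache. Judging by its name, the trailing
 * zerorest flag requests that the uncopied remainder of the destination
 * be zeroed after a fault; the in-atomic variant passes 0 and also skips
 * might_fault().
 */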

unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);
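
/*
 * copy_user_handle_tail() is the fault-recovery tail path shared by the
 * assembly copy routines: it retries the remaining range one byte at a
 * time and returns the number of bytes that still could not be copied.
 */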

#endif /* _ASM_X86_UACCESS_64_H */