#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

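/*
 * copy_user_generic() picks, via the alternatives mechanism at boot, the
 * fastest of the three variants above for the running CPU.  Like those
 * variants it returns the number of bytes that could NOT be copied, so 0
 * means the whole range was copied.
 */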
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

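/*
 * __copy_from_user_nocheck() assumes the caller has already validated the
 * user range (no access_ok() here).  Compile-time-constant sizes up to 16
 * bytes are open-coded as one or two __get_user_asm_nozero() accesses;
 * everything else falls back to copy_user_generic().
 */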
static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	check_object_size(dst, size, false);
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
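	/*
	 * The 10- and 16-byte cases are split into an 8-byte access
	 * followed by a 2- or 8-byte access, so those sizes are still
	 * handled inline.
	 */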
	case 10:
		__uaccess_begin();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

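/*
 * __copy_from_user()/__copy_to_user() add might_fault() and the KASAN
 * annotation on top of the _nocheck variants; access_ok() is still the
 * caller's job.  A typical caller (sketch, with a hypothetical uptr/kbuf):
 *
 *	if (access_ok(VERIFY_READ, uptr, sizeof(*kbuf)) &&
 *	    !__copy_from_user(kbuf, uptr, sizeof(*kbuf)))
 *		... use *kbuf ...
 */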
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}

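/*
 * __copy_to_user_nocheck() mirrors the read side: no access_ok() on @dst,
 * constant sizes are open-coded with __put_user_asm(), and anything else
 * goes through copy_user_generic().
 */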
static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	check_object_size(src, size, true);
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
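	/*
	 * As on the read side, 10- and 16-byte stores are split into an
	 * 8-byte store plus a 2- or 8-byte store; the empty asm with a
	 * "memory" clobber is a compiler barrier between the two.
	 */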
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}

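/*
 * __copy_in_user() copies directly between two user-space buffers; the
 * caller is responsible for having validated both ranges.
 */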
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}

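/*
 * The _inatomic variants omit might_fault(), so they may be called with
 * page faults disabled; a short copy shows up in the non-zero return value.
 */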
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}

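/*
 * __copy_user_nocache() copies using non-temporal stores so large copies
 * do not pollute the CPU caches; in this header it is always called with
 * zerorest == 0.
 */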
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

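/*
 * copy_user_handle_tail() is the fixup path the copy routines branch to on
 * a fault: it retries the remaining bytes one at a time and returns how
 * many could not be copied.
 */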
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */