/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
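
/*
 * Usage sketch (illustrative only; kbuf, ubuf and len are hypothetical
 * caller-side names): a caller that has already validated the user
 * pointer with access_ok() can call copy_user_generic() directly.
 * The return value is the number of bytes that could NOT be copied,
 * so zero means success:
 *
 *	unsigned long not_copied;
 *
 *	not_copied = copy_user_generic(kbuf, (__force void *)ubuf, len);
 *	if (not_copied)
 *		return -EFAULT;
 */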

static __always_inline __must_check unsigned long
copy_to_user_mcsafe(void *to, const void *from, unsigned len)
{
	unsigned long ret;

	__uaccess_begin();
	/*
	 * Note, __memcpy_mcsafe() is explicitly used since it can
	 * handle exceptions / faults.  memcpy_mcsafe() may fall back to
	 * memcpy() which lacks this handling.
	 */
	ret = __memcpy_mcsafe(to, from, len);
	__uaccess_end();
	return ret;
}
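
/*
 * Minimal usage sketch, assuming the destination is a valid user
 * address (udst, ksrc and len are hypothetical names).  A non-zero
 * return is the number of bytes left uncopied, e.g. because a machine
 * check truncated the copy:
 *
 *	unsigned long rem;
 *
 *	rem = copy_to_user_mcsafe((__force void *)udst, ksrc, len);
 *	if (rem)
 *		return -EFAULT;
 */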

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
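
/*
 * Illustrative note: thanks to the __builtin_constant_p() check above,
 * a fixed-size read like the hypothetical one below should compile to
 * the inlined case-8 path (a single __get_user_asm_nozero) rather than
 * a call to copy_user_generic():
 *
 *	u64 val;
 *
 *	if (raw_copy_from_user(&val, uptr, sizeof(val)))
 *		return -EFAULT;
 */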

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
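
/*
 * Symmetric sketch for the store side (uptr is a hypothetical __user
 * pointer): a 16-byte constant-size copy takes the case-16 path above,
 * i.e. two 8-byte __put_user_asm() stores separated by a compiler
 * barrier:
 *
 *	struct { u64 lo, hi; } v = { .lo = 1, .hi = 2 };
 *
 *	if (raw_copy_to_user(uptr, &v, sizeof(v)))
 *		return -EFAULT;
 */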

static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}
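
/*
 * Sketch with hypothetical names: copying directly between two user
 * buffers once both pointers have passed access_ok():
 *
 *	if (raw_copy_in_user(udst, usrc, len))
 *		return -EFAULT;
 */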

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
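
/*
 * Usage sketch (dst, src and len are hypothetical): the _nocache
 * variant copies with non-temporal stores to avoid polluting the CPU
 * cache, while the _flushcache variant additionally ensures any
 * cached destination lines are written back, as persistent-memory
 * style callers require:
 *
 *	if (__copy_from_user_flushcache(dst, src, len))
 *		return -EFAULT;
 */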

unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */