/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
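/*
 * All three variants return the number of bytes *not* copied (i.e. 0 on
 * success) and take dst/src/len in %rdi/%rsi/%rdx -- the convention that
 * the asm constraints in copy_user_generic() below encode.
 */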

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
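	/*
	 * The call target is patched in once at boot by the alternatives
	 * machinery, so this selection costs nothing per call.
	 */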
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
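		/*
		 * 10 bytes are copied as an 8-byte access plus a 2-byte
		 * tail; the tail is only attempted if the first access
		 * succeeded.
		 */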
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
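		/* 16 bytes: two 8-byte accesses, same pattern as case 10. */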
		__uaccess_begin_nospec();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
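
/*
 * Illustrative sketch only -- example_get_user_u64() is a hypothetical
 * helper, not part of this header's API. Because the size is a
 * compile-time constant, the copy reduces to the "case 8" path above:
 * one fixed-size access inside a single
 * __uaccess_begin_nospec()/__uaccess_end() pair, with no call into
 * copy_user_generic(). The caller is assumed to have done access_ok()
 * already; the return value is the number of bytes not copied.
 */
static __always_inline unsigned long
example_get_user_u64(u64 *dst, const u64 __user *src)
{
	return raw_copy_from_user(dst, src, sizeof(*dst));
}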

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
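	/*
	 * Note: plain __uaccess_begin() is enough on the write side; the
	 * barrier in __uaccess_begin_nospec() exists to fence speculative
	 * *loads* through a not-yet-validated user pointer, which is why
	 * raw_copy_from_user() above uses it and this function does not.
	 */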
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
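			/* compiler-only barrier between the two halves of
			 * the store; no fence instruction is emitted */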
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}

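/*
 * __copy_user_nocache() uses non-temporal stores so that a large copy
 * does not evict useful cache lines; @zerorest selects whether the
 * destination tail is zeroed when a fault cuts the copy short (the
 * caller below passes 0).
 */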
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
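	/* the asm copy is invisible to KASAN, so report the write here */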
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

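/*
 * The flushcache variant is for writes to persistent memory: the copied
 * range is flushed out of the cache hierarchy so that it reaches the
 * persistence domain.
 */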
static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}

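/*
 * Fault fixup helper: retries the remaining bytes one at a time and
 * returns how many of them could not be copied.
 */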
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */