/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of relative offsets: the first
 * is the relative offset to an instruction that is allowed to fault,
 * and the second is the relative offset at which the program should
 * continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

#define ARCH_HAS_RELATIVE_EXTABLE
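
/*
 * Illustrative sketch (not part of the original header): because the
 * table stores relative offsets, the fixup code recovers the absolute
 * addresses from the location of the entry itself, roughly as:
 *
 *	insn_addr  = (unsigned long)&entry->insn  + entry->insn;
 *	fixup_addr = (unsigned long)&entry->fixup + entry->fixup;
 *
 * Using 32-bit offsets instead of 64-bit pointers halves the table
 * size and keeps the entries position-independent.
 */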

extern int fixup_exception(struct pt_regs *regs);

#define KERNEL_DS	(-1UL)
#define get_ds()	(KERNEL_DS)

#define USER_DS		TASK_SIZE_64
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}
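
/*
 * Typical (historical) usage of get_fs()/set_fs(), shown only as an
 * illustrative sketch; kbuf, kptr and len are placeholder names:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = __copy_from_user(kbuf, (const void __user *)kptr, len);
 *	set_fs(old_fs);
 */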

#define segment_eq(a, b)	((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= current->addr_limit
 *
 * This needs 65-bit arithmetic.
 */
#define __range_ok(addr, size)						\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
		: "=&r" (flag), "=&r" (roksum)				\
		: "1" (addr), "Ir" (size),				\
		  "r" (current_thread_info()->addr_limit)		\
		: "cc");						\
	flag;								\
})
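
/*
 * Plain-C equivalent of the check above, as an illustrative sketch
 * with addr and size taken as unsigned 64-bit values (the asm folds
 * the overflow test into the flags instead of widening to 65 bits):
 *
 *	sum = addr + size;
 *	ok  = (sum >= addr) && (sum <= current_thread_info()->addr_limit);
 *
 * access_ok() below maps straight onto this check; the VERIFY_* type
 * argument is not used on arm64.
 */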

#define access_ok(type, addr, size)	__range_ok(addr, size)
#define user_addr_max			get_fs

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
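
/*
 * Note on the PSTATE toggling in the helpers below: on CPUs that have
 * PAN but not UAO (the ARM64_ALT_PAN_NOT_UAO alternative), the normal
 * load/store instructions cannot reach user mappings, so PAN is
 * cleared around each access and set again afterwards. On CPUs with
 * UAO, the unprivileged ldtr/sttr forms are patched in instead and
 * PAN can stay enabled.
 */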
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),  \
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),  \
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%",  __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user_unaligned __get_user

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
		__get_user((x), __p) :					\
		((x) = 0, -EFAULT);					\
})
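
/*
 * Example use (illustrative only; uval and uptr are placeholders):
 *
 *	u32 uval;
 *
 *	if (get_user(uval, (u32 __user *)uptr))
 *		return -EFAULT;
 */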

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user_unaligned __put_user

#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
		__put_user((x), __p) :					\
		-EFAULT;						\
})
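
/*
 * Example use (illustrative only; uptr is a placeholder):
 *
 *	if (put_user(0, (u32 __user *)uptr))
 *		return -EFAULT;
 */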

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return __arch_copy_from_user(to, from, n);
}

static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return __arch_copy_to_user(to, from, n);
}

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	kasan_check_write(to, n);

	if (access_ok(VERIFY_READ, from, n)) {
		check_object_size(to, n, false);
		res = __arch_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
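
/*
 * Illustrative note: copy_from_user() returns the number of bytes
 * that could NOT be copied and zeroes the uncopied tail of the
 * destination, so callers usually treat any non-zero result as a
 * fault; req and ubuf are placeholder names:
 *
 *	if (copy_from_user(&req, ubuf, sizeof(req)))
 *		return -EFAULT;
 */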

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);

	if (access_ok(VERIFY_WRITE, to, n)) {
		check_object_size(from, n, true);
		n = __arch_copy_to_user(to, from, n);
	}
	return n;
}

static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
		n = __copy_in_user(to, from, n);
	return n;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
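
/*
 * Hedged summary of the generic kernel contract for the string
 * helpers above: strncpy_from_user() returns the length of the copied
 * string, not counting the trailing NUL, or -EFAULT on an invalid
 * address; strnlen_user() returns the length including the NUL, 0 on
 * fault, or a value greater than n if no NUL was seen in the first n
 * bytes.
 */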

#endif /* __ASM_UACCESS_H */