#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

unsigned long __must_check __copy_user_ll
		(void *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);
static __always_inline unsigned long __must_check
A
Al Viro 已提交
17
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
L
Linus Torvalds 已提交
18
{
A
Al Viro 已提交
19
	return __copy_user_ll((__force void *)to, from, n);
L
Linus Torvalds 已提交
20 21
}
/*
 * raw_copy_from_user() - copy @n bytes from user space @from into the
 * kernel buffer @to.
 *
 * Fast path: when @n is a compile-time constant of 1, 2 or 4 bytes the
 * copy is inlined as a single fixed-size __get_user_asm_nozero() access
 * instead of a call to __copy_user_ll().  Every other size — and any
 * non-constant @n — falls through to the generic helper.
 *
 * Return: result of the underlying primitive.  For the inlined cases,
 * ret stays 0 unless the accessor reports a fault (semantics of
 * __get_user_asm_nozero assumed — confirm in asm/uaccess.h).
 */
static __always_inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			ret = 0;
			/* Open the user-access window only around the access. */
			__uaccess_begin();
			__get_user_asm_nozero(*(u8 *)to, from, ret,
					      "b", "b", "=q", 1);
			__uaccess_end();
			return ret;
		case 2:
			ret = 0;
			__uaccess_begin();
			__get_user_asm_nozero(*(u16 *)to, from, ret,
					      "w", "w", "=r", 2);
			__uaccess_end();
			return ret;
		case 4:
			ret = 0;
			__uaccess_begin();
			__get_user_asm_nozero(*(u32 *)to, from, ret,
					      "l", "k", "=r", 4);
			__uaccess_end();
			return ret;
		}
		/* Other constant sizes: fall through to the generic copy. */
	}
	/* Drop the __user qualifier for the common copy routine. */
	return __copy_user_ll(to, (__force const void *)from, n);
}
static __always_inline unsigned long
56 57
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
L
Linus Torvalds 已提交
58
{
59
       return __copy_from_user_ll_nocache_nozero(to, from, n);
60 61
}

#endif /* _ASM_X86_UACCESS_32_H */