#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that the copy does not take a page fault and sleep.
 *
 * Here we special-case 1, 2, 4 and 8-byte copy_*_user invocations.  On a
 * fault we return the initial request size (1, 2, 4 or 8), as copy_*_user
 * should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */

static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to,
					1, ret, 1);
			return ret;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to,
					2, ret, 2);
			return ret;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to,
					4, ret, 4);
			return ret;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to,
					8, ret, 8);
			return ret;
		}
	}
	return __copy_to_user_ll(to, from, n);
}
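
/*
 * Illustrative sketch (not part of the original header): the typical
 * caller pattern for the _inatomic variant.  It performs no might_fault()
 * debugging check, so it is the form to use once page faults have been
 * disabled.  The identifiers 'dst', 'src', 'len' and 'left' below are
 * hypothetical:
 *
 *	pagefault_disable();
 *	left = __copy_to_user_inatomic(dst, src, len);
 *	pagefault_enable();
 *	if (left)
 *		... retry with a copy that may fault and sleep ...
 */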

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}
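
/*
 * Illustrative sketch (hypothetical read()-style handler, not from the
 * original source): unlike plain copy_to_user(), the caller is
 * responsible for the access_ok() check.  'buf', 'kbuf' and 'len' are
 * made-up names:
 *
 *	if (!access_ok(VERIFY_WRITE, buf, len))
 *		return -EFAULT;
 *	if (__copy_to_user(buf, kbuf, len))
 *		return -EFAULT;
 */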

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/*
	 * Avoid zeroing the tail if the copy fails.
	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
	 * but as the zeroing behaviour is only significant when n is not
	 * constant, that shouldn't be a problem.
	 */
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nozero(to, from, n);
}
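
/*
 * Illustrative sketch (identifiers are hypothetical): because the tail
 * is *not* zeroed on failure, callers such as the pagecache write path
 * first try the atomic copy and fall back to a sleeping one when it
 * comes up short:
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(kaddr, ubuf, bytes);
 *	pagefault_enable();
 *	if (left)
 *		... redo the copy in a context that may fault and sleep ...
 */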

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}
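
/*
 * Illustrative sketch (hypothetical ioctl-style handler, not from the
 * original source): a fixed-size structure copy where the access_ok()
 * check is the caller's job.  'struct foo_args' and 'uarg' are made up:
 *
 *	struct foo_args args;
 *
 *	if (!access_ok(VERIFY_READ, uarg, sizeof(args)))
 *		return -EFAULT;
 *	if (__copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 */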

static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}
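
/*
 * Illustrative note (not from the original source): the _nocache
 * variant copies with non-temporal stores on the kernel side, which can
 * win for large, write-once buffers that will not be read back soon,
 * e.g. (hypothetical names):
 *
 *	left = __copy_from_user_nocache(page_addr, ubuf, PAGE_SIZE);
 *
 * For small or soon-to-be-read destinations the plain cached
 * __copy_from_user() is usually the better choice.
 */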

static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}

#endif /* _ASM_X86_UACCESS_32_H */