Commit 7a3d9b0f authored by Jan Beulich, committed by Ingo Molnar

x86: Unify copy_to_user() and add size checking to it

Similarly to copy_from_user(), where the range check is to
protect against kernel memory corruption, copy_to_user() can
benefit from such checking too: Here it protects against kernel
information leaks.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/5265059502000078000FC4F6@nat28.tlf.novell.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 3df7b41a
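Before the hunks themselves, a minimal stand-alone sketch (not part of the commit) of the mechanism they rely on: GCC's __builtin_object_size() combined with a function carrying the error attribute, which is what __compiletime_object_size() and __compiletime_error() boil down to. The names demo_copy_to_user() and demo_overflow() are invented for illustration, and the check sits behind __OPTIMIZE__ because, like the kernel's, it depends on dead-code elimination to discard the diagnostic call on provably safe paths.

#include <string.h>

/* A call to this that survives optimization becomes a hard build error;
 * this is the trick __compiletime_error() plays in the header changes below. */
extern void demo_overflow(void)
	__attribute__((error("copy_to_user() buffer size is too small")));

/* Hypothetical stand-in for copy_to_user(); only the size check matters here. */
static inline unsigned long
demo_copy_to_user(void *to, const void *from, unsigned long n)
{
#ifdef __OPTIMIZE__	/* needs dead-code elimination, so only active at -O1 and up */
	size_t sz = __builtin_object_size(from, 0);	/* (size_t)-1 == unknown */

	if (sz != (size_t)-1 && __builtin_constant_p(n) && n > sz)
		demo_overflow();	/* provable overflow: refuse to build */
#endif
	memcpy(to, from, n);		/* stand-in for the real user copy */
	return 0;
}

int main(void)
{
	char secret[8] = "secret!", out[64];

	demo_copy_to_user(out, secret, sizeof(secret));	/* builds fine */
	/* demo_copy_to_user(out, secret, 64); */	/* fails the build at -O2 */
	return 0;
}

Uncommenting the oversized call turns a would-be leak of 56 bytes of adjacent stack into a build-time "buffer size is too small" message, which mirrors what the new copy_to_user() check buys for provably-sized copies.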
@@ -544,6 +544,8 @@ extern struct movsl_mask {
unsigned long __must_check _copy_from_user(void *to, const void __user *from,
					   unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					 unsigned n);
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
@@ -553,6 +555,8 @@ unsigned long __must_check _copy_from_user(void *to, const void __user *from,
extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#undef copy_user_diag
@@ -563,6 +567,11 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
#else
static inline void
@@ -571,6 +580,8 @@ __copy_from_user_overflow(int size, unsigned long count)
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
#define __copy_to_user_overflow __copy_from_user_overflow
#endif
static inline unsigned long __must_check
@@ -608,7 +619,26 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
	return n;
}
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);
	might_fault();
	/* See the comment in copy_from_user() above. */
	if (likely(sz < 0 || sz >= n))
		n = _copy_to_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_to_user_overflow();
	else
		__copy_to_user_overflow(sz, n);
	return n;
}
#undef __copy_from_user_overflow
#undef __copy_to_user_overflow
#endif /* _ASM_X86_UACCESS_H */
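One easy-to-miss detail in the uaccess.h hunks above: copy_to_user_overflow() and __copy_to_user_overflow() are declared __asm__("copy_from_user_overflow"), so the new C-level names bind to the assembler symbol that already exists for the copy_from_user() path and no additional out-of-line body is needed. A small user-space sketch of that asm-label aliasing (all names here are invented):

#include <stdio.h>

/* Two distinct C-level declarations that both bind to one assembler symbol. */
extern void from_overflow_diag(void) __asm__("demo_overflow_body");
extern void to_overflow_diag(void) __asm__("demo_overflow_body");

/* The single definition; its C name is irrelevant, the asm label decides. */
void overflow_body(void) __asm__("demo_overflow_body");
void overflow_body(void)
{
	puts("shared overflow diagnostic");
}

int main(void)
{
	from_overflow_diag();	/* both calls land in overflow_body() */
	to_overflow_diag();
	return 0;
}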
@@ -184,7 +184,4 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}
unsigned long __must_check copy_to_user(void __user *to,
const void *from, unsigned long n);
#endif /* _ASM_X86_UACCESS_32_H */
@@ -45,19 +45,9 @@ copy_user_generic(void *to, const void *from, unsigned len)
	return ret;
}
__must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
static __always_inline __must_check
int copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	return _copy_to_user(dst, src, size);
}
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
...
@@ -654,14 +654,13 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(_copy_to_user);
/**
 * copy_from_user: - Copy a block of data from user space.
...
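Finally, on why the new inline's `if (likely(sz < 0 || sz >= n))` lets most calls straight through: __compiletime_object_size() only reports a size when the compiler can see the whole source object and returns -1 otherwise, so the check never rejects a copy it cannot reason about. A small user-space probe of the underlying builtin; the names are invented, and the expected results in the comments assume GCC with -O2 (as the kernel is built):

#include <stdio.h>
#include <stdlib.h>

/*
 * The builtin behind __compiletime_object_size(): it folds to the object's
 * size when the compiler can prove it and to (size_t)-1 ("unknown", printed
 * as -1 below) when it cannot.
 */
#define OBJ_SIZE(p) ((long)__builtin_object_size((p), 0))

struct reply { char buf[16]; };

int main(void)
{
	char on_stack[32];
	struct reply r;
	char *heap = malloc(64);
	char *unknown = getenv("HOME");

	printf("char[32]       -> %ld\n", OBJ_SIZE(on_stack)); /* 32 */
	printf("struct reply * -> %ld\n", OBJ_SIZE(&r));       /* 16 */
	printf("r.buf          -> %ld\n", OBJ_SIZE(r.buf));    /* 16 */
	printf("malloc(64)     -> %ld\n", OBJ_SIZE(heap));     /* 64 at -O2 */
	printf("getenv(...)    -> %ld\n", OBJ_SIZE(unknown));  /* -1: unknown */

	free(heap);
	return 0;
}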