Commit 37096003 authored by Al Viro

s390: get rid of zeroing, switch to RAW_COPY_USER

[folded a fix from Martin]
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent e70f1d59
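
For context: ARCH_HAS_RAW_COPY_USER tells the generic uaccess layer that the architecture provides only raw_copy_{from,to}_user(), primitives that copy and report the number of bytes left uncopied, nothing more. The access_ok() check and the zero-padding of the uncopied tail move out of the s390 code and into include/linux/uaccess.h. A simplified sketch of the generic wrapper that now does the zeroing (roughly what the common header of this era emits):

    static inline unsigned long __must_check
    _copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            unsigned long res = n;

            might_fault();
            if (likely(access_ok(VERIFY_READ, from, n)))
                    res = raw_copy_from_user(to, from, n);
            if (unlikely(res))
                    memset(to + (n - res), 0, res); /* zero-pad uncopied tail */
            return res;
    }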
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -178,6 +178,7 @@ config S390
 	select ARCH_HAS_SCALED_CPUTIME
 	select VIRT_TO_BUS
 	select HAVE_NMI
+	select ARCH_HAS_RAW_COPY_USER
 
 config SCHED_OMIT_FRAME_POINTER
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -60,47 +60,14 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
 
 #define access_ok(type, addr, size) __access_ok(addr, size)
 
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long __must_check __copy_from_user(void *to, const void __user *from,
-					    unsigned long n);
+unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n);
 
-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long __must_check __copy_to_user(void __user *to, const void *from,
-					  unsigned long n);
+unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n);
 
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
@@ -189,13 +156,13 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
 
 static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
 {
-	size = __copy_to_user(ptr, x, size);
+	size = raw_copy_to_user(ptr, x, size);
 	return size ? -EFAULT : 0;
 }
 
 static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
 {
-	size = __copy_from_user(x, ptr, size);
+	size = raw_copy_from_user(x, ptr, size);
 	return size ? -EFAULT : 0;
 }
@@ -285,77 +252,8 @@ int __get_user_bad(void) __attribute__((noreturn));
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
-	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	might_fault();
-	return __copy_to_user(to, from, n);
-}
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-static inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	unsigned int sz = __compiletime_object_size(to);
-
-	might_fault();
-	if (unlikely(sz != -1 && sz < n)) {
-		if (!__builtin_constant_p(n))
-			copy_user_overflow(sz, n);
-		else
-			__bad_copy_user();
-		return n;
-	}
-	return __copy_from_user(to, from, n);
-}
-
 unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	might_fault();
-	return __copy_in_user(to, from, n);
-}
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 
 /*
  * Copy a null terminated string from userspace.
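
Note that nothing changes for callers: copy_from_user() and copy_to_user() keep their contract (return the number of bytes not copied; copy_from_user() zero-fills the uncopied tail). They are simply generated from the generic header now, with INLINE_COPY_{FROM,TO}_USER selecting the inline flavour. A hypothetical caller, unchanged by this patch:

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    struct my_args {                /* hypothetical ioctl argument block */
            __u64 addr;
            __u32 len;
            __u32 flags;
    };

    static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
    {
            struct my_args args;

            /* Returns the number of uncopied bytes; any uncopied tail of
             * 'args' is zero-filled by the generic wrapper, no longer by
             * the s390 assembler code. */
            if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
                    return -EFAULT;
            return 0;
    }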
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -26,7 +26,7 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
 	tmp1 = -4096UL;
 	asm volatile(
 		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
-		"9: jz    7f\n"
+		"6: jz    4f\n"
 		"1: algr  %0,%3\n"
 		"   slgr  %1,%3\n"
 		"   slgr  %2,%3\n"
@@ -35,23 +35,13 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
 		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
 		"   slgr  %4,%1\n"
 		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
-		"   jnh   4f\n"
+		"   jnh   5f\n"
 		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
-		"10:slgr  %0,%4\n"
-		"   algr  %2,%4\n"
-		"4: lghi  %4,-1\n"
-		"   algr  %4,%0\n"	/* copy remaining size, subtract 1 */
-		"   bras  %3,6f\n"	/* memset loop */
-		"   xc    0(1,%2),0(%2)\n"
-		"5: xc    0(256,%2),0(%2)\n"
-		"   la    %2,256(%2)\n"
-		"6: aghi  %4,-256\n"
-		"   jnm   5b\n"
-		"   ex    %4,0(%3)\n"
-		"   j     8f\n"
-		"7: slgr  %0,%0\n"
-		"8:\n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
+		"7: slgr  %0,%4\n"
+		"   j     5f\n"
+		"4: slgr  %0,%0\n"
+		"5:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
 		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
 		: "d" (reg0) : "cc", "memory");
 	return size;
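
The block deleted above is the in-asm memset that used to zero the destination tail after a fault; under RAW_COPY_USER the routine simply returns the residual count. The retry logic that remains is easier to follow in C. A rough model, with mvcos_chunk() as a hypothetical stand-in for one execution of the MVCOS instruction (copies up to 4KB, returns nonzero on fault):

    extern int mvcos_chunk(void *to, const void *from, unsigned long len);

    static unsigned long copy_from_user_model(char *to, const char *from,
                                              unsigned long size)
    {
            unsigned long boundary;

            /* Fast path (labels 0:/6:/1:): copy in up-to-4KB steps. */
            while (size) {
                    unsigned long step = size < 4096 ? size : 4096;

                    if (mvcos_chunk(to, from, step))
                            break;          /* faulted: take the fixup path */
                    to += step;
                    from += step;
                    size -= step;
            }
            if (!size)
                    return 0;               /* label 4: everything copied */

            /* Fixup (labels 2:/3:/7:): if the failed copy crossed a source
             * page boundary, the bytes before the boundary may still be
             * readable, so retry just those and report the rest back. */
            boundary = 4096 - ((unsigned long)from & 4095);
            if (boundary < size && !mvcos_chunk(to, from, boundary))
                    return size - boundary; /* label 7: partial progress */
            return size;                    /* label 5: nothing more copyable */
    }

copy_from_user_mvcp() below is restructured the same way: its memset loop goes, and the surviving labels are renumbered.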
@@ -67,49 +57,38 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
 	asm volatile(
 		"   sacf  0\n"
 		"0: mvcp  0(%0,%2),0(%1),%3\n"
-		"10:jz    8f\n"
+		"7: jz    5f\n"
 		"1: algr  %0,%3\n"
 		"   la    %1,256(%1)\n"
 		"   la    %2,256(%2)\n"
 		"2: mvcp  0(%0,%2),0(%1),%3\n"
-		"11:jnz   1b\n"
-		"   j     8f\n"
+		"8: jnz   1b\n"
+		"   j     5f\n"
 		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
 		"   lghi  %3,-4096\n"
 		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
 		"   slgr  %4,%1\n"
 		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
-		"   jnh   5f\n"
+		"   jnh   6f\n"
 		"4: mvcp  0(%4,%2),0(%1),%3\n"
-		"12:slgr  %0,%4\n"
-		"   algr  %2,%4\n"
-		"5: lghi  %4,-1\n"
-		"   algr  %4,%0\n"	/* copy remaining size, subtract 1 */
-		"   bras  %3,7f\n"	/* memset loop */
-		"   xc    0(1,%2),0(%2)\n"
-		"6: xc    0(256,%2),0(%2)\n"
-		"   la    %2,256(%2)\n"
-		"7: aghi  %4,-256\n"
-		"   jnm   6b\n"
-		"   ex    %4,0(%3)\n"
-		"   j     9f\n"
-		"8: slgr  %0,%0\n"
-		"9: sacf  768\n"
-		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
-		EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
+		"9: slgr  %0,%4\n"
+		"   j     6f\n"
+		"5: slgr  %0,%0\n"
+		"6: sacf  768\n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
 		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
 		: : "cc", "memory");
 	return size;
 }
 
-unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	check_object_size(to, n, false);
 	if (static_branch_likely(&have_mvcos))
 		return copy_from_user_mvcos(to, from, n);
 	return copy_from_user_mvcp(to, from, n);
 }
-EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(raw_copy_from_user);
 
 static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
 					       unsigned long size)
@@ -176,14 +155,13 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
 	return size;
 }
 
-unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	check_object_size(from, n, true);
 	if (static_branch_likely(&have_mvcos))
 		return copy_to_user_mvcos(to, from, n);
 	return copy_to_user_mvcs(to, from, n);
 }
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(raw_copy_to_user);
 
 static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
 					       unsigned long size)
@@ -240,13 +218,13 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
 	return size;
 }
 
-unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
+unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	if (static_branch_likely(&have_mvcos))
 		return copy_in_user_mvcos(to, from, n);
 	return copy_in_user_mvc(to, from, n);
 }
-EXPORT_SYMBOL(__copy_in_user);
+EXPORT_SYMBOL(raw_copy_in_user);
 
 static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
 {
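
For reference, have_mvcos is a static key, so the branch in the three raw_copy_*() entry points above is patched at boot rather than tested on every call. The unchanged setup in this file follows the usual static-key pattern, roughly as sketched below (in kernels of this era the key is flipped when the MVCOS facility, bit 27, is installed; the exact test lives in the unchanged part of the file):

    static DEFINE_STATIC_KEY_FALSE(have_mvcos);

    static int __init uaccess_init(void)
    {
            if (test_facility(27))  /* MVCOS facility installed? */
                    static_branch_enable(&have_mvcos);
            return 0;
    }
    early_initcall(uaccess_init);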