Commit beba3a20, authored Mar 25, 2017 by Al Viro
x86: switch to RAW_COPY_USER
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent: a41e0d75
Showing 6 changed files with 24 additions and 395 deletions (+24, -395).
arch/x86/Kconfig                    +1    -0
arch/x86/include/asm/uaccess.h      +0   -53
arch/x86/include/asm/uaccess_32.h  +15   -80
arch/x86/include/asm/uaccess_64.h   +5   -40
arch/x86/lib/usercopy.c             +1   -54
arch/x86/lib/usercopy_32.c          +2  -168
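With ARCH_HAS_RAW_COPY_USER selected, x86 only has to supply raw_copy_to_user()/raw_copy_from_user() (plus raw_copy_in_user() on 64-bit); access_ok() checking, hardened-usercopy object checks, KASAN annotation and zero-padding of a short read all move into the generic uaccess code. A rough sketch of the generic read path this patch delegates to, mirroring the x86-specific _copy_from_user() removed from arch/x86/lib/usercopy.c below (shape approximate; the real helpers live in include/linux/uaccess.h and lib/usercopy.c of that era):

/*
 * Sketch only, not part of this patch: approximate shape of the generic
 * _copy_from_user() that ARCH_HAS_RAW_COPY_USER lets x86 fall back to.
 */
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;			/* bytes not copied so far */

	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n))) {
		kasan_check_write(to, n);
		res = raw_copy_from_user(to, from, n);	/* arch hook added here */
	}
	if (unlikely(res))			/* short copy: zero-pad the tail */
		memset(to + (n - res), 0, res);
	return res;
}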
arch/x86/Kconfig
@@ -175,6 +175,7 @@ config X86
 	select USER_STACKTRACE_SUPPORT
 	select VIRT_TO_BUS
 	select X86_FEATURE_NAMES		if PROC_FS
+	select ARCH_HAS_RAW_COPY_USER
 
 config INSTRUCTION_DECODER
 	def_bool y
arch/x86/include/asm/uaccess.h
@@ -682,59 +682,6 @@ extern struct movsl_mask {
 # include <asm/uaccess_64.h>
 #endif
 
-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
-					    unsigned n);
-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
-					  unsigned n);
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
-	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static __always_inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	int sz = __compiletime_object_size(to);
-
-	might_fault();
-
-	kasan_check_write(to, n);
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(to, n, false);
-		n = _copy_from_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
-
-static __always_inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	int sz = __compiletime_object_size(from);
-
-	kasan_check_read(from, n);
-
-	might_fault();
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(from, n, true);
-		n = _copy_to_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
-
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
arch/x86/include/asm/uaccess_32.h
@@ -8,113 +8,48 @@
 #include <asm/asm.h>
 #include <asm/page.h>
 
-unsigned long __must_check __copy_to_user_ll
-		(void __user *to, const void *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll
-		(void *to, const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nozero
-		(void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_user_ll
+		(void *to, const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nocache_nozero
 		(void *to, const void __user *from, unsigned long n);
 
-/**
- * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- * The caller should also make sure he pins the user space address
- * so that we don't result in page fault and sleep.
- */
 static __always_inline unsigned long __must_check
-__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	check_object_size(from, n, true);
-	return __copy_to_user_ll(to, from, n);
+	return __copy_user_ll((__force void *)to, from, n);
 }
 
-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-static __always_inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	might_fault();
-	return __copy_to_user_inatomic(to, from, n);
-}
-
-static __always_inline unsigned long
-__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
-{
-	return __copy_from_user_ll_nozero(to, from, n);
-}
-
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- *
- * An alternate version - __copy_from_user_inatomic() - may be called from
- * atomic context and will fail rather than sleep.  In this case the
- * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
- * for explanation of why this is needed.
- */
 static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	might_fault();
-	check_object_size(to, n, false);
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
 		switch (n) {
 		case 1:
 			ret = 0;
 			__uaccess_begin();
-			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			__get_user_asm_nozero(*(u8 *)to, from, ret,
+					      "b", "b", "=q", 1);
 			__uaccess_end();
 			return ret;
 		case 2:
 			ret = 0;
 			__uaccess_begin();
-			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			__get_user_asm_nozero(*(u16 *)to, from, ret,
					      "w", "w", "=r", 2);
 			__uaccess_end();
 			return ret;
 		case 4:
 			ret = 0;
 			__uaccess_begin();
-			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			__get_user_asm_nozero(*(u32 *)to, from, ret,
+					      "l", "k", "=r", 4);
 			__uaccess_end();
 			return ret;
 		}
 	}
-	return __copy_from_user_ll(to, from, n);
+	return __copy_user_ll(to, (__force const void *)from, n);
 }
 
 static __always_inline unsigned long
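After this change the 32-bit raw_copy_to_user()/raw_copy_from_user() both funnel into __copy_user_ll() through __force casts, and neither performs access_ok() nor zero-pads the destination on a fault; callers wanting the old checked, zero-padding semantics keep using copy_from_user(). A hypothetical usage contrast (struct and function names invented for illustration only):

/* Illustration only: foo_req and both helpers are hypothetical. */
struct foo_req { int op; int arg; };

static int read_req_checked(struct foo_req *req, const void __user *uptr)
{
	/* access_ok() and zero-padding handled by the generic wrapper */
	if (copy_from_user(req, uptr, sizeof(*req)))
		return -EFAULT;
	return 0;
}

static int read_req_raw(struct foo_req *req, const void __user *uptr)
{
	/* caller must validate the range itself; no padding on a fault */
	if (!access_ok(VERIFY_READ, uptr, sizeof(*req)))
		return -EFAULT;
	if (raw_copy_from_user(req, uptr, sizeof(*req)))
		return -EFAULT;
	return 0;
}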
arch/x86/include/asm/uaccess_64.h
@@ -45,15 +45,11 @@ copy_user_generic(void *to, const void *from, unsigned len)
 	return ret;
 }
 
-__must_check unsigned long
-copy_in_user(void __user *to, const void __user *from, unsigned len);
-
 static __always_inline __must_check
-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
+unsigned long raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
 {
 	int ret = 0;
 
-	check_object_size(dst, size, false);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -106,20 +102,11 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 	}
 }
 
 static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
-{
-	might_fault();
-	kasan_check_write(dst, size);
-	return __copy_from_user_nocheck(dst, src, size);
-}
-
-static __always_inline __must_check
-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
+unsigned long raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
 {
 	int ret = 0;
 
-	check_object_size(src, size, true);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -175,34 +162,12 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
-{
-	might_fault();
-	kasan_check_read(src, size);
-	return __copy_to_user_nocheck(dst, src, size);
-}
-
-static __always_inline __must_check
-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
 {
 	return copy_user_generic((__force void *)dst,
 				 (__force void *)src, size);
 }
 
-static __must_check __always_inline int
-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
-{
-	kasan_check_write(dst, size);
-	return __copy_from_user_nocheck(dst, src, size);
-}
-
-static __must_check __always_inline int
-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
-{
-	kasan_check_read(src, size);
-	return __copy_to_user_nocheck(dst, src, size);
-}
-
 extern long __copy_user_nocache(void *dst, const void __user *src,
 				unsigned size, int zerorest);
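On 64-bit, the copy_in_user() prototype and the __copy_*_user()/__copy_*_inatomic() wrappers disappear from the arch header; raw_copy_in_user() is the only user-to-user primitive left, and the generic copy_in_user() is expected to wrap it roughly like this (sketch only, shape approximate):

/* Sketch, not part of this patch: approximate generic copy_in_user(). */
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = raw_copy_in_user(to, from, n);	/* arch primitive kept above */
	return n;
}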
arch/x86/lib/usercopy.c
@@ -4,12 +4,9 @@
  *  For licencing details see kernel-base/COPYING
  */
 
-#include <linux/highmem.h>
+#include <linux/uaccess.h>
 #include <linux/export.h>
 
-#include <asm/word-at-a-time.h>
-#include <linux/sched.h>
-
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
@@ -34,53 +31,3 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
-	return n;
-}
-EXPORT_SYMBOL(_copy_to_user);
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
-{
-	unsigned long res = n;
-	if (access_ok(VERIFY_READ, from, n))
-		res = __copy_from_user_inatomic(to, from, n);
-	if (unlikely(res))
-		memset(to + n - res, 0, res);
-	return res;
-}
-EXPORT_SYMBOL(_copy_from_user);
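The exported _copy_to_user()/_copy_from_user() definitions removed above are now provided by generic code on top of the arch's raw_* helpers; the read side was sketched earlier, and the write-side counterpart is expected to look roughly like this (sketch only, shape approximate):

/* Sketch, not part of this patch: approximate generic _copy_to_user(). */
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (likely(access_ok(VERIFY_WRITE, to, n)))
		n = raw_copy_to_user(to, from, n);	/* arch hook from this patch */
	return n;					/* bytes not copied */
}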
arch/x86/lib/usercopy_32.c
@@ -5,12 +5,7 @@
  * Copyright 1997 Andi Kleen <ak@muc.de>
  * Copyright 1997 Linus Torvalds
  */
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/blkdev.h>
 #include <linux/export.h>
-#include <linux/backing-dev.h>
-#include <linux/interrupt.h>
 #include <linux/uaccess.h>
 #include <asm/mmx.h>
 #include <asm/asm.h>
@@ -201,98 +196,6 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
 	return size;
 }
 
-static unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
-					unsigned long size)
-{
-	int d0, d1;
-
-	__asm__ __volatile__(
-		       "        .align 2,0x90\n"
-		       "0:      movl 32(%4), %%eax\n"
-		       "        cmpl $67, %0\n"
-		       "        jbe 2f\n"
-		       "1:      movl 64(%4), %%eax\n"
-		       "        .align 2,0x90\n"
-		       "2:      movl 0(%4), %%eax\n"
-		       "21:     movl 4(%4), %%edx\n"
-		       "        movl %%eax, 0(%3)\n"
-		       "        movl %%edx, 4(%3)\n"
-		       "3:      movl 8(%4), %%eax\n"
-		       "31:     movl 12(%4),%%edx\n"
-		       "        movl %%eax, 8(%3)\n"
-		       "        movl %%edx, 12(%3)\n"
-		       "4:      movl 16(%4), %%eax\n"
-		       "41:     movl 20(%4), %%edx\n"
-		       "        movl %%eax, 16(%3)\n"
-		       "        movl %%edx, 20(%3)\n"
-		       "10:     movl 24(%4), %%eax\n"
-		       "51:     movl 28(%4), %%edx\n"
-		       "        movl %%eax, 24(%3)\n"
-		       "        movl %%edx, 28(%3)\n"
-		       "11:     movl 32(%4), %%eax\n"
-		       "61:     movl 36(%4), %%edx\n"
-		       "        movl %%eax, 32(%3)\n"
-		       "        movl %%edx, 36(%3)\n"
-		       "12:     movl 40(%4), %%eax\n"
-		       "71:     movl 44(%4), %%edx\n"
-		       "        movl %%eax, 40(%3)\n"
-		       "        movl %%edx, 44(%3)\n"
-		       "13:     movl 48(%4), %%eax\n"
-		       "81:     movl 52(%4), %%edx\n"
-		       "        movl %%eax, 48(%3)\n"
-		       "        movl %%edx, 52(%3)\n"
-		       "14:     movl 56(%4), %%eax\n"
-		       "91:     movl 60(%4), %%edx\n"
-		       "        movl %%eax, 56(%3)\n"
-		       "        movl %%edx, 60(%3)\n"
-		       "        addl $-64, %0\n"
-		       "        addl $64, %4\n"
-		       "        addl $64, %3\n"
-		       "        cmpl $63, %0\n"
-		       "        ja  0b\n"
-		       "5:      movl  %0, %%eax\n"
-		       "        shrl  $2, %0\n"
-		       "        andl $3, %%eax\n"
-		       "        cld\n"
-		       "6:      rep; movsl\n"
-		       "        movl %%eax,%0\n"
-		       "7:      rep; movsb\n"
-		       "8:\n"
-		       ".section .fixup,\"ax\"\n"
-		       "9:      lea 0(%%eax,%0,4),%0\n"
-		       "16:     pushl %0\n"
-		       "        pushl %%eax\n"
-		       "        xorl %%eax,%%eax\n"
-		       "        rep; stosb\n"
-		       "        popl %%eax\n"
-		       "        popl %0\n"
-		       "        jmp 8b\n"
-		       ".previous\n"
-		       _ASM_EXTABLE(0b,16b)
-		       _ASM_EXTABLE(1b,16b)
-		       _ASM_EXTABLE(2b,16b)
-		       _ASM_EXTABLE(21b,16b)
-		       _ASM_EXTABLE(3b,16b)
-		       _ASM_EXTABLE(31b,16b)
-		       _ASM_EXTABLE(4b,16b)
-		       _ASM_EXTABLE(41b,16b)
-		       _ASM_EXTABLE(10b,16b)
-		       _ASM_EXTABLE(51b,16b)
-		       _ASM_EXTABLE(11b,16b)
-		       _ASM_EXTABLE(61b,16b)
-		       _ASM_EXTABLE(12b,16b)
-		       _ASM_EXTABLE(71b,16b)
-		       _ASM_EXTABLE(13b,16b)
-		       _ASM_EXTABLE(81b,16b)
-		       _ASM_EXTABLE(14b,16b)
-		       _ASM_EXTABLE(91b,16b)
-		       _ASM_EXTABLE(6b,9b)
-		       _ASM_EXTABLE(7b,16b)
-		       : "=&c"(size), "=&D" (d0), "=&S" (d1)
-		       :  "1"(to), "2"(from), "0"(size)
-		       : "eax", "edx", "memory");
-	return size;
-}
-
 static unsigned long __copy_user_intel_nocache(void *to,
 				const void __user *from, unsigned long size)
 {
@@ -387,8 +290,6 @@ static unsigned long __copy_user_intel_nocache(void *to,
  * Leave these declared but undefined.  They should not be any references to
  * them
  */
-unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
-					unsigned long size);
 unsigned long __copy_user_intel(void __user *to, const void *from,
 					unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
@@ -427,47 +328,7 @@ do { \
 		: "memory");						\
 } while (0)
 
-#define __copy_user_zeroing(to, from, size)				\
-do {									\
-	int __d0, __d1, __d2;						\
-	__asm__ __volatile__(						\
-		"	cmp  $7,%0\n"					\
-		"	jbe  1f\n"					\
-		"	movl %1,%0\n"					\
-		"	negl %0\n"					\
-		"	andl $7,%0\n"					\
-		"	subl %0,%3\n"					\
-		"4:	rep; movsb\n"					\
-		"	movl %3,%0\n"					\
-		"	shrl $2,%0\n"					\
-		"	andl $3,%3\n"					\
-		"	.align 2,0x90\n"				\
-		"0:	rep; movsl\n"					\
-		"	movl %3,%0\n"					\
-		"1:	rep; movsb\n"					\
-		"2:\n"							\
-		".section .fixup,\"ax\"\n"				\
-		"5:	addl %3,%0\n"					\
-		"	jmp 6f\n"					\
-		"3:	lea 0(%3,%0,4),%0\n"				\
-		"6:	pushl %0\n"					\
-		"	pushl %%eax\n"					\
-		"	xorl %%eax,%%eax\n"				\
-		"	rep; stosb\n"					\
-		"	popl %%eax\n"					\
-		"	popl %0\n"					\
-		"	jmp 2b\n"					\
-		".previous\n"						\
-		_ASM_EXTABLE(4b,5b)					\
-		_ASM_EXTABLE(0b,3b)					\
-		_ASM_EXTABLE(1b,6b)					\
-		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
-		: "3"(size), "0"(size), "1"(to), "2"(from)		\
-		: "memory");						\
-} while (0)
-
-unsigned long __copy_to_user_ll(void __user *to, const void *from,
-				unsigned long n)
+unsigned long __copy_user_ll(void *to, const void *from, unsigned long n)
 {
 	stac();
 	if (movsl_is_ok(to, from, n))
@@ -477,34 +338,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
 	clac();
 	return n;
 }
-EXPORT_SYMBOL(__copy_to_user_ll);
-
-unsigned long __copy_from_user_ll(void *to, const void __user *from,
-					unsigned long n)
-{
-	stac();
-	if (movsl_is_ok(to, from, n))
-		__copy_user_zeroing(to, from, n);
-	else
-		n = __copy_user_zeroing_intel(to, from, n);
-	clac();
-	return n;
-}
-EXPORT_SYMBOL(__copy_from_user_ll);
-
-unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
-					 unsigned long n)
-{
-	stac();
-	if (movsl_is_ok(to, from, n))
-		__copy_user(to, from, n);
-	else
-		n = __copy_user_intel((void __user *)to,
-				      (const void *)from, n);
-	clac();
-	return n;
-}
-EXPORT_SYMBOL(__copy_from_user_ll_nozero);
+EXPORT_SYMBOL(__copy_user_ll);
 
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 					unsigned long n)