openanolis / cloud-kernel

Commit 23504bae
Authored Mar 21, 2017 by Al Viro

tile: get rid of zeroing, switch to RAW_COPY_USER
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent: c0ea73f1
Showing 5 changed files with 27 additions and 180 deletions (+27 -180)
arch/tile/Kconfig                +1    -0
arch/tile/include/asm/uaccess.h  +7    -136
arch/tile/lib/exports.c          +3    -4
arch/tile/lib/memcpy_32.S        +13   -28
arch/tile/lib/memcpy_user_64.c   +3    -12
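This commit is tile's part of Al Viro's uaccess unification: once an architecture selects ARCH_HAS_RAW_COPY_USER, it supplies only raw_copy_to_user()/raw_copy_from_user(), which copy as much as they can, return the number of bytes left uncopied, and never zero-fill. The access_ok() check, might_fault(), and the zero-padding of a partially copied kernel buffer all move into generic code, which is why the arch-private zeroing variants below can be deleted. A simplified sketch of that generic wrapper, paraphrased from include/linux/uaccess.h of the same era (it is not part of this diff):

/*
 * Simplified sketch of the generic helper that ARCH_HAS_RAW_COPY_USER
 * enables (paraphrased from include/linux/uaccess.h circa v4.11; not
 * part of this commit).  The architecture supplies only the raw copy.
 */
static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);	/* bytes NOT copied */
	if (unlikely(res))
		memset(to + (n - res), 0, res);		/* zero-pad the tail */
	return res;
}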
arch/tile/Kconfig
@@ -33,6 +33,7 @@ config TILE
 	select USER_STACKTRACE_SUPPORT
 	select USE_PMC if PERF_EVENTS
 	select VIRT_TO_BUS
+	select ARCH_HAS_RAW_COPY_USER
 
 config MMU
 	def_bool y
arch/tile/include/asm/uaccess.h
@@ -313,145 +313,16 @@ extern int __put_user_bad(void)
 		((x) = 0, -EFAULT); \
 	})
 
-/**
- * __copy_to_user() - copy data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * An alternate version - __copy_to_user_inatomic() - is designed
- * to be called from atomic context, typically bracketed by calls
- * to pagefault_disable() and pagefault_enable().
- */
-extern unsigned long __must_check __copy_to_user_inatomic(
-	void __user *to, const void *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	might_fault();
-	return __copy_to_user_inatomic(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
-	return n;
-}
-
-/**
- * __copy_from_user() - copy data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- *
- * An alternate version - __copy_from_user_inatomic() - is designed
- * to be called from atomic context, typically bracketed by calls
- * to pagefault_disable() and pagefault_enable(). This version
- * does *NOT* pad with zeros.
- */
-extern unsigned long __must_check __copy_from_user_inatomic(
-	void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_from_user_zeroing(
-	void *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	might_fault();
-	return __copy_from_user_zeroing(to, from, n);
-}
-
-static inline unsigned long __must_check
-_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
-}
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
-	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static inline unsigned long __must_check copy_from_user(void *to,
-					  const void __user *from,
-					  unsigned long n)
-{
-	int sz = __compiletime_object_size(to);
-
-	if (likely(sz == -1 || sz >= n))
-		n = _copy_from_user(to, from, n);
-	else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
+extern unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 #ifdef __tilegx__
-/**
- * __copy_in_user() - copy data within user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to user space. Caller must check
- * the specified blocks with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-extern unsigned long __copy_in_user_inatomic(
+extern unsigned long raw_copy_in_user(
 	void __user *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	might_fault();
-	return __copy_in_user_inatomic(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
-		n = __copy_in_user(to, from, n);
-	return n;
-}
 #endif
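Callers are unaffected by the header shrinking above: copy_to_user() and copy_from_user() keep their contract (return the number of bytes that could not be copied; a faulting read still leaves the destination zero-padded), the implementation has merely moved to the generic layer via INLINE_COPY_FROM_USER/INLINE_COPY_TO_USER. A hypothetical caller, for illustration only (struct foo_args and foo_set_params() are invented names, not from this commit):

/* Hypothetical caller: the visible semantics are unchanged. */
struct foo_args {
	unsigned long flags;
	unsigned long len;
};

static int foo_set_params(const void __user *uptr)
{
	struct foo_args args;

	if (copy_from_user(&args, uptr, sizeof(args)))
		return -EFAULT;	/* on a partial copy, args' tail is zeroed */
	/* ... validate and use args ... */
	return 0;
}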
arch/tile/lib/exports.c
@@ -38,11 +38,10 @@ EXPORT_SYMBOL(__mcount);
 
 /* arch/tile/lib/, various memcpy files */
 EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(__copy_to_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_zeroing);
+EXPORT_SYMBOL(raw_copy_to_user);
+EXPORT_SYMBOL(raw_copy_from_user);
 #ifdef __tilegx__
-EXPORT_SYMBOL(__copy_in_user_inatomic);
+EXPORT_SYMBOL(raw_copy_in_user);
 #endif
 
 /* hypervisor glue */
arch/tile/lib/memcpy_32.S
@@ -24,7 +24,6 @@
 
 #define IS_MEMCPY	  0
 #define IS_COPY_FROM_USER  1
-#define IS_COPY_FROM_USER_ZEROING  2
 #define IS_COPY_TO_USER   -1
 
 	.section .text.memcpy_common, "ax"
@@ -42,40 +41,31 @@
 	9
 
-/* __copy_from_user_inatomic takes the kernel target address in r0,
+/* raw_copy_from_user takes the kernel target address in r0,
  * the user source in r1, and the bytes to copy in r2.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
  */
-ENTRY(__copy_from_user_inatomic)
-.type __copy_from_user_inatomic, @function
-	FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
+ENTRY(raw_copy_from_user)
+.type raw_copy_from_user, @function
+	FEEDBACK_ENTER_EXPLICIT(raw_copy_from_user, \
 	  .text.memcpy_common, \
-	  .Lend_memcpy_common - __copy_from_user_inatomic)
+	  .Lend_memcpy_common - raw_copy_from_user)
 	{ movei r29, IS_COPY_FROM_USER; j memcpy_common }
-	.size __copy_from_user_inatomic, . - __copy_from_user_inatomic
-
-/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but
- * any uncopiable bytes are zeroed in the target.
- */
-ENTRY(__copy_from_user_zeroing)
-.type __copy_from_user_zeroing, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
-	{ movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
-	.size __copy_from_user_zeroing, . - __copy_from_user_zeroing
+	.size raw_copy_from_user, . - raw_copy_from_user
 
-/* __copy_to_user_inatomic takes the user target address in r0,
+/* raw_copy_to_user takes the user target address in r0,
  * the kernel source in r1, and the bytes to copy in r2.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
  */
-ENTRY(__copy_to_user_inatomic)
-.type __copy_to_user_inatomic, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
+ENTRY(raw_copy_to_user)
+.type raw_copy_to_user, @function
+	FEEDBACK_REENTER(raw_copy_from_user)
 	{ movei r29, IS_COPY_TO_USER; j memcpy_common }
-	.size __copy_to_user_inatomic, . - __copy_to_user_inatomic
+	.size raw_copy_to_user, . - raw_copy_to_user
 
 ENTRY(memcpy)
 .type memcpy, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
+	FEEDBACK_REENTER(raw_copy_from_user)
 	{ movei r29, IS_MEMCPY }
 	.size memcpy, . - memcpy
 	/* Fall through */
@@ -520,12 +510,7 @@ copy_from_user_fixup_loop:
 	{ bnzt r2, copy_from_user_fixup_loop }
 
 .Lcopy_from_user_fixup_zero_remainder:
-	{ bbs r29, 2f }  /* low bit set means IS_COPY_FROM_USER */
-	/* byte-at-a-time loop faulted, so zero the rest. */
-	{ move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ }
-1:	{ sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 }
-	{ bnzt r3, 1b }
-2:	move lr, r27
+	move lr, r27
 	{ move r0, r2; jrp lr }
 
 copy_to_user_fixup_loop:
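The last hunk is the behavioural core of this file's change. r29 records which entry point reached memcpy_common, and the deleted bbs r29, 2f test used the low bit of r29 to steer the IS_COPY_FROM_USER_ZEROING case into a byte-at-a-time zeroing loop after a fault. With the zeroing variant gone, the fixup simply returns the uncopied byte count (r2) in r0. In C terms, roughly (an illustrative rendering, not code from this commit):

/* Illustrative C rendering of the post-patch fault fixup: no zeroing
 * loop; every variant reports the bytes that were never copied. */
static unsigned long copy_fixup(unsigned long remaining /* r2 */)
{
	/* removed: the IS_COPY_FROM_USER_ZEROING case also wrote
	 * 'remaining' zero bytes at the destination cursor here */
	return remaining;	/* becomes the r0 return value */
}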
arch/tile/lib/memcpy_user_64.c
@@ -51,7 +51,7 @@
 		__v; \
 	})
 
-#define USERCOPY_FUNC __copy_to_user_inatomic
+#define USERCOPY_FUNC raw_copy_to_user
 #define ST1(p, v) _ST((p), st1, (v))
 #define ST2(p, v) _ST((p), st2, (v))
 #define ST4(p, v) _ST((p), st4, (v))
@@ -62,7 +62,7 @@
 #define LD8 LD
 #include "memcpy_64.c"
 
-#define USERCOPY_FUNC __copy_from_user_inatomic
+#define USERCOPY_FUNC raw_copy_from_user
 #define ST1 ST
 #define ST2 ST
 #define ST4 ST
@@ -73,7 +73,7 @@
 #define LD8(p) _LD((p), ld)
 #include "memcpy_64.c"
 
-#define USERCOPY_FUNC __copy_in_user_inatomic
+#define USERCOPY_FUNC raw_copy_in_user
 #define ST1(p, v) _ST((p), st1, (v))
 #define ST2(p, v) _ST((p), st2, (v))
 #define ST4(p, v) _ST((p), st4, (v))
@@ -83,12 +83,3 @@
 #define LD4(p) _LD((p), ld4u)
 #define LD8(p) _LD((p), ld)
 #include "memcpy_64.c"
-
-unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
-				       unsigned long n)
-{
-	unsigned long rc = __copy_from_user_inatomic(to, from, n);
-	if (unlikely(rc))
-		memset(to + n - rc, 0, rc);
-	return rc;
-}
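The deleted __copy_from_user_zeroing() makes the redundancy explicit: it was exactly the raw copy plus a memset() of the uncopied tail, which is what the generic _copy_from_user() sketched near the top of this page now does for every architecture. All the tilegx primitives must still guarantee is the raw contract (a minimal sketch, with example_read() an invented name):

/* Minimal sketch of the raw contract: copy what you can, report the
 * remainder, never zero-fill.  example_read() is illustrative only. */
static unsigned long example_read(void *dst, const void __user *usrc,
				  unsigned long n)
{
	unsigned long left = raw_copy_from_user(dst, usrc, n);

	/* left == 0 on full success; otherwise the last 'left' bytes of
	 * dst are simply unwritten here (generic code zero-pads them). */
	return n - left;	/* bytes actually copied */
}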