openanolis / cloud-kernel
Commit 3a0e75ad
Authored Mar 22, 2017 by Al Viro
xtensa: get rid of zeroing, use RAW_COPY_USER
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent: 0b46a94e
Showing 3 changed files with 57 additions and 114 deletions:
arch/xtensa/Kconfig                 +1  -0
arch/xtensa/include/asm/uaccess.h   +8  -46
arch/xtensa/lib/usercopy.S          +48 -68
arch/xtensa/Kconfig
@@ -29,6 +29,7 @@ config XTENSA
 	select NO_BOOTMEM
 	select PERF_USE_VMALLOC
 	select VIRT_TO_BUS
+	select ARCH_HAS_RAW_COPY_USER
 	help
 	  Xtensa processors are 32-bit RISC machines designed by Tensilica
 	  primarily for embedded systems. These processors are both
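Selecting ARCH_HAS_RAW_COPY_USER switches xtensa over to the generic copy_{to,from}_user() wrappers, which own both the access_ok() check and the zeroing of any uncopied tail. What follows is a rough sketch of the generic from-user path in this series, simplified for illustration (the in-tree version also carries might_fault() and KASAN instrumentation); it is not part of this commit:

/*
 * Sketch, not part of this commit: the generic wrapper that
 * ARCH_HAS_RAW_COPY_USER architectures rely on.  The arch supplies
 * only raw_copy_from_user(), which returns the number of bytes NOT
 * copied; the generic code does the access_ok() check and zeroes the
 * uncopied tail, the two jobs the xtensa code below used to do itself.
 */
static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);	/* plug the info-leak hole */
	return res;
}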
arch/xtensa/include/asm/uaccess.h
@@ -234,60 +234,22 @@ __asm__ __volatile__(			\
  * Copy to/from user space
  */

-/*
- * We use a generic, arbitrary-sized copy subroutine.  The Xtensa
- * architecture would cause heavy code bloat if we tried to inline
- * these functions and provide __constant_copy_* equivalents like the
- * i386 versions.  __xtensa_copy_user is quite efficient.  See the
- * .fixup section of __xtensa_copy_user for a discussion on the
- * X_zeroing equivalents for Xtensa.
- */
-
 extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
-#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)
-

 static inline unsigned long
-__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	return __copy_user(to, from, n);
-}
-
-static inline unsigned long
-__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
-{
-	return __copy_user(to, from, n);
-}
-
-static inline unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
-{
-	prefetch(from);
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_user(to, from, n);
-	return n;
+	prefetchw(to);
+	return __xtensa_copy_user(to, (__force const void *)from, n);
 }
-
 static inline unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	prefetchw(to);
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_user(to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
+	prefetchw(from);
+	return __xtensa_copy_user((__force void *)to, from, n);
 }
-
-#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
-#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
-#define __copy_to_user(to, from, n) \
-	__generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user(to, from, n) \
-	__generic_copy_from_user_nocheck((to), (from), (n))
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER

 /*
  * We need to return the number of bytes not cleared.  Our memset()
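INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER simply ask the generic header to emit copy_{from,to}_user() as inline functions instead of out-of-line ones; the caller-visible contract is unchanged, the return value being the number of bytes not copied. A minimal, hypothetical caller for illustration (example_read_from_user() is invented here, not taken from this commit):

/* Hypothetical caller, for illustration only.  On every architecture
 * copy_from_user() returns the number of bytes it could NOT copy, and
 * with RAW_COPY_USER the generic wrapper has already zeroed that tail
 * of the kernel buffer before we see the return value. */
static int example_read_from_user(void *kbuf, const void __user *ubuf, size_t len)
{
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;		/* partial copy: fail the operation */
	return 0;
}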
arch/xtensa/lib/usercopy.S
@@ -102,9 +102,9 @@ __xtensa_copy_user:
 	bltui	a4, 7, .Lbytecopy	# do short copies byte by byte

 	# copy 1 byte
-	EX(l8ui, a6, a3, 0, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
 	addi	a3, a3,  1
-	EX(s8i, a6, a5,  0, s_fixup)
+	EX(s8i, a6, a5,  0, fixup)
 	addi	a5, a5,  1
 	addi	a4, a4, -1
 	bbci.l	a5, 1, .Ldstaligned	# if dst is now aligned, then
@@ -112,11 +112,11 @@ __xtensa_copy_user:
 .Ldst2mod4:	# dst 16-bit aligned
 	# copy 2 bytes
 	bltui	a4, 6, .Lbytecopy	# do short copies byte by byte
-	EX(l8ui, a6, a3, 0, l_fixup)
-	EX(l8ui, a7, a3, 1, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
+	EX(l8ui, a7, a3, 1, fixup)
 	addi	a3, a3,  2
-	EX(s8i, a6, a5,  0, s_fixup)
-	EX(s8i, a7, a5,  1, s_fixup)
+	EX(s8i, a6, a5,  0, fixup)
+	EX(s8i, a7, a5,  1, fixup)
 	addi	a5, a5,  2
 	addi	a4, a4, -2
 	j	.Ldstaligned	# dst is now aligned, return to main algorithm
@@ -135,9 +135,9 @@ __xtensa_copy_user:
 	add	a7, a3, a4	# a7 = end address for source
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lnextbyte:
-	EX(l8ui, a6, a3, 0, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
 	addi	a3, a3, 1
-	EX(s8i, a6, a5, 0, s_fixup)
+	EX(s8i, a6, a5, 0, fixup)
 	addi	a5, a5, 1
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a7, .Lnextbyte
@@ -161,15 +161,15 @@ __xtensa_copy_user:
 	add	a8, a8, a3	# a8 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop1:
-	EX(l32i, a6, a3,  0, l_fixup)
-	EX(l32i, a7, a3,  4, l_fixup)
-	EX(s32i, a6, a5,  0, s_fixup)
-	EX(l32i, a6, a3,  8, l_fixup)
-	EX(s32i, a7, a5,  4, s_fixup)
-	EX(l32i, a7, a3, 12, l_fixup)
-	EX(s32i, a6, a5,  8, s_fixup)
+	EX(l32i, a6, a3,  0, fixup)
+	EX(l32i, a7, a3,  4, fixup)
+	EX(s32i, a6, a5,  0, fixup)
+	EX(l32i, a6, a3,  8, fixup)
+	EX(s32i, a7, a5,  4, fixup)
+	EX(l32i, a7, a3, 12, fixup)
+	EX(s32i, a6, a5,  8, fixup)
 	addi	a3, a3, 16
-	EX(s32i, a7, a5, 12, s_fixup)
+	EX(s32i, a7, a5, 12, fixup)
 	addi	a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a8, .Loop1
@@ -177,31 +177,31 @@ __xtensa_copy_user:
 .Loop1done:
 	bbci.l	a4, 3, .L2
 	# copy 8 bytes
-	EX(l32i, a6, a3,  0, l_fixup)
-	EX(l32i, a7, a3,  4, l_fixup)
+	EX(l32i, a6, a3,  0, fixup)
+	EX(l32i, a7, a3,  4, fixup)
 	addi	a3, a3,  8
-	EX(s32i, a6, a5,  0, s_fixup)
-	EX(s32i, a7, a5,  4, s_fixup)
+	EX(s32i, a6, a5,  0, fixup)
+	EX(s32i, a7, a5,  4, fixup)
 	addi	a5, a5,  8
 .L2:
 	bbci.l	a4, 2, .L3
 	# copy 4 bytes
-	EX(l32i, a6, a3,  0, l_fixup)
+	EX(l32i, a6, a3,  0, fixup)
 	addi	a3, a3,  4
-	EX(s32i, a6, a5,  0, s_fixup)
+	EX(s32i, a6, a5,  0, fixup)
 	addi	a5, a5,  4
 .L3:
 	bbci.l	a4, 1, .L4
 	# copy 2 bytes
-	EX(l16ui, a6, a3,  0, l_fixup)
+	EX(l16ui, a6, a3,  0, fixup)
 	addi	a3, a3,  2
-	EX(s16i,  a6, a5,  0, s_fixup)
+	EX(s16i,  a6, a5,  0, fixup)
 	addi	a5, a5,  2
 .L4:
 	bbci.l	a4, 0, .L5
 	# copy 1 byte
-	EX(l8ui, a6, a3,  0, l_fixup)
-	EX(s8i,  a6, a5,  0, s_fixup)
+	EX(l8ui, a6, a3,  0, fixup)
+	EX(s8i,  a6, a5,  0, fixup)
 .L5:
 	movi	a2, 0		# return success for len bytes copied
 	retw
@@ -217,7 +217,7 @@ __xtensa_copy_user:
 	# copy 16 bytes per iteration for word-aligned dst and unaligned src
 	and	a10, a3, a8	# save unalignment offset for below
 	sub	a3, a3, a10	# align a3 (to avoid sim warnings only; not needed for hardware)
-	EX(l32i, a6, a3, 0, l_fixup)	# load first word
+	EX(l32i, a6, a3, 0, fixup)	# load first word
 #if XCHAL_HAVE_LOOPS
 	loopnez	a7, .Loop2done
 #else /* !XCHAL_HAVE_LOOPS */
@@ -226,19 +226,19 @@ __xtensa_copy_user:
 	add	a12, a12, a3	# a12 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop2:
-	EX(l32i, a7, a3,  4, l_fixup)
-	EX(l32i, a8, a3,  8, l_fixup)
+	EX(l32i, a7, a3,  4, fixup)
+	EX(l32i, a8, a3,  8, fixup)
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5,  0, s_fixup)
-	EX(l32i, a9, a3, 12, l_fixup)
+	EX(s32i, a6, a5,  0, fixup)
+	EX(l32i, a9, a3, 12, fixup)
 	ALIGN(	a7, a7, a8)
-	EX(s32i, a7, a5,  4, s_fixup)
-	EX(l32i, a6, a3, 16, l_fixup)
+	EX(s32i, a7, a5,  4, fixup)
+	EX(l32i, a6, a3, 16, fixup)
 	ALIGN(	a8, a8, a9)
-	EX(s32i, a8, a5,  8, s_fixup)
+	EX(s32i, a8, a5,  8, fixup)
 	addi	a3, a3, 16
 	ALIGN(	a9, a9, a6)
-	EX(s32i, a9, a5, 12, s_fixup)
+	EX(s32i, a9, a5, 12, fixup)
 	addi	a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a12, .Loop2
@@ -246,39 +246,39 @@ __xtensa_copy_user:
 .Loop2done:
 	bbci.l	a4, 3, .L12
 	# copy 8 bytes
-	EX(l32i, a7, a3,  4, l_fixup)
-	EX(l32i, a8, a3,  8, l_fixup)
+	EX(l32i, a7, a3,  4, fixup)
+	EX(l32i, a8, a3,  8, fixup)
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5,  0, s_fixup)
+	EX(s32i, a6, a5,  0, fixup)
 	addi	a3, a3,  8
 	ALIGN(	a7, a7, a8)
-	EX(s32i, a7, a5,  4, s_fixup)
+	EX(s32i, a7, a5,  4, fixup)
 	addi	a5, a5,  8
 	mov	a6, a8
 .L12:
 	bbci.l	a4, 2, .L13
 	# copy 4 bytes
-	EX(l32i, a7, a3,  4, l_fixup)
+	EX(l32i, a7, a3,  4, fixup)
 	addi	a3, a3,  4
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5,  0, s_fixup)
+	EX(s32i, a6, a5,  0, fixup)
 	addi	a5, a5,  4
 	mov	a6, a7
 .L13:
 	add	a3, a3, a10	# readjust a3 with correct misalignment
 	bbci.l	a4, 1, .L14
 	# copy 2 bytes
-	EX(l8ui, a6, a3,  0, l_fixup)
-	EX(l8ui, a7, a3,  1, l_fixup)
+	EX(l8ui, a6, a3,  0, fixup)
+	EX(l8ui, a7, a3,  1, fixup)
 	addi	a3, a3,  2
-	EX(s8i, a6, a5,  0, s_fixup)
-	EX(s8i, a7, a5,  1, s_fixup)
+	EX(s8i, a6, a5,  0, fixup)
+	EX(s8i, a7, a5,  1, fixup)
 	addi	a5, a5,  2
 .L14:
 	bbci.l	a4, 0, .L15
 	# copy 1 byte
-	EX(l8ui, a6, a3,  0, l_fixup)
-	EX(s8i,  a6, a5,  0, s_fixup)
+	EX(l8ui, a6, a3,  0, fixup)
+	EX(s8i,  a6, a5,  0, fixup)
 .L15:
 	movi	a2, 0		# return success for len bytes copied
 	retw
@@ -291,30 +291,10 @@ __xtensa_copy_user:
  *	bytes_copied = a5 - a2
  *	retval = bytes_not_copied = original len - bytes_copied
  *	retval = a11 - (a5 - a2)
- *
- * Clearing the remaining pieces of kernel memory plugs security
- * holes.  This functionality is the equivalent of the *_zeroing
- * functions that some architectures provide.
  */

-.Lmemset:	.word	memset
-
-s_fixup:
+fixup:
 	sub	a2, a5, a2	/* a2 <-- bytes copied */
 	sub	a2, a11, a2	/* a2 <-- bytes not copied */
 	retw
-
-l_fixup:
-	sub	a2, a5, a2	/* a2 <-- bytes copied */
-	sub	a2, a11, a2	/* a2 <-- bytes not copied == return value */
-
-	/* void *memset(void *s, int c, size_t n); */
-	mov	a6, a5		/* s */
-	movi	a7, 0		/* c */
-	mov	a8, a2		/* n */
-	l32r	a4, .Lmemset
-	callx4	a4
-	/* Ignore memset return value in a6. */
-	/* a2 still contains bytes not copied. */
-	retw
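With the zeroing gone, the load fixups (old l_fixup) and the store fixups (old s_fixup) collapse into a single label that only computes the return value; the memset() of the uncopied kernel tail is now the generic wrapper's job. In C terms the fixup arithmetic is just the following sketch, with register roles taken from the comment block above (a2 holds the original dst on entry, a5 the dst cursor at the faulting access, a11 the original length):

/*
 * C model of the unified 'fixup' path; a sketch, not kernel code.
 * Register roles per the comment in usercopy.S:
 *   a2  = original dst (also the return-value register)
 *   a5  = dst cursor when the fault hit
 *   a11 = original length
 */
static unsigned long fixup_model(unsigned long a2, unsigned long a5,
				 unsigned long a11)
{
	unsigned long bytes_copied = a5 - a2;	/* sub a2, a5, a2 */
	return a11 - bytes_copied;		/* sub a2, a11, a2: bytes not copied */
}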