Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
OpenHarmony
kernel_linux
提交
4df3715e
K
kernel_linux
项目概览
OpenHarmony
/
kernel_linux
上一次同步 4 年多
通知
15
Star
8
Fork
2
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
K
kernel_linux
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
提交
4df3715e
编写于
3月 20, 2017
作者:
A
Al Viro
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
mn10300: get rid of zeroing
Signed-off-by:
N
Al Viro
<
viro@zeniv.linux.org.uk
>
上级
6c03905a
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
20 additions
and
141 deletions
+20
-141
arch/mn10300/include/asm/uaccess.h
arch/mn10300/include/asm/uaccess.h
+14
-136
arch/mn10300/lib/usercopy.c
arch/mn10300/lib/usercopy.c
+6
-5
未找到文件。
arch/mn10300/include/asm/uaccess.h
浏览文件 @
4df3715e
...
...
@@ -275,55 +275,19 @@ do { \
} \
} while (0)
/*
 * __copy_user_zeroing(to, from, size): byte-by-byte copy of `size` bytes
 * from `from` to `to` with exception fixup.  If a fault occurs mid-copy,
 * the fixup path (label 3) zero-fills the remaining destination bytes
 * and restores the uncopied count into `size`; on full success `size`
 * decrements to 0.  So after expansion, `size` holds the number of bytes
 * NOT copied.  The 0b/1b entries in __ex_table route faults on either
 * the user-side load or the store to the fixup at 3.
 * NOTE(review): this macro is the deletion side of this commit — the
 * zero-fill-on-fault responsibility moves into __generic_copy_from_user.
 */
#define __copy_user_zeroing(to, from, size) \
do { \
if (size) { \
void *__to = to; \
const void *__from = from; \
int w; \
asm volatile( \
"0: movbu (%0),%3;\n" \
"1: movbu %3,(%1);\n" \
" inc %0;\n" \
" inc %1;\n" \
" add -1,%2;\n" \
" bne 0b;\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
"3:\n" \
" mov %2,%0\n" \
" clr %3\n" \
"4: movbu %3,(%1);\n" \
" inc %1;\n" \
" add -1,%2;\n" \
" bne 4b;\n" \
" mov %0,%2\n" \
" jmp 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .balign 4\n" \
" .long 0b,3b\n" \
" .long 1b,3b\n" \
" .previous\n" \
: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
: "0"(__from), "1"(__to), "2"(size) \
: "cc", "memory"); \
} \
} while (0)
/* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead.
*/
static
inline
unsigned
long
__
generic_copy_from_user_nocheck
(
void
*
to
,
const
void
*
from
,
unsigned
long
__
copy_from_user_inatomic
(
void
*
to
,
const
void
__user
*
from
,
unsigned
long
n
)
{
__copy_user
_zeroing
(
to
,
from
,
n
);
__copy_user
(
to
,
from
,
n
);
return
n
;
}
static
inline
unsigned
long
__
generic_copy_to_user_nocheck
(
void
*
to
,
const
void
*
from
,
unsigned
long
__
copy_to_user_inatomic
(
void
__user
*
to
,
const
void
*
from
,
unsigned
long
n
)
{
__copy_user
(
to
,
from
,
n
);
...
...
@@ -331,110 +295,24 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
}
#if 0
#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
/*
 * __constant_copy_user(to, from, size): dead code (#if 0).  The #error
 * above documents why it must not be used: the asm loop decrements the
 * count in a0 but never increments the to/from pointers, so every
 * iteration reads and writes the same byte.  On a fault, the fixup
 * ("3: jmp 2b") simply abandons the copy.  Kept only as history.
 */
#define __constant_copy_user(to, from, size) \
do { \
asm volatile( \
" mov %0,a0;\n" \
"0: movbu (%1),d3;\n" \
"1: movbu d3,(%2);\n" \
" add -1,a0;\n" \
" bne 0b;\n" \
"2:;" \
".section .fixup,\"ax\"\n" \
"3: jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .balign 4\n" \
" .long 0b,3b\n" \
" .long 1b,3b\n" \
".previous" \
: \
: "d"(size), "d"(to), "d"(from) \
: "d3", "a0"); \
} while (0)
/* Optimize just a little bit when we know the size of the move. */
/*
 * __constant_copy_user_zeroing(to, from, size): dead code (#if 0), with
 * the same broken non-incrementing loop as __constant_copy_user.
 * Despite the name, the fixup path ("3: jmp 2b") performs no zeroing at
 * all — it just bails out of the copy on a fault.  The body is in fact
 * byte-for-byte identical to __constant_copy_user.
 */
#define __constant_copy_user_zeroing(to, from, size) \
do { \
asm volatile( \
" mov %0,a0;\n" \
"0: movbu (%1),d3;\n" \
"1: movbu d3,(%2);\n" \
" add -1,a0;\n" \
" bne 0b;\n" \
"2:;" \
".section .fixup,\"ax\"\n" \
"3: jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .balign 4\n" \
" .long 0b,3b\n" \
" .long 1b,3b\n" \
".previous" \
: \
: "d"(size), "d"(to), "d"(from) \
: "d3", "a0"); \
} while (0)
/*
 * Dead (#if 0) checked variant: copy n bytes to user space only after
 * the destination range passes access_ok().  Always returns n.
 */
static inline
unsigned long __constant_copy_to_user(void *to, const void *from,
				      unsigned long n)
{
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;
	__constant_copy_user(to, from, n);
	return n;
}
/*
 * Dead (#if 0) checked variant: copy n bytes from user space only after
 * the source range passes access_ok().  Always returns n.
 */
static inline
unsigned long __constant_copy_from_user(void *to, const void *from,
					unsigned long n)
{
	if (!access_ok(VERIFY_READ, from, n))
		return n;
	__constant_copy_user_zeroing(to, from, n);
	return n;
}
extern
unsigned
long
__generic_copy_to_user
(
void
__user
*
,
const
void
*
,
unsigned
long
);
extern
unsigned
long
__generic_copy_from_user
(
void
*
,
const
void
__user
*
,
unsigned
long
);
/*
 * Copy n bytes from kernel to user space on a path that may fault and
 * hence sleep: might_fault() documents/checks that, then the work is
 * delegated to the atomic-context primitive.  Returns the number of
 * bytes not copied (0 on success).
 *
 * NOTE(review): this span interleaved the removed
 * __constant_copy_to_user_nocheck with the new __copy_to_user;
 * reconstructed here as the post-commit definition.
 */
static inline
unsigned long __copy_to_user(void __user *to, const void *from,
			     unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}
/*
 * Copy n bytes from user space to kernel on a path that may fault and
 * hence sleep: might_fault() documents/checks that, then the work is
 * delegated to the atomic-context primitive.  Returns the number of
 * bytes not copied (0 on success).
 *
 * NOTE(review): this span interleaved the removed
 * __constant_copy_from_user_nocheck with the new __copy_from_user;
 * reconstructed here as the post-commit definition.
 */
static inline
unsigned long __copy_from_user(void *to, const void __user *from,
			       unsigned long n)
{
	might_fault();
	return __copy_from_user_inatomic(to, from, n);
}
#endif
extern
unsigned
long
__generic_copy_to_user
(
void
__user
*
,
const
void
*
,
unsigned
long
);
extern
unsigned
long
__generic_copy_from_user
(
void
*
,
const
void
__user
*
,
unsigned
long
);
/*
 * NOTE(review): these appear to be the pre-commit (removed) macro forms
 * shown in the diff — the *_inatomic primitives were thin wrappers around
 * the __generic_*_nocheck helpers, and the faultable variants added only
 * a might_fault() annotation on top.
 */
#define __copy_to_user_inatomic(to, from, n) \
__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
__generic_copy_from_user_nocheck((to), (from), (n))
/* May fault/sleep: annotate before delegating to the inatomic form. */
#define __copy_to_user(to, from, n) \
({ \
might_fault(); \
__copy_to_user_inatomic((to), (from), (n)); \
})
#define __copy_from_user(to, from, n) \
({ \
might_fault(); \
__copy_from_user_inatomic((to), (from), (n)); \
})
/* Checked variant; presumably __generic_copy_to_user performs the
 * access_ok() test itself (cf. __generic_copy_from_user in usercopy.c). */
#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
...
...
arch/mn10300/lib/usercopy.c
浏览文件 @
4df3715e
...
...
@@ -22,11 +22,12 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n)
/*
 * Copy n bytes from user space into the kernel buffer `to`.
 *
 * Returns the number of bytes that could NOT be copied (0 on success).
 * Any destination bytes that were not written — because access_ok()
 * rejected the range or the copy faulted part-way — are zero-filled so
 * the caller never sees stale kernel memory.
 *
 * NOTE(review): this span was a token-exploded interleaving of the old
 * (__copy_user_zeroing-based) and new bodies; reconstructed here as the
 * post-commit version, where the zeroing is done in C via memset.
 */
unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
	unsigned long res = n;

	if (access_ok(VERIFY_READ, from, res))
		__copy_user(to, from, res);	/* res becomes bytes left uncopied */
	if (unlikely(res))
		memset(to + n - res, 0, res);
	return res;
}
/*
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录