openeuler / Kernel
Commit 98797a24

Authored by Russell King on Jun 14, 2009
Committed by Russell King on Jun 14, 2009

Merge branch 'copy_user' of git://git.marvell.com/orion into devel

Parents: ca8cbc83, c626e3f5

Showing 6 changed files with 253 additions and 2 deletions (+253 -2)
arch/arm/Kconfig                      +16   -0
arch/arm/include/asm/uaccess.h         +2   -0
arch/arm/lib/Makefile                  +3   -0
arch/arm/lib/clear_user.S              +2   -1
arch/arm/lib/copy_to_user.S            +2   -1
arch/arm/lib/uaccess_with_memcpy.c   +228   -0
arch/arm/Kconfig
@@ -1091,6 +1091,22 @@ config ALIGNMENT_TRAP
 	  correct operation of some network protocols. With an IP-only
 	  configuration it is safe to say N, otherwise say Y.
 
+config UACCESS_WITH_MEMCPY
+	bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user() (EXPERIMENTAL)"
+	depends on MMU && EXPERIMENTAL
+	default y if CPU_FEROCEON
+	help
+	  Implement faster copy_to_user and clear_user methods for CPU
+	  cores where a 8-word STM instruction give significantly higher
+	  memory write throughput than a sequence of individual 32bit stores.
+
+	  A possible side effect is a slight increase in scheduling latency
+	  between threads sharing the same address space if they invoke
+	  such copy operations with large buffers.
+
+	  However, if the CPU data cache is using a write-allocate mode,
+	  this option is unlikely to provide any performance gain.
+
 endmenu
 
 menu "Boot options"
arch/arm/include/asm/uaccess.h
@@ -386,7 +386,9 @@ do { \
 #ifdef CONFIG_MMU
 extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
 #else
 #define __copy_from_user(to,from,n)	(memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to,from,n)	(memcpy((void __force *)to, from, n), 0)
arch/arm/lib/Makefile
@@ -29,6 +29,9 @@ else
 endif
 endif
 
+# using lib_ here won't override already available weak symbols
+obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
+
 lib-$(CONFIG_MMU) += $(mmu-y)
 
 ifeq ($(CONFIG_CPU_32v3),y)
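The Makefile comment above points at the link-time mechanism the series relies on: the assembly files below keep the standard routines under *_std names and re-export __copy_to_user/__clear_user as WEAK symbols, so the strong definitions in uaccess_with_memcpy.c (always linked via obj-, since members of lib.a would never be pulled in to replace an already-defined weak symbol) override them. A minimal user-space sketch of that weak/strong override, with the hypothetical name do_copy standing in for the real symbols:

/*
 * Hypothetical user-space sketch of the link-time override this patch uses.
 * Build: gcc -o override_demo weak_default.c strong_override.c main.c
 * (file and function names here are illustrative, not the kernel's build setup).
 */

/* weak_default.c -- plays the role of ENTRY(__copy_to_user_std)/WEAK(__copy_to_user) */
#include <stdio.h>

__attribute__((weak)) void do_copy(void)
{
	puts("standard (weak) copy path");
}

/* strong_override.c -- plays the role of uaccess_with_memcpy.c, pulled in via obj- */
#include <stdio.h>

void do_copy(void)
{
	puts("memcpy-based (strong) copy path");
}

/* main.c */
extern void do_copy(void);

int main(void)
{
	do_copy();	/* resolves to the strong definition when strong_override.c is linked */
	return 0;
}

Leaving strong_override.c out of the link falls back to the weak default, which mirrors a build with CONFIG_UACCESS_WITH_MEMCPY disabled, where uaccess_with_memcpy.o is never built.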
arch/arm/lib/clear_user.S
@@ -18,7 +18,8 @@
 *		: sz	- number of bytes to clear
 * Returns	: number of bytes NOT cleared
 */
-ENTRY(__clear_user)
+ENTRY(__clear_user_std)
+WEAK(__clear_user)
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		cmp	r1, #4
arch/arm/lib/copy_to_user.S
@@ -86,7 +86,8 @@
 	.text
 
-ENTRY(__copy_to_user)
+ENTRY(__copy_to_user_std)
+WEAK(__copy_to_user)
 
 #include "copy_template.S"
arch/arm/lib/uaccess_with_memcpy.c (new file, mode 0 → 100644)
/*
* linux/arch/arm/lib/uaccess_with_memcpy.c
*
* Written by: Lennert Buytenhek and Nicolas Pitre
* Copyright (C) 2009 Marvell Semiconductor
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <asm/current.h>
#include <asm/page.h>
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}

static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	int atomic;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = in_atomic();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}

		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memcpy((void *)to, from, tocopy);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well making this test almost invisible.
	 */
	if (n < 64)
		return __copy_to_user_std(to, from, n);
	return __copy_to_user_memcpy(to, from, n);
}

static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset((void *)addr, 0, n);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}

		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memset((void *)addr, 0, tocopy);
		addr += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long __clear_user(void __user *addr, unsigned long n)
{
	/* See rational for this in __copy_to_user() above. */
	if (n < 64)
		return __clear_user_std(addr, n);
	return __clear_user_memset(addr, n);
}
#if 0
/*
* This code is disabled by default, but kept around in case the chosen
* thresholds need to be revalidated. Some overhead (small but still)
* would be implied by a runtime determined variable threshold, and
* so far the measurement on concerned targets didn't show a worthwhile
* variation.
*
* Note that a fairly precise sched_clock() implementation is needed
* for results to make some sense.
*/
#include <linux/vmalloc.h>
static int __init test_size_treshold(void)
{
struct page *src_page, *dst_page;
void *user_ptr, *kernel_ptr;
unsigned long long t0, t1, t2;
int size, ret;
ret = -ENOMEM;
src_page = alloc_page(GFP_KERNEL);
if (!src_page)
goto no_src;
dst_page = alloc_page(GFP_KERNEL);
if (!dst_page)
goto no_dst;
kernel_ptr = page_address(src_page);
user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
if (!user_ptr)
goto no_vmap;
/* warm up the src page dcache */
ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);
for (size = PAGE_SIZE; size >= 4; size /= 2) {
t0 = sched_clock();
ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
t1 = sched_clock();
ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
t2 = sched_clock();
printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
}
for (size = PAGE_SIZE; size >= 4; size /= 2) {
t0 = sched_clock();
ret |= __clear_user_memset(user_ptr, size);
t1 = sched_clock();
ret |= __clear_user_std(user_ptr, size);
t2 = sched_clock();
printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
}
if (ret)
ret = -EFAULT;
vunmap(user_ptr);
no_vmap:
put_page(dst_page);
no_dst:
put_page(src_page);
no_src:
return ret;
}
subsys_initcall(test_size_treshold);
#endif
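Both copy loops in the new file bound each memcpy()/memset() to the page whose PTE was just pinned: tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1 is the number of bytes from addr up to and including the last byte of its page. A small user-space check of that arithmetic (a hedged sketch; PAGE_SIZE is fixed at 4 KiB here and bytes_to_page_end is an illustrative name, not part of the patch):

/*
 * Hypothetical user-space check of the per-page chunk computation used in
 * __copy_to_user_memcpy()/__clear_user_memset(). PAGE_SIZE/PAGE_MASK are
 * defined locally for a 4 KiB page, mirroring the kernel definitions.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static unsigned long bytes_to_page_end(unsigned long addr)
{
	/* same expression as the patch: distance to the end of addr's page */
	return (~addr & ~PAGE_MASK) + 1;
}

int main(void)
{
	/* at a page boundary a full page can be handled in one chunk */
	assert(bytes_to_page_end(0x1000) == PAGE_SIZE);
	/* the last byte of a page leaves exactly one byte in this chunk */
	assert(bytes_to_page_end(0x1fff) == 1);
	/* mid-page: 0x1234 up to 0x2000 is 0xdcc bytes */
	assert(bytes_to_page_end(0x1234) == 0xdcc);

	printf("chunk sizes: %lu %lu %lu\n",
	       bytes_to_page_end(0x1000),
	       bytes_to_page_end(0x1fff),
	       bytes_to_page_end(0x1234));
	return 0;
}

Clamping the result to n, as the patch does next, then yields the largest chunk that stays within both the remaining buffer and the currently pinned page.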