Commit 4f18cd31
Authored Feb 05, 2014 by Al Viro
take iov_iter stuff to mm/iov_iter.c
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent: 4bafbec7
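Context for readers new to this API: an iov_iter bundles an array of user-space segments (->iov, ->nr_segs) with a running position (->iov_offset, ->count), and copy_page_to_iter() drains one page-cache page into it. The sketch below is illustrative only and not part of this commit; the function and field names come from the diff itself, while the wrapper name and direct struct initialization are assumptions (a helper like iov_iter_init() also existed in this era).

	#include <linux/pagemap.h>
	#include <linux/uio.h>

	/* Illustrative only: drain one page into a freshly built iov_iter. */
	static size_t example_read_page(struct page *page,
					const struct iovec *iov,
					unsigned long nr_segs, size_t count)
	{
		struct iov_iter it = {
			.iov		= iov,		/* user segments */
			.nr_segs	= nr_segs,
			.iov_offset	= 0,		/* start of first segment */
			.count		= count,	/* total bytes requested */
		};

		/* Returns bytes copied; advances it.iov/it.iov_offset/it.count. */
		return copy_page_to_iter(page, 0, PAGE_SIZE, &it);
	}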
Showing 3 changed files with 225 additions and 222 deletions.
mm/Makefile   +1   −1
mm/filemap.c  +0   −221
mm/iov_iter.c +224 −0
mm/Makefile
@@ -17,7 +17,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
 			   util.o mmzone.o vmstat.o backing-dev.o \
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o balloon_compaction.o \
-			   interval_tree.o list_lru.o $(mmu-y)
+			   interval_tree.o list_lru.o iov_iter.o $(mmu-y)
 
 obj-y += init-mm.o
mm/filemap.c
@@ -1085,84 +1085,6 @@ static void shrink_readahead_size_eio(struct file *filp,
 	ra->ra_pages /= 4;
 }
 
-size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
-			 struct iov_iter *i)
-{
-	size_t skip, copy, left, wanted;
-	const struct iovec *iov;
-	char __user *buf;
-	void *kaddr, *from;
-
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
-	wanted = bytes;
-	iov = i->iov;
-	skip = i->iov_offset;
-	buf = iov->iov_base + skip;
-	copy = min(bytes, iov->iov_len - skip);
-
-	if (!fault_in_pages_writeable(buf, copy)) {
-		kaddr = kmap_atomic(page);
-		from = kaddr + offset;
-
-		/* first chunk, usually the only one */
-		left = __copy_to_user_inatomic(buf, from, copy);
-		copy -= left;
-		skip += copy;
-		from += copy;
-		bytes -= copy;
-
-		while (unlikely(!left && bytes)) {
-			iov++;
-			buf = iov->iov_base;
-			copy = min(bytes, iov->iov_len);
-			left = __copy_to_user_inatomic(buf, from, copy);
-			copy -= left;
-			skip = copy;
-			from += copy;
-			bytes -= copy;
-		}
-		if (likely(!bytes)) {
-			kunmap_atomic(kaddr);
-			goto done;
-		}
-		offset = from - kaddr;
-		buf += copy;
-		kunmap_atomic(kaddr);
-		copy = min(bytes, iov->iov_len - skip);
-	}
-	/* Too bad - revert to non-atomic kmap */
-	kaddr = kmap(page);
-	from = kaddr + offset;
-	left = __copy_to_user(buf, from, copy);
-	copy -= left;
-	skip += copy;
-	from += copy;
-	bytes -= copy;
-	while (unlikely(!left && bytes)) {
-		iov++;
-		buf = iov->iov_base;
-		copy = min(bytes, iov->iov_len);
-		left = __copy_to_user(buf, from, copy);
-		copy -= left;
-		skip = copy;
-		from += copy;
-		bytes -= copy;
-	}
-	kunmap(page);
-done:
-	i->count -= wanted - bytes;
-	i->nr_segs -= iov - i->iov;
-	i->iov = iov;
-	i->iov_offset = skip;
-	return wanted - bytes;
-}
-EXPORT_SYMBOL(copy_page_to_iter);
-
 /**
  * do_generic_file_read - generic file read routine
  * @filp:	the file to read
@@ -1957,149 +1879,6 @@ struct page *read_cache_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(read_cache_page);
 
-static size_t __iovec_copy_from_user_inatomic(char *vaddr,
-			const struct iovec *iov, size_t base, size_t bytes)
-{
-	size_t copied = 0, left = 0;
-
-	while (bytes) {
-		char __user *buf = iov->iov_base + base;
-		int copy = min(bytes, iov->iov_len - base);
-
-		base = 0;
-		left = __copy_from_user_inatomic(vaddr, buf, copy);
-		copied += copy;
-		bytes -= copy;
-		vaddr += copy;
-		iov++;
-
-		if (unlikely(left))
-			break;
-	}
-	return copied - left;
-}
-
-/*
- * Copy as much as we can into the page and return the number of bytes which
- * were successfully copied.  If a fault is encountered then return the number of
- * bytes which were copied.
- */
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t copied;
-
-	kaddr = kmap_atomic(page);
-	if (likely(i->nr_segs == 1)) {
-		int left;
-		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
-		copied = bytes - left;
-	} else {
-		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-						i->iov, i->iov_offset, bytes);
-	}
-	kunmap_atomic(kaddr);
-
-	return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
-
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t copied;
-
-	kaddr = kmap(page);
-	if (likely(i->nr_segs == 1)) {
-		int left;
-		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user(kaddr + offset, buf, bytes);
-		copied = bytes - left;
-	} else {
-		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-						i->iov, i->iov_offset, bytes);
-	}
-	kunmap(page);
-	return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
-	BUG_ON(i->count < bytes);
-
-	if (likely(i->nr_segs == 1)) {
-		i->iov_offset += bytes;
-		i->count -= bytes;
-	} else {
-		const struct iovec *iov = i->iov;
-		size_t base = i->iov_offset;
-		unsigned long nr_segs = i->nr_segs;
-
-		/*
-		 * The !iov->iov_len check ensures we skip over unlikely
-		 * zero-length segments (without overruning the iovec).
-		 */
-		while (bytes || unlikely(i->count && !iov->iov_len)) {
-			int copy;
-
-			copy = min(bytes, iov->iov_len - base);
-			BUG_ON(!i->count || i->count < copy);
-			i->count -= copy;
-			bytes -= copy;
-			base += copy;
-			if (iov->iov_len == base) {
-				iov++;
-				nr_segs--;
-				base = 0;
-			}
-		}
-		i->iov = iov;
-		i->iov_offset = base;
-		i->nr_segs = nr_segs;
-	}
-}
-EXPORT_SYMBOL(iov_iter_advance);
-
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
-	char __user *buf = i->iov->iov_base + i->iov_offset;
-	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
-	return fault_in_pages_readable(buf, bytes);
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
-/*
- * Return the count of just the current iov_iter segment.
- */
-size_t iov_iter_single_seg_count(const struct iov_iter *i)
-{
-	const struct iovec *iov = i->iov;
-	if (i->nr_segs == 1)
-		return i->count;
-	else
-		return min(i->count, iov->iov_len - i->iov_offset);
-}
-EXPORT_SYMBOL(iov_iter_single_seg_count);
-
 /*
  * Performs necessary checks before doing a write
  *
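Worth noting before the new file: copy_page_to_iter() (removed above, re-added below unchanged) follows a common uaccess pattern: prefault the destination, attempt the copy under kmap_atomic() where sleeping is forbidden, and fall back to a sleeping kmap()/__copy_to_user() only when the atomic copy faults anyway. A stripped-down, single-chunk sketch of that pattern follows; it is illustrative only and not code from this patch (the helper name is made up, the called functions are the real ones of this era).

	/* Illustrative single-chunk version of the fast/slow-path split. */
	static size_t copy_chunk_to_user(struct page *page, size_t offset,
					 char __user *buf, size_t len)
	{
		void *kaddr;
		size_t left;

		if (!fault_in_pages_writeable(buf, len)) {
			/* Fast path: atomic mapping, the copy must not sleep. */
			kaddr = kmap_atomic(page);
			left = __copy_to_user_inatomic(buf, kaddr + offset, len);
			kunmap_atomic(kaddr);
			if (!left)
				return len;	/* everything landed */
		}
		/* Slow path: non-atomic mapping, the copy may fault and sleep. */
		kaddr = kmap(page);
		left = __copy_to_user(buf, kaddr + offset, len);
		kunmap(page);
		return len - left;
	}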
mm/iov_iter.c (new file, mode 100644)
@@ -0,0 +1,224 @@
+#include <linux/export.h>
+#include <linux/uio.h>
+#include <linux/pagemap.h>
+
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	size_t skip, copy, left, wanted;
+	const struct iovec *iov;
+	char __user *buf;
+	void *kaddr, *from;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	wanted = bytes;
+	iov = i->iov;
+	skip = i->iov_offset;
+	buf = iov->iov_base + skip;
+	copy = min(bytes, iov->iov_len - skip);
+
+	if (!fault_in_pages_writeable(buf, copy)) {
+		kaddr = kmap_atomic(page);
+		from = kaddr + offset;
+
+		/* first chunk, usually the only one */
+		left = __copy_to_user_inatomic(buf, from, copy);
+		copy -= left;
+		skip += copy;
+		from += copy;
+		bytes -= copy;
+
+		while (unlikely(!left && bytes)) {
+			iov++;
+			buf = iov->iov_base;
+			copy = min(bytes, iov->iov_len);
+			left = __copy_to_user_inatomic(buf, from, copy);
+			copy -= left;
+			skip = copy;
+			from += copy;
+			bytes -= copy;
+		}
+		if (likely(!bytes)) {
+			kunmap_atomic(kaddr);
+			goto done;
+		}
+		offset = from - kaddr;
+		buf += copy;
+		kunmap_atomic(kaddr);
+		copy = min(bytes, iov->iov_len - skip);
+	}
+	/* Too bad - revert to non-atomic kmap */
+	kaddr = kmap(page);
+	from = kaddr + offset;
+	left = __copy_to_user(buf, from, copy);
+	copy -= left;
+	skip += copy;
+	from += copy;
+	bytes -= copy;
+	while (unlikely(!left && bytes)) {
+		iov++;
+		buf = iov->iov_base;
+		copy = min(bytes, iov->iov_len);
+		left = __copy_to_user(buf, from, copy);
+		copy -= left;
+		skip = copy;
+		from += copy;
+		bytes -= copy;
+	}
+	kunmap(page);
+done:
+	i->count -= wanted - bytes;
+	i->nr_segs -= iov - i->iov;
+	i->iov = iov;
+	i->iov_offset = skip;
+	return wanted - bytes;
+}
+EXPORT_SYMBOL(copy_page_to_iter);
+
+static size_t __iovec_copy_from_user_inatomic(char *vaddr,
+			const struct iovec *iov, size_t base, size_t bytes)
+{
+	size_t copied = 0, left = 0;
+
+	while (bytes) {
+		char __user *buf = iov->iov_base + base;
+		int copy = min(bytes, iov->iov_len - base);
+
+		base = 0;
+		left = __copy_from_user_inatomic(vaddr, buf, copy);
+		copied += copy;
+		bytes -= copy;
+		vaddr += copy;
+		iov++;
+
+		if (unlikely(left))
+			break;
+	}
+	return copied - left;
+}
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then return the number of
+ * bytes which were copied.
+ */
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+		struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+	char *kaddr;
+	size_t copied;
+
+	kaddr = kmap_atomic(page);
+	if (likely(i->nr_segs == 1)) {
+		int left;
+		char __user *buf = i->iov->iov_base + i->iov_offset;
+		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+		copied = bytes - left;
+	} else {
+		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+						i->iov, i->iov_offset, bytes);
+	}
+	kunmap_atomic(kaddr);
+
+	return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+
+/*
+ * This has the same sideeffects and return value as
+ * iov_iter_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+size_t iov_iter_copy_from_user(struct page *page,
+		struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+	char *kaddr;
+	size_t copied;
+
+	kaddr = kmap(page);
+	if (likely(i->nr_segs == 1)) {
+		int left;
+		char __user *buf = i->iov->iov_base + i->iov_offset;
+		left = __copy_from_user(kaddr + offset, buf, bytes);
+		copied = bytes - left;
+	} else {
+		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+						i->iov, i->iov_offset, bytes);
+	}
+	kunmap(page);
+	return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user);
+
+void iov_iter_advance(struct iov_iter *i, size_t bytes)
+{
+	BUG_ON(i->count < bytes);
+
+	if (likely(i->nr_segs == 1)) {
+		i->iov_offset += bytes;
+		i->count -= bytes;
+	} else {
+		const struct iovec *iov = i->iov;
+		size_t base = i->iov_offset;
+		unsigned long nr_segs = i->nr_segs;
+
+		/*
+		 * The !iov->iov_len check ensures we skip over unlikely
+		 * zero-length segments (without overruning the iovec).
+		 */
+		while (bytes || unlikely(i->count && !iov->iov_len)) {
+			int copy;
+
+			copy = min(bytes, iov->iov_len - base);
+			BUG_ON(!i->count || i->count < copy);
+			i->count -= copy;
+			bytes -= copy;
+			base += copy;
+			if (iov->iov_len == base) {
+				iov++;
+				nr_segs--;
+				base = 0;
+			}
+		}
+		i->iov = iov;
+		i->iov_offset = base;
+		i->nr_segs = nr_segs;
+	}
+}
+EXPORT_SYMBOL(iov_iter_advance);
+
+/*
+ * Fault in the first iovec of the given iov_iter, to a maximum length
+ * of bytes. Returns 0 on success, or non-zero if the memory could not be
+ * accessed (ie. because it is an invalid address).
+ *
+ * writev-intensive code may want this to prefault several iovecs -- that
+ * would be possible (callers must not rely on the fact that _only_ the
+ * first iovec will be faulted with the current implementation).
+ */
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+	char __user *buf = i->iov->iov_base + i->iov_offset;
+	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
+	return fault_in_pages_readable(buf, bytes);
+}
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+	const struct iovec *iov = i->iov;
+	if (i->nr_segs == 1)
+		return i->count;
+	else
+		return min(i->count, iov->iov_len - i->iov_offset);
+}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
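Usage note: the write-side helpers above are driven in a prefault/atomic-copy/advance loop, as in generic_perform_write(). The sketch below is an assumption-laden illustration, not code from this patch; only the iov_iter_* calls are the real exported functions, the wrapper name is hypothetical.

	#include <linux/pagemap.h>
	#include <linux/uaccess.h>
	#include <linux/uio.h>

	/* Illustrative write-side loop body; names outside the patch are assumed. */
	static size_t example_fill_page(struct page *page, struct iov_iter *i,
					unsigned long offset, size_t bytes)
	{
		size_t copied;

		/* Touch the user pages first so the atomic copy rarely faults. */
		if (iov_iter_fault_in_readable(i, bytes))
			return 0;

		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		pagefault_enable();

		iov_iter_advance(i, copied);	/* consume what actually landed */
		return copied;
	}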