openanolis / cloud-kernel
4908b822
编写于
4月 03, 2014
作者:
A
Al Viro
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
ceph: switch to ->write_iter()
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent: 64c31311
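For background: the old prototypes took a raw iovec array plus nr_segs/count, and each write path had to call iov_iter_init() itself, whereas ->write_iter() hands the method a ready-made struct iov_iter that tracks its own remaining length and position. The sketch below is illustrative only and is not the ceph code: iov_iter_count(), copy_page_from_iter() and min_t() are the real kernel primitives the patch relies on, while my_write_iter() and its page-at-a-time staging buffer are invented for the example.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uio.h>

/*
 * Illustrative sketch, not fs/ceph/file.c: a minimal ->write_iter() body
 * showing the iov_iter calling convention this commit adopts.
 */
static ssize_t my_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t written = 0;

        pr_debug("write_iter: %zu bytes requested\n", iov_iter_count(from));

        while (iov_iter_count(from) > 0) {
                /* hypothetical staging page; a real fs would use its own buffers */
                struct page *page = alloc_page(GFP_KERNEL);
                size_t plen, copied;

                if (!page)
                        return written ? written : -ENOMEM;

                plen = min_t(size_t, iov_iter_count(from), PAGE_SIZE);
                copied = copy_page_from_iter(page, 0, plen, from); /* advances 'from' */
                /* ... a real implementation would submit the page here ... */
                __free_page(page);
                if (copied != plen)
                        return written ? written : -EFAULT;

                written += copied;
                iocb->ki_pos += copied;
        }
        return written;
}

Because copy_page_from_iter() advances the iterator as it copies, the explicit nr_segs/count bookkeeping that the old ceph_sync_write() and ceph_sync_direct_write() prototypes carried can simply disappear, which is most of what the diff below does.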
Showing 1 changed file, fs/ceph/file.c, with 26 additions and 31 deletions (+26, -31).
fs/ceph/file.c @ 4908b822
@@ -531,8 +531,7 @@ static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
  * objects, rollback on failure, etc.)
  */
-static ssize_t ceph_sync_direct_write(struct kiocb *iocb, const struct iovec *iov,
-                                      unsigned long nr_segs, size_t count)
+static ssize_t ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from)
 {
         struct file *file = iocb->ki_filp;
         struct inode *inode = file_inode(file);
@@ -549,7 +548,7 @@ ceph_sync_direct_write(struct kiocb *iocb, const struct iovec *iov,
         int ret;
         struct timespec mtime = CURRENT_TIME;
         loff_t pos = iocb->ki_pos;
-        struct iov_iter i;
+        size_t count = iov_iter_count(from);
 
         if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                 return -EROFS;
@@ -571,10 +570,8 @@ ceph_sync_direct_write(struct kiocb *iocb, const struct iovec *iov,
                 CEPH_OSD_FLAG_ONDISK |
                 CEPH_OSD_FLAG_WRITE;
 
-        iov_iter_init(&i, WRITE, iov, nr_segs, count);
-
-        while (iov_iter_count(&i) > 0) {
-                u64 len = iov_iter_single_seg_count(&i);
+        while (iov_iter_count(from) > 0) {
+                u64 len = iov_iter_single_seg_count(from);
                 size_t start;
                 ssize_t n;
@@ -592,7 +589,7 @@ ceph_sync_direct_write(struct kiocb *iocb, const struct iovec *iov,
                         break;
                 }
-                n = iov_iter_get_pages_alloc(&i, &pages, len, &start);
+                n = iov_iter_get_pages_alloc(from, &pages, len, &start);
                 if (unlikely(n < 0)) {
                         ret = n;
                         ceph_osdc_put_request(req);
@@ -623,7 +620,7 @@ ceph_sync_direct_write(struct kiocb *iocb, const struct iovec *iov,
                         break;
                 pos += n;
                 written += n;
-                iov_iter_advance(&i, n);
+                iov_iter_advance(from, n);
 
                 if (pos > i_size_read(inode)) {
                         check_caps = ceph_inode_set_size(inode, pos);
@@ -649,8 +646,7 @@ ceph_sync_direct_write(struct kiocb *iocb, const struct iovec *iov,
  * correct atomic write, we should e.g. take write locks on all
  * objects, rollback on failure, etc.)
  */
-static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, size_t count)
+static ssize_t ceph_sync_write(struct kiocb *iocb, struct iov_iter *from)
 {
         struct file *file = iocb->ki_filp;
         struct inode *inode = file_inode(file);
@@ -668,7 +664,7 @@ static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
         int ret;
         struct timespec mtime = CURRENT_TIME;
         loff_t pos = iocb->ki_pos;
-        struct iov_iter i;
+        size_t count = iov_iter_count(from);
 
         if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                 return -EROFS;
@@ -690,9 +686,7 @@ static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
                 CEPH_OSD_FLAG_WRITE |
                 CEPH_OSD_FLAG_ACK;
 
-        iov_iter_init(&i, WRITE, iov, nr_segs, count);
-
-        while ((len = iov_iter_count(&i)) > 0) {
+        while ((len = iov_iter_count(from)) > 0) {
                 size_t left;
                 int n;
@@ -724,7 +718,7 @@ static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
                 left = len;
                 for (n = 0; n < num_pages; n++) {
                         size_t plen = min_t(size_t, left, PAGE_SIZE);
-                        ret = copy_page_from_iter(pages[n], 0, plen, &i);
+                        ret = copy_page_from_iter(pages[n], 0, plen, from);
                         if (ret != plen) {
                                 ret = -EFAULT;
                                 break;
@@ -861,8 +855,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
  *
  * If we are near ENOSPC, write synchronously.
  */
-static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                              unsigned long nr_segs, loff_t pos)
+static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
         struct file *file = iocb->ki_filp;
         struct ceph_file_info *fi = file->private_data;
@@ -870,16 +863,15 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
         struct ceph_inode_info *ci = ceph_inode(inode);
         struct ceph_osd_client *osdc =
                 &ceph_sb_to_client(inode->i_sb)->client->osdc;
-        ssize_t count, written = 0;
+        ssize_t count = iov_iter_count(from), written = 0;
         int err, want, got;
+        loff_t pos = iocb->ki_pos;
 
         if (ceph_snap(inode) != CEPH_NOSNAP)
                 return -EROFS;
 
         mutex_lock(&inode->i_mutex);
 
-        count = iov_length(iov, nr_segs);
         /* We can write back this queue in page reclaim */
         current->backing_dev_info = file->f_mapping->backing_dev_info;
@@ -889,6 +881,7 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
         if (count == 0)
                 goto out;
+        iov_iter_truncate(from, count);
 
         err = file_remove_suid(file);
         if (err)
@@ -920,23 +913,26 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
         if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
             (file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
+                struct iov_iter data;
                 mutex_unlock(&inode->i_mutex);
+                /* we might need to revert back to that point */
+                data = *from;
                 if (file->f_flags & O_DIRECT)
-                        written = ceph_sync_direct_write(iocb, iov,
-                                                         nr_segs, count);
+                        written = ceph_sync_direct_write(iocb, &data);
                 else
-                        written = ceph_sync_write(iocb, iov, nr_segs, count);
+                        written = ceph_sync_write(iocb, &data);
                 if (written == -EOLDSNAPC) {
                         dout("aio_write %p %llx.%llx %llu~%u"
                                 "got EOLDSNAPC, retrying\n",
                                 inode, ceph_vinop(inode),
-                                pos, (unsigned)iov->iov_len);
+                                pos, (unsigned)count);
                         mutex_lock(&inode->i_mutex);
                         goto retry_snap;
                 }
+                if (written > 0)
+                        iov_iter_advance(from, written);
         } else {
                 loff_t old_size = inode->i_size;
-                struct iov_iter from;
                 /*
                  * No need to acquire the i_truncate_mutex. Because
                  * the MDS revokes Fwb caps before sending truncate
@@ -944,8 +940,7 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
                  * are pending vmtruncate. So write and vmtruncate
                  * can not run at the same time
                  */
-                iov_iter_init(&from, WRITE, iov, nr_segs, count);
-                written = generic_perform_write(file, &from, pos);
+                written = generic_perform_write(file, from, pos);
                 if (likely(written >= 0))
                         iocb->ki_pos = pos + written;
                 if (inode->i_size > old_size)
@@ -963,7 +958,7 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
         }
 
         dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
-             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
+             inode, ceph_vinop(inode), pos, (unsigned)count,
              ceph_cap_string(got));
         ceph_put_cap_refs(ci, got);
@@ -1241,9 +1236,9 @@ const struct file_operations ceph_file_fops = {
         .release = ceph_release,
         .llseek = ceph_llseek,
         .read = new_sync_read,
-        .write = do_sync_write,
+        .write = new_sync_write,
         .read_iter = ceph_read_iter,
-        .aio_write = ceph_aio_write,
+        .write_iter = ceph_write_iter,
         .mmap = ceph_mmap,
         .fsync = ceph_fsync,
         .lock = ceph_lock,
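The final hunk rewires ceph_file_fops so that plain write(2) reaches the new method through the generic new_sync_write() bridge instead of do_sync_write(), and .write_iter replaces .aio_write. Below is a rough, simplified paraphrase of what such a bridge does, for illustration only: error and async-completion details are omitted, the function name sync_write_via_iter() is invented for the example, while init_sync_kiocb(), iov_iter_init() and ->write_iter() are the real interfaces of that era.

#include <linux/aio.h>
#include <linux/fs.h>
#include <linux/uio.h>

/*
 * Rough sketch of a new_sync_write()-style bridge: wrap the user buffer in a
 * single-segment iov_iter, build a synchronous kiocb, and call ->write_iter().
 */
static ssize_t sync_write_via_iter(struct file *filp, const char __user *buf,
                                   size_t len, loff_t *ppos)
{
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
        struct kiocb kiocb;
        struct iov_iter iter;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);          /* synchronous, no AIO context */
        kiocb.ki_pos = *ppos;
        iov_iter_init(&iter, WRITE, &iov, 1, len);

        ret = filp->f_op->write_iter(&kiocb, &iter);
        if (ret > 0)
                *ppos = kiocb.ki_pos;           /* ->write_iter() advanced ki_pos */
        return ret;
}

With both sync write helpers now taking an iov_iter, the same ceph_write_iter() entry point serves the write(2), writev(2) and AIO paths alike; the data = *from copy in the O_DIRECT/sync branch keeps a private snapshot of the iterator so that, after the helper returns, the original iterator is advanced only by the number of bytes actually written.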