Commit a8324754 (openanolis/cloud-kernel)
Author:  Al Viro
Date:    May 06, 2014
Parents: 1456c0a8 f5ccfe1d

Merge ext4 changes in ext4_file_write() into for-next

From ext4.git#dev, needed for switch of ext4 to ->write_iter() ;-/
9 changed files with 244 additions and 250 deletions (+244, -250)
fs/ext4/ext4.h                  +11  -13
fs/ext4/ext4_extents.h          +11  -11
fs/ext4/extents.c               +109 -113
fs/ext4/extents_status.c        +1   -1
fs/ext4/file.c                  +75  -75
fs/ext4/inode.c                 +13  -12
fs/ext4/move_extent.c           +19  -19
fs/ext4/super.c                 +1   -1
include/trace/events/ext4.h     +4   -5
fs/ext4/ext4.h

@@ -158,7 +158,6 @@ struct ext4_allocation_request {
 #define EXT4_MAP_MAPPED		(1 << BH_Mapped)
 #define EXT4_MAP_UNWRITTEN	(1 << BH_Unwritten)
 #define EXT4_MAP_BOUNDARY	(1 << BH_Boundary)
-#define EXT4_MAP_UNINIT		(1 << BH_Uninit)
 /* Sometimes (in the bigalloc case, from ext4_da_get_block_prep) the caller of
  * ext4_map_blocks wants to know whether or not the underlying cluster has
  * already been accounted for. EXT4_MAP_FROM_CLUSTER conveys to the caller that
@@ -169,7 +168,7 @@ struct ext4_allocation_request {
 #define EXT4_MAP_FROM_CLUSTER	(1 << BH_AllocFromCluster)
 #define EXT4_MAP_FLAGS		(EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
 				 EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
-				 EXT4_MAP_UNINIT | EXT4_MAP_FROM_CLUSTER)
+				 EXT4_MAP_FROM_CLUSTER)

 struct ext4_map_blocks {
 	ext4_fsblk_t m_pblk;
@@ -184,7 +183,7 @@ struct ext4_map_blocks {
 #define EXT4_IO_END_UNWRITTEN	0x0001

 /*
- * For converting uninitialized extents on a work queue. 'handle' is used for
+ * For converting unwritten extents on a work queue. 'handle' is used for
  * buffered writeback.
  */
 typedef struct ext4_io_end {
@@ -537,26 +536,26 @@ enum {
 /*
  * Flags used by ext4_map_blocks()
  */
-	/* Allocate any needed blocks and/or convert an unitialized
+	/* Allocate any needed blocks and/or convert an unwritten
 	   extent to be an initialized ext4 */
 #define EXT4_GET_BLOCKS_CREATE			0x0001
-	/* Request the creation of an unitialized extent */
-#define EXT4_GET_BLOCKS_UNINIT_EXT		0x0002
-#define EXT4_GET_BLOCKS_CREATE_UNINIT_EXT	(EXT4_GET_BLOCKS_UNINIT_EXT|\
+	/* Request the creation of an unwritten extent */
+#define EXT4_GET_BLOCKS_UNWRIT_EXT		0x0002
+#define EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT	(EXT4_GET_BLOCKS_UNWRIT_EXT|\
 						 EXT4_GET_BLOCKS_CREATE)
 	/* Caller is from the delayed allocation writeout path
 	 * finally doing the actual allocation of delayed blocks */
 #define EXT4_GET_BLOCKS_DELALLOC_RESERVE	0x0004
 	/* caller is from the direct IO path, request to creation of an
-	unitialized extents if not allocated, split the uninitialized
+	unwritten extents if not allocated, split the unwritten
 	extent if blocks has been preallocated already*/
 #define EXT4_GET_BLOCKS_PRE_IO			0x0008
 #define EXT4_GET_BLOCKS_CONVERT			0x0010
 #define EXT4_GET_BLOCKS_IO_CREATE_EXT		(EXT4_GET_BLOCKS_PRE_IO|\
-					 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
+					 EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
 	/* Convert extent to initialized after IO complete */
 #define EXT4_GET_BLOCKS_IO_CONVERT_EXT	(EXT4_GET_BLOCKS_CONVERT|\
-					 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
+					 EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
 	/* Eventual metadata allocation (due to growing extent tree)
 	 * should not fail, so try to use reserved blocks for that.*/
 #define EXT4_GET_BLOCKS_METADATA_NOFAIL	0x0020
@@ -2783,10 +2782,9 @@ extern int ext4_mmp_csum_verify(struct super_block *sb,
  * See EXT4_MAP_... to see where this is used.
  */
 enum ext4_state_bits {
-	BH_Uninit	/* blocks are allocated but uninitialized on disk */
-	  = BH_JBDPrivateStart,
-	BH_AllocFromCluster,	/* allocated blocks were part of already
-				 * allocated cluster. */
+	BH_AllocFromCluster	/* allocated blocks were part of already
+				 * allocated cluster. */
+	  = BH_JBDPrivateStart
 };

 /*
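The hunk above renames the EXT4_GET_BLOCKS_UNINIT_EXT request flag to EXT4_GET_BLOCKS_UNWRIT_EXT while keeping the composite flags as plain bitwise ORs. A minimal user-space sketch of how those composites decompose, with the values mirrored from the header purely for illustration (not kernel code):

#include <stdio.h>

/* Values mirrored from fs/ext4/ext4.h after this change. */
#define EXT4_GET_BLOCKS_CREATE            0x0001
#define EXT4_GET_BLOCKS_UNWRIT_EXT        0x0002
#define EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT (EXT4_GET_BLOCKS_UNWRIT_EXT | \
                                           EXT4_GET_BLOCKS_CREATE)
#define EXT4_GET_BLOCKS_PRE_IO            0x0008
#define EXT4_GET_BLOCKS_IO_CREATE_EXT     (EXT4_GET_BLOCKS_PRE_IO | \
                                           EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)

int main(void)
{
        int flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;

        /* The composite expands to PRE_IO | UNWRIT_EXT | CREATE = 0x000b. */
        printf("flags = 0x%04x\n", flags);
        printf("unwritten extent creation requested: %s\n",
               (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) ? "yes" : "no");
        return 0;
}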
fs/ext4/ext4_extents.h

@@ -137,21 +137,21 @@ struct ext4_ext_path {
  * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
  * initialized extent. This is 2^15 and not (2^16 - 1), since we use the
  * MSB of ee_len field in the extent datastructure to signify if this
- * particular extent is an initialized extent or an uninitialized (i.e.
+ * particular extent is an initialized extent or an unwritten (i.e.
  * preallocated).
- * EXT_UNINIT_MAX_LEN is the maximum number of blocks we can have in an
- * uninitialized extent.
+ * EXT_UNWRITTEN_MAX_LEN is the maximum number of blocks we can have in an
+ * unwritten extent.
  * If ee_len is <= 0x8000, it is an initialized extent. Otherwise, it is an
- * uninitialized one. In other words, if MSB of ee_len is set, it is an
- * uninitialized extent with only one special scenario when ee_len = 0x8000.
- * In this case we can not have an uninitialized extent of zero length and
+ * unwritten one. In other words, if MSB of ee_len is set, it is an
+ * unwritten extent with only one special scenario when ee_len = 0x8000.
+ * In this case we can not have an unwritten extent of zero length and
  * thus we make it as a special case of initialized extent with 0x8000 length.
  * This way we get better extent-to-group alignment for initialized extents.
  * Hence, the maximum number of blocks we can have in an *initialized*
- * extent is 2^15 (32768) and in an *uninitialized* extent is 2^15-1 (32767).
+ * extent is 2^15 (32768) and in an *unwritten* extent is 2^15-1 (32767).
  */
 #define EXT_INIT_MAX_LEN	(1UL << 15)
-#define EXT_UNINIT_MAX_LEN	(EXT_INIT_MAX_LEN - 1)
+#define EXT_UNWRITTEN_MAX_LEN	(EXT_INIT_MAX_LEN - 1)

 #define EXT_FIRST_EXTENT(__hdr__) \
@@ -187,14 +187,14 @@ static inline unsigned short ext_depth(struct inode *inode)
 	return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
 }

-static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
+static inline void ext4_ext_mark_unwritten(struct ext4_extent *ext)
 {
-	/* We can not have an uninitialized extent of zero length! */
+	/* We can not have an unwritten extent of zero length! */
 	BUG_ON((le16_to_cpu(ext->ee_len) & ~EXT_INIT_MAX_LEN) == 0);
 	ext->ee_len |= cpu_to_le16(EXT_INIT_MAX_LEN);
 }

-static inline int ext4_ext_is_uninitialized(struct ext4_extent *ext)
+static inline int ext4_ext_is_unwritten(struct ext4_extent *ext)
 {
 	/* Extent with ee_len of 0x8000 is treated as an initialized extent */
 	return (le16_to_cpu(ext->ee_len) > EXT_INIT_MAX_LEN);
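The comment block above explains that the MSB of the 16-bit ee_len field marks an unwritten (preallocated) extent, which is why an initialized extent can hold 2^15 blocks but an unwritten one only 2^15-1. A small user-space sketch of that encoding, using a plain uint16_t in place of the on-disk little-endian field (an assumption made only to keep the sketch self-contained):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EXT_INIT_MAX_LEN      (1UL << 15)
#define EXT_UNWRITTEN_MAX_LEN (EXT_INIT_MAX_LEN - 1)

/* Set the MSB to mark the extent unwritten; the length must be non-zero. */
static void mark_unwritten(uint16_t *ee_len)
{
        assert((*ee_len & ~EXT_INIT_MAX_LEN) != 0);
        *ee_len |= EXT_INIT_MAX_LEN;
}

/* ee_len == 0x8000 is still treated as an initialized extent of max length. */
static int is_unwritten(uint16_t ee_len)
{
        return ee_len > EXT_INIT_MAX_LEN;
}

/* Strip the flag bit to recover the actual block count. */
static uint16_t actual_len(uint16_t ee_len)
{
        return is_unwritten(ee_len) ? (uint16_t)(ee_len - EXT_INIT_MAX_LEN) : ee_len;
}

int main(void)
{
        uint16_t ee_len = 100;          /* a 100-block extent */

        mark_unwritten(&ee_len);
        printf("raw=0x%04x unwritten=%d len=%u\n",
               (unsigned)ee_len, is_unwritten(ee_len), (unsigned)actual_len(ee_len));
        return 0;
}

With these rules a raw value of 0x8064 decodes as an unwritten extent of 100 blocks, while 0x8000 decodes as an initialized extent of 32768 blocks.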
fs/ext4/extents.c

This diff is collapsed.
fs/ext4/extents_status.c

@@ -433,7 +433,7 @@ static void ext4_es_insert_extent_ext_check(struct inode *inode,
 	ee_start = ext4_ext_pblock(ex);
 	ee_len = ext4_ext_get_actual_len(ex);

-	ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0;
+	ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
 	es_status = ext4_es_is_unwritten(es) ? 1 : 0;

 	/*
fs/ext4/file.c

@@ -92,58 +92,91 @@ ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
 }

 static ssize_t
-ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
-		    unsigned long nr_segs, loff_t pos)
+ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
+		unsigned long nr_segs, loff_t pos)
 {
 	struct file *file = iocb->ki_filp;
-	struct inode *inode = file->f_mapping->host;
+	struct inode *inode = file_inode(iocb->ki_filp);
+	struct mutex *aio_mutex = NULL;
 	struct blk_plug plug;
-	int unaligned_aio = 0;
-	ssize_t ret;
+	int o_direct = file->f_flags & O_DIRECT;
 	int overwrite = 0;
 	size_t length = iov_length(iov, nr_segs);
+	ssize_t ret;

-	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
-	    !is_sync_kiocb(iocb))
-		unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
+	BUG_ON(iocb->ki_pos != pos);

-	/* Unaligned direct AIO must be serialized; see comment above */
-	if (unaligned_aio) {
-		mutex_lock(ext4_aio_mutex(inode));
+	/*
+	 * Unaligned direct AIO must be serialized; see comment above
+	 * In the case of O_APPEND, assume that we must always serialize
+	 */
+	if (o_direct &&
+	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
+	    !is_sync_kiocb(iocb) &&
+	    (file->f_flags & O_APPEND ||
+	     ext4_unaligned_aio(inode, iov, nr_segs, pos))) {
+		aio_mutex = ext4_aio_mutex(inode);
+		mutex_lock(aio_mutex);
 		ext4_unwritten_wait(inode);
 	}

-	BUG_ON(iocb->ki_pos != pos);
-
 	mutex_lock(&inode->i_mutex);
-	blk_start_plug(&plug);
+	if (file->f_flags & O_APPEND)
+		iocb->ki_pos = pos = i_size_read(inode);
+
+	/*
+	 * If we have encountered a bitmap-format file, the size limit
+	 * is smaller than s_maxbytes, which is for extent-mapped files.
+	 */
+	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

-	iocb->private = &overwrite;
+		if ((pos > sbi->s_bitmap_maxbytes) ||
+		    (pos == sbi->s_bitmap_maxbytes && length > 0)) {
+			mutex_unlock(&inode->i_mutex);
+			ret = -EFBIG;
+			goto errout;
+		}

-	/* check whether we do a DIO overwrite or not */
-	if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
-	    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
-		struct ext4_map_blocks map;
-		unsigned int blkbits = inode->i_blkbits;
-		int err, len;
+		if (pos + length > sbi->s_bitmap_maxbytes) {
+			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
+					      sbi->s_bitmap_maxbytes - pos);
+		}
+	}

-		map.m_lblk = pos >> blkbits;
-		map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
-			- map.m_lblk;
-		len = map.m_len;
+	if (o_direct) {
+		blk_start_plug(&plug);

-		err = ext4_map_blocks(NULL, inode, &map, 0);
-		/*
-		 * 'err==len' means that all of blocks has been preallocated no
-		 * matter they are initialized or not.  For excluding
-		 * uninitialized extents, we need to check m_flags.  There are
-		 * two conditions that indicate for initialized extents.
-		 * 1) If we hit extent cache, EXT4_MAP_MAPPED flag is returned;
-		 * 2) If we do a real lookup, non-flags are returned.
-		 * So we should check these two conditions.
-		 */
-		if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
-			overwrite = 1;
+		iocb->private = &overwrite;
+
+		/* check whether we do a DIO overwrite or not */
+		if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
+		    !file->f_mapping->nrpages &&
+		    pos + length <= i_size_read(inode)) {
+			struct ext4_map_blocks map;
+			unsigned int blkbits = inode->i_blkbits;
+			int err, len;
+
+			map.m_lblk = pos >> blkbits;
+			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
+				- map.m_lblk;
+			len = map.m_len;
+
+			err = ext4_map_blocks(NULL, inode, &map, 0);
+			/*
+			 * 'err==len' means that all of blocks has
+			 * been preallocated no matter they are
+			 * initialized or not.  For excluding
+			 * unwritten extents, we need to check
+			 * m_flags.  There are two conditions that
+			 * indicate for initialized extents.  1) If we
+			 * hit extent cache, EXT4_MAP_MAPPED flag is
+			 * returned; 2) If we do a real lookup,
+			 * non-flags are returned.  So we should check
+			 * these two conditions.
+			 */
+			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
+				overwrite = 1;
+		}
 	}

 	ret = __generic_file_aio_write(iocb, iov, nr_segs);

@@ -156,45 +189,12 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
 		if (err < 0)
 			ret = err;
 	}
-	blk_finish_plug(&plug);
-
-	if (unaligned_aio)
-		mutex_unlock(ext4_aio_mutex(inode));
-
-	return ret;
-}
-
-static ssize_t
-ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
-		unsigned long nr_segs, loff_t pos)
-{
-	struct inode *inode = file_inode(iocb->ki_filp);
-	ssize_t ret;
-
-	/*
-	 * If we have encountered a bitmap-format file, the size limit
-	 * is smaller than s_maxbytes, which is for extent-mapped files.
-	 */
-	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
-		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-		size_t length = iov_length(iov, nr_segs);
-
-		if ((pos > sbi->s_bitmap_maxbytes ||
-		    (pos == sbi->s_bitmap_maxbytes && length > 0)))
-			return -EFBIG;
-
-		if (pos + length > sbi->s_bitmap_maxbytes) {
-			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
-					      sbi->s_bitmap_maxbytes - pos);
-		}
-	}
-
-	if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
-		ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
-	else
-		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
+	if (o_direct)
+		blk_finish_plug(&plug);

+errout:
+	if (aio_mutex)
+		mutex_unlock(aio_mutex);
 	return ret;
 }
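The DIO overwrite check above first converts the byte range [pos, pos + length) into a logical block range before asking ext4_map_blocks() whether it is already fully mapped. A minimal user-space sketch of just that block-alignment arithmetic, with hypothetical input values and a local stand-in for EXT4_BLOCK_ALIGN (not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Round x up to the next multiple of (1 << blkbits), as EXT4_BLOCK_ALIGN does. */
static uint64_t block_align(uint64_t x, unsigned blkbits)
{
        uint64_t mask = (1ULL << blkbits) - 1;
        return (x + mask) & ~mask;
}

int main(void)
{
        unsigned blkbits = 12;                  /* 4 KiB blocks (hypothetical) */
        uint64_t pos = 6000, length = 10000;    /* hypothetical write range */

        uint64_t m_lblk = pos >> blkbits;
        uint64_t m_len  = (block_align(pos + length, blkbits) >> blkbits) - m_lblk;

        /* A write of bytes [6000, 16000) covers logical blocks 1, 2 and 3. */
        printf("m_lblk=%llu m_len=%llu\n",
               (unsigned long long)m_lblk, (unsigned long long)m_len);
        return 0;
}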
fs/ext4/inode.c

@@ -489,8 +489,8 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
 * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
 * based files
 *
- * On success, it returns the number of blocks being mapped or allocate.
- * if create==0 and the blocks are pre-allocated and uninitialized block,
+ * On success, it returns the number of blocks being mapped or allocated.
+ * if create==0 and the blocks are pre-allocated and unwritten block,
 * the result buffer head is unmapped. If the create ==1, it will make sure
 * the buffer head is mapped.
 *
@@ -622,7 +622,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 		map->m_flags &= ~EXT4_MAP_FLAGS;

 	/*
-	 * New blocks allocate and/or writing to uninitialized extent
+	 * New blocks allocate and/or writing to unwritten extent
 	 * will possibly result in updating i_data, so we take
 	 * the write lock of i_data_sem, and call get_blocks()
 	 * with create == 1 flag.
@@ -2032,7 +2032,7 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
 * Scan buffers corresponding to changed extent (we expect corresponding pages
 * to be already locked) and update buffer state according to new extent state.
 * We map delalloc buffers to their physical location, clear unwritten bits,
- * and mark buffers as uninit when we perform writes to uninitialized extents
+ * and mark buffers as uninit when we perform writes to unwritten extents
 * and do extent conversion after IO is finished. If the last page is not fully
 * mapped, we update @map to the next extent in the last page that needs
 * mapping. Otherwise we submit the page for IO.
@@ -2126,12 +2126,12 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
 	struct inode *inode = mpd->inode;
 	struct ext4_map_blocks *map = &mpd->map;
 	int get_blocks_flags;
-	int err;
+	int err, dioread_nolock;

 	trace_ext4_da_write_pages_extent(inode, map);
 	/*
 	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
-	 * to convert an uninitialized extent to be initialized (in the case
+	 * to convert an unwritten extent to be initialized (in the case
 	 * where we have written into one or more preallocated blocks). It is
 	 * possible that we're going to need more metadata blocks than
 	 * previously reserved. However we must not fail because we're in
@@ -2148,7 +2148,8 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
 	 */
 	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
 			   EXT4_GET_BLOCKS_METADATA_NOFAIL;
-	if (ext4_should_dioread_nolock(inode))
+	dioread_nolock = ext4_should_dioread_nolock(inode);
+	if (dioread_nolock)
 		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
 	if (map->m_flags & (1 << BH_Delay))
 		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
@@ -2156,7 +2157,7 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
 	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
 	if (err < 0)
 		return err;
-	if (map->m_flags & EXT4_MAP_UNINIT) {
+	if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
 		if (!mpd->io_submit.io_end->handle &&
 		    ext4_handle_valid(handle)) {
 			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
@@ -3070,9 +3071,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
 * preallocated extents, and those write extend the file, no need to
 * fall back to buffered IO.
 *
- * For holes, we fallocate those blocks, mark them as uninitialized
+ * For holes, we fallocate those blocks, mark them as unwritten
 * If those blocks were preallocated, we mark sure they are split, but
- * still keep the range to write as uninitialized.
+ * still keep the range to write as unwritten.
 *
 * The unwritten extents will be converted to written when DIO is completed.
 * For async direct IO, since the IO may still pending when return, we
@@ -3123,12 +3124,12 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 		 * We could direct write to holes and fallocate.
 		 *
 		 * Allocated blocks to fill the hole are marked as
-		 * uninitialized to prevent parallel buffered read to expose
+		 * unwritten to prevent parallel buffered read to expose
 		 * the stale data before DIO complete the data IO.
 		 *
 		 * As to previously fallocated extents, ext4 get_block will
 		 * just simply mark the buffer mapped but still keep the
-		 * extents uninitialized.
+		 * extents unwritten.
 		 *
 		 * For non AIO case, we will convert those unwritten extents
 		 * to written after return back from blockdev_direct_IO.
fs/ext4/move_extent.c

@@ -57,8 +57,8 @@ get_ext_path(struct inode *inode, ext4_lblk_t lblock,
 static void
 copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest)
 {
-	if (ext4_ext_is_uninitialized(src))
-		ext4_ext_mark_uninitialized(dest);
+	if (ext4_ext_is_unwritten(src))
+		ext4_ext_mark_unwritten(dest);
 	else
 		dest->ee_len = cpu_to_le16(ext4_ext_get_actual_len(dest));
 }
@@ -593,14 +593,14 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext,
 * @inode:		inode in question
 * @from:		block offset of inode
 * @count:		block count to be checked
- * @uninit:		extents expected to be uninitialized
+ * @unwritten:		extents expected to be unwritten
 * @err:		pointer to save error value
 *
 * Return 1 if all extents in range has expected type, and zero otherwise.
 */
 static int
 mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
-			  int uninit, int *err)
+			  int unwritten, int *err)
 {
 	struct ext4_ext_path *path = NULL;
 	struct ext4_extent *ext;
@@ -611,7 +611,7 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
 		if (*err)
 			goto out;
 		ext = path[ext_depth(inode)].p_ext;
-		if (uninit != ext4_ext_is_uninitialized(ext))
+		if (unwritten != ext4_ext_is_unwritten(ext))
 			goto out;
 		from += ext4_ext_get_actual_len(ext);
 		ext4_ext_drop_refs(path);
@@ -894,7 +894,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
 * @orig_page_offset:		page index on original file
 * @data_offset_in_page:	block index where data swapping starts
 * @block_len_in_page:		the number of blocks to be swapped
- * @uninit:			orig extent is uninitialized or not
+ * @unwritten:			orig extent is unwritten or not
 * @err:			pointer to save return value
 *
 * Save the data in original inode blocks and replace original inode extents
@@ -905,7 +905,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
 static int
 move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 		     pgoff_t orig_page_offset, int data_offset_in_page,
-		     int block_len_in_page, int uninit, int *err)
+		     int block_len_in_page, int unwritten, int *err)
 {
 	struct inode *orig_inode = file_inode(o_filp);
 	struct page *pagep[2] = {NULL, NULL};
@@ -962,27 +962,27 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 	if (unlikely(*err < 0))
 		goto stop_journal;
 	/*
-	 * If orig extent was uninitialized it can become initialized
+	 * If orig extent was unwritten it can become initialized
 	 * at any time after i_data_sem was dropped, in order to
 	 * serialize with delalloc we have recheck extent while we
 	 * hold page's lock, if it is still the case data copy is not
 	 * necessary, just swap data blocks between orig and donor.
 	 */
-	if (uninit) {
+	if (unwritten) {
 		ext4_double_down_write_data_sem(orig_inode, donor_inode);
 		/* If any of extents in range became initialized we have to
 		 * fallback to data copying */
-		uninit = mext_check_coverage(orig_inode, orig_blk_offset,
-					     block_len_in_page, 1, err);
+		unwritten = mext_check_coverage(orig_inode, orig_blk_offset,
+						block_len_in_page, 1, err);
 		if (*err)
 			goto drop_data_sem;

-		uninit &= mext_check_coverage(donor_inode, orig_blk_offset,
-					      block_len_in_page, 1, err);
+		unwritten &= mext_check_coverage(donor_inode, orig_blk_offset,
+						 block_len_in_page, 1, err);
 		if (*err)
 			goto drop_data_sem;

-		if (!uninit) {
+		if (!unwritten) {
 			ext4_double_up_write_data_sem(orig_inode, donor_inode);
 			goto data_copy;
 		}
@@ -1259,7 +1259,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
 	int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
 	int data_offset_in_page;
 	int block_len_in_page;
-	int uninit;
+	int unwritten;

 	if (orig_inode->i_sb != donor_inode->i_sb) {
 		ext4_debug("ext4 move extent: The argument files "
@@ -1391,8 +1391,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
 		    !last_extent)
 			continue;

-		/* Is original extent is uninitialized */
-		uninit = ext4_ext_is_uninitialized(ext_prev);
+		/* Is original extent is unwritten */
+		unwritten = ext4_ext_is_unwritten(ext_prev);

 		data_offset_in_page = seq_start % blocks_per_page;
@@ -1432,8 +1432,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
 						o_filp, donor_inode,
 						orig_page_offset,
 						data_offset_in_page,
-						block_len_in_page, uninit,
-						&ret);
+						block_len_in_page,
+						unwritten, &ret);

 		/* Count how many blocks we have exchanged */
 		*moved_len += block_len_in_page;
fs/ext4/super.c

@@ -3337,7 +3337,7 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb)
 	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
 	 * This should cover the situations where we can not afford to run
 	 * out of space like for example punch hole, or converting
-	 * uninitialized extents in delalloc path. In most cases such
+	 * unwritten extents in delalloc path. In most cases such
 	 * allocation would require 1, or 2 blocks, higher numbers are
 	 * very rare.
 	 */
include/trace/events/ext4.h

@@ -36,7 +36,7 @@ struct extent_status;
 #define show_map_flags(flags) __print_flags(flags, "|",			\
 	{ EXT4_GET_BLOCKS_CREATE,		"CREATE" },		\
-	{ EXT4_GET_BLOCKS_UNINIT_EXT,		"UNINIT" },		\
+	{ EXT4_GET_BLOCKS_UNWRIT_EXT,		"UNWRIT" },		\
 	{ EXT4_GET_BLOCKS_DELALLOC_RESERVE,	"DELALLOC" },		\
 	{ EXT4_GET_BLOCKS_PRE_IO,		"PRE_IO" },		\
 	{ EXT4_GET_BLOCKS_CONVERT,		"CONVERT" },		\
@@ -51,7 +51,6 @@ struct extent_status;
 	{ EXT4_MAP_MAPPED,	"M" },			\
 	{ EXT4_MAP_UNWRITTEN,	"U" },			\
 	{ EXT4_MAP_BOUNDARY,	"B" },			\
-	{ EXT4_MAP_UNINIT,	"u" },			\
 	{ EXT4_MAP_FROM_CLUSTER, "C" })

 #define show_free_flags(flags) __print_flags(flags, "|",	\
@@ -1497,7 +1496,7 @@ DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
 	TP_ARGS(inode)
 );

-/* 'ux' is the uninitialized extent. */
+/* 'ux' is the unwritten extent. */
 TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
 	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
 		 struct ext4_extent *ux),
@@ -1533,7 +1532,7 @@ TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
 );

 /*
- * 'ux' is the uninitialized extent.
+ * 'ux' is the unwritten extent.
 * 'ix' is the initialized extent to which blocks are transferred.
 */
 TRACE_EVENT(ext4_ext_convert_to_initialized_fastpath,
@@ -1811,7 +1810,7 @@ DEFINE_EVENT(ext4__trim, ext4_trim_all_free,
 	TP_ARGS(sb, group, start, len)
 );

-TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
+TRACE_EVENT(ext4_ext_handle_unwritten_extents,
 	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int flags,
 		 unsigned int allocated, ext4_fsblk_t newblock),
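show_map_flags() above uses __print_flags() to render the get_blocks bitmask as a "|"-separated list in trace output, which is why the rename also changes the printed label from "UNINIT" to "UNWRIT". A rough user-space sketch of that kind of flag decoding (only an illustration of the idea, not the tracepoint infrastructure):

#include <stddef.h>
#include <stdio.h>

/* Values mirrored from fs/ext4/ext4.h; labels follow show_map_flags(). */
static const struct { unsigned bit; const char *name; } map_flags[] = {
        { 0x0001, "CREATE" },
        { 0x0002, "UNWRIT" },
        { 0x0004, "DELALLOC" },
        { 0x0008, "PRE_IO" },
        { 0x0010, "CONVERT" },
};

/* Print the set bits as a "|"-separated list, e.g. "CREATE|UNWRIT". */
static void print_flags(unsigned flags)
{
        const char *sep = "";
        for (size_t i = 0; i < sizeof(map_flags) / sizeof(map_flags[0]); i++) {
                if (flags & map_flags[i].bit) {
                        printf("%s%s", sep, map_flags[i].name);
                        sep = "|";
                }
        }
        printf("\n");
}

int main(void)
{
        print_flags(0x0003);    /* prints CREATE|UNWRIT */
        return 0;
}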