OpenHarmony / kernel_linux

Commit 9f5974c8
Authored Jan 12, 2006 by Linus Torvalds

    Merge git://oss.sgi.com:8090/oss/git/xfs-2.6

Parents: a2d823bf, ddae9c2e

Showing 58 changed files with 2719 additions and 2572 deletions (+2719 -2572)
fs/xfs/linux-2.6/xfs_aops.c    +590  -498
fs/xfs/linux-2.6/xfs_aops.h    +10   -0
fs/xfs/linux-2.6/xfs_buf.c     +666  -707
fs/xfs/linux-2.6/xfs_buf.h     +286  -410
fs/xfs/linux-2.6/xfs_file.c    +2    -4
fs/xfs/linux-2.6/xfs_ioctl.c   +3    -7
fs/xfs/linux-2.6/xfs_iops.c    +81   -40
fs/xfs/linux-2.6/xfs_iops.h    +0    -5
fs/xfs/linux-2.6/xfs_linux.h   +1    -5
fs/xfs/linux-2.6/xfs_lrw.c     +19   -37
fs/xfs/linux-2.6/xfs_stats.c   +1    -1
fs/xfs/linux-2.6/xfs_stats.h   +9    -9
fs/xfs/linux-2.6/xfs_super.c   +11   -8
fs/xfs/linux-2.6/xfs_vnode.c   +0    -1
fs/xfs/linux-2.6/xfs_vnode.h   +19   -0
fs/xfs/quota/xfs_dquot_item.c  +2    -2
fs/xfs/quota/xfs_qm.c          +11   -7
fs/xfs/support/debug.c         +25   -35
fs/xfs/support/debug.h         +12   -13
fs/xfs/support/uuid.c          +14   -9
fs/xfs/xfs_arch.h              +19   -3
fs/xfs/xfs_attr_leaf.c         +6    -6
fs/xfs/xfs_attr_leaf.h         +43   -36
fs/xfs/xfs_bmap.c              +251  -161
fs/xfs/xfs_bmap.h              +6    -1
fs/xfs/xfs_clnt.h              +1    -1
fs/xfs/xfs_dfrag.c             +4    -12
fs/xfs/xfs_dinode.h            +17   -5
fs/xfs/xfs_dir.c               +1    -1
fs/xfs/xfs_dir.h               +2    -0
fs/xfs/xfs_dir2.h              +0    -3
fs/xfs/xfs_dir_leaf.h          +34   -30
fs/xfs/xfs_error.c             +0    -1
fs/xfs/xfs_error.h             +4    -4
fs/xfs/xfs_fs.h                +6    -4
fs/xfs/xfs_fsops.c             +26   -0
fs/xfs/xfs_fsops.h             +1    -0
fs/xfs/xfs_iget.c              +1    -4
fs/xfs/xfs_inode.c             +38   -23
fs/xfs/xfs_inode.h             +4    -0
fs/xfs/xfs_inode_item.c        +7    -2
fs/xfs/xfs_iomap.c             +234  -191
fs/xfs/xfs_itable.c            +3    -2
fs/xfs/xfs_log.c               +96   -27
fs/xfs/xfs_log.h               +1    -10
fs/xfs/xfs_log_priv.h          +5    -72
fs/xfs/xfs_log_recover.c       +6    -6
fs/xfs/xfs_mount.c             +2    -3
fs/xfs/xfs_mount.h             +1    -2
fs/xfs/xfs_rename.c            +2    -5
fs/xfs/xfs_rw.c                +4    -5
fs/xfs/xfs_sb.h                +0    -17
fs/xfs/xfs_trans.c             +8    -6
fs/xfs/xfs_trans.h             +0    -1
fs/xfs/xfs_utils.c             +3    -6
fs/xfs/xfs_vfsops.c            +28   -22
fs/xfs/xfs_vnodeops.c          +91   -102
mm/swap.c                      +2    -0
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -40,11 +40,10 @@
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
 #include <linux/mpage.h>
+#include <linux/pagevec.h>
 #include <linux/writeback.h>
 
 STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
-STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
-		struct writeback_control *wbc, void *, int, int);
 
 #if defined(XFS_RW_TRACE)
 void
@@ -55,17 +54,15 @@ xfs_page_trace(
 	int		mask)
 {
 	xfs_inode_t	*ip;
-	bhv_desc_t	*bdp;
 	vnode_t		*vp = LINVFS_GET_VP(inode);
 	loff_t		isize = i_size_read(inode);
-	loff_t		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+	loff_t		offset = page_offset(page);
 	int		delalloc = -1, unmapped = -1, unwritten = -1;
 
 	if (page_has_buffers(page))
 		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
 
-	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
-	ip = XFS_BHVTOI(bdp);
+	ip = xfs_vtoi(vp);
 	if (!ip->i_rwtrace)
 		return;
@@ -103,15 +100,56 @@ xfs_finish_ioend(
 		queue_work(xfsdatad_workqueue, &ioend->io_work);
 }
 
+/*
+ * We're now finished for good with this ioend structure.
+ * Update the page state via the associated buffer_heads,
+ * release holds on the inode and bio, and finally free
+ * up memory.  Do not use the ioend after this.
+ */
 STATIC void
 xfs_destroy_ioend(
 	xfs_ioend_t		*ioend)
 {
+	struct buffer_head	*bh, *next;
+
+	for (bh = ioend->io_buffer_head; bh; bh = next) {
+		next = bh->b_private;
+		bh->b_end_io(bh, ioend->io_uptodate);
+	}
+
 	vn_iowake(ioend->io_vnode);
 	mempool_free(ioend, xfs_ioend_pool);
 }
 
 /*
+ * Buffered IO write completion for delayed allocate extents.
+ * TODO: Update ondisk isize now that we know the file data
+ * has been flushed (i.e. the notorious "NULL file" problem).
+ */
+STATIC void
+xfs_end_bio_delalloc(
+	void			*data)
+{
+	xfs_ioend_t		*ioend = data;
+
+	xfs_destroy_ioend(ioend);
+}
+
+/*
+ * Buffered IO write completion for regular, written extents.
+ */
+STATIC void
+xfs_end_bio_written(
+	void			*data)
+{
+	xfs_ioend_t		*ioend = data;
+
+	xfs_destroy_ioend(ioend);
+}
+
+/*
+ * IO write completion for unwritten extents.
+ *
  * Issue transactions to convert a buffer range from unwritten
  * to written extents.
  */
@@ -123,21 +161,10 @@ xfs_end_bio_unwritten(
 	vnode_t			*vp = ioend->io_vnode;
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
-	struct buffer_head	*bh, *next;
 	int			error;
 
 	if (ioend->io_uptodate)
 		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
-
-	/* ioend->io_buffer_head is only non-NULL for buffered I/O */
-	for (bh = ioend->io_buffer_head; bh; bh = next) {
-		next = bh->b_private;
-
-		bh->b_end_io = NULL;
-		clear_buffer_unwritten(bh);
-		end_buffer_async_write(bh, ioend->io_uptodate);
-	}
-
 	xfs_destroy_ioend(ioend);
 }
@@ -149,7 +176,8 @@ xfs_end_bio_unwritten(
  */
 STATIC xfs_ioend_t *
 xfs_alloc_ioend(
-	struct inode		*inode)
+	struct inode		*inode,
+	unsigned int		type)
 {
 	xfs_ioend_t		*ioend;
@@ -162,45 +190,25 @@ xfs_alloc_ioend(
 	 */
 	atomic_set(&ioend->io_remaining, 1);
 	ioend->io_uptodate = 1; /* cleared if any I/O fails */
+	ioend->io_list = NULL;
+	ioend->io_type = type;
 	ioend->io_vnode = LINVFS_GET_VP(inode);
 	ioend->io_buffer_head = NULL;
+	ioend->io_buffer_tail = NULL;
 	atomic_inc(&ioend->io_vnode->v_iocount);
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
 
-	INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+	if (type == IOMAP_UNWRITTEN)
+		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+	else if (type == IOMAP_DELAY)
+		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+	else
+		INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
 
 	return ioend;
 }
 
-void
-linvfs_unwritten_done(
-	struct buffer_head	*bh,
-	int			uptodate)
-{
-	xfs_ioend_t		*ioend = bh->b_private;
-	static spinlock_t	unwritten_done_lock = SPIN_LOCK_UNLOCKED;
-	unsigned long		flags;
-
-	ASSERT(buffer_unwritten(bh));
-	bh->b_end_io = NULL;
-
-	if (!uptodate)
-		ioend->io_uptodate = 0;
-
-	/*
-	 * Deep magic here.  We reuse b_private in the buffer_heads to build
-	 * a chain for completing the I/O from user context after we've issued
-	 * a transaction to convert the unwritten extent.
-	 */
-	spin_lock_irqsave(&unwritten_done_lock, flags);
-	bh->b_private = ioend->io_buffer_head;
-	ioend->io_buffer_head = bh;
-	spin_unlock_irqrestore(&unwritten_done_lock, flags);
-
-	xfs_finish_ioend(ioend);
-}
-
 STATIC int
 xfs_map_blocks(
 	struct inode		*inode,
@@ -218,138 +226,260 @@ xfs_map_blocks(
 	return -error;
 }
 
+STATIC inline int
+xfs_iomap_valid(
+	xfs_iomap_t		*iomapp,
+	loff_t			offset)
+{
+	return offset >= iomapp->iomap_offset &&
+		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
+}
+
 /*
- * Finds the corresponding mapping in block @map array of the
- * given @offset within a @page.
+ * BIO completion handler for buffered IO.
  */
-STATIC xfs_iomap_t *
-xfs_offset_to_map(
-	struct page		*page,
-	xfs_iomap_t		*iomapp,
-	unsigned long		offset)
+STATIC int
+xfs_end_bio(
+	struct bio		*bio,
+	unsigned int		bytes_done,
+	int			error)
+{
+	xfs_ioend_t		*ioend = bio->bi_private;
+
+	if (bio->bi_size)
+		return 1;
+
+	ASSERT(ioend);
+	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
+
+	/* Toss bio and pass work off to an xfsdatad thread */
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+		ioend->io_uptodate = 0;
+	bio->bi_private = NULL;
+	bio->bi_end_io = NULL;
+	bio_put(bio);
+	xfs_finish_ioend(ioend);
+	return 0;
+}
+
+STATIC void
+xfs_submit_ioend_bio(
+	xfs_ioend_t	*ioend,
+	struct bio	*bio)
+{
+	atomic_inc(&ioend->io_remaining);
+
+	bio->bi_private = ioend;
+	bio->bi_end_io = xfs_end_bio;
+
+	submit_bio(WRITE, bio);
+	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
+	bio_put(bio);
+}
+
+STATIC struct bio *
+xfs_alloc_ioend_bio(
+	struct buffer_head	*bh)
+{
+	struct bio		*bio;
+	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
+
+	do {
+		bio = bio_alloc(GFP_NOIO, nvecs);
+		nvecs >>= 1;
+	} while (!bio);
+
+	ASSERT(bio->bi_private == NULL);
+	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+	bio->bi_bdev = bh->b_bdev;
+	bio_get(bio);
+	return bio;
+}
+
+STATIC void
+xfs_start_buffer_writeback(
+	struct buffer_head	*bh)
+{
+	ASSERT(buffer_mapped(bh));
+	ASSERT(buffer_locked(bh));
+	ASSERT(!buffer_delay(bh));
+	ASSERT(!buffer_unwritten(bh));
+
+	mark_buffer_async_write(bh);
+	set_buffer_uptodate(bh);
+	clear_buffer_dirty(bh);
+}
+
+STATIC void
+xfs_start_page_writeback(
+	struct page		*page,
+	struct writeback_control *wbc,
+	int			clear_dirty,
+	int			buffers)
+{
+	ASSERT(PageLocked(page));
+	ASSERT(!PageWriteback(page));
+	set_page_writeback(page);
+	if (clear_dirty)
+		clear_page_dirty(page);
+	unlock_page(page);
+	if (!buffers) {
+		end_page_writeback(page);
+		wbc->pages_skipped++;	/* We didn't write this page */
+	}
+}
+
+static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
+{
+	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+}
+
+/*
+ * Submit all of the bios for all of the ioends we have saved up,
+ * covering the initial writepage page and also any probed pages.
+ */
+STATIC void
+xfs_submit_ioend(
+	xfs_ioend_t		*ioend)
+{
+	xfs_ioend_t		*next;
+	struct buffer_head	*bh;
+	struct bio		*bio;
+	sector_t		lastblock = 0;
+
+	do {
+		next = ioend->io_list;
+		bio = NULL;
+
+		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+			xfs_start_buffer_writeback(bh);
+
+			if (!bio) {
+ retry:
+				bio = xfs_alloc_ioend_bio(bh);
+			} else if (bh->b_blocknr != lastblock + 1) {
+				xfs_submit_ioend_bio(ioend, bio);
+				goto retry;
+			}
+
+			if (bio_add_buffer(bio, bh) != bh->b_size) {
+				xfs_submit_ioend_bio(ioend, bio);
+				goto retry;
+			}
+
+			lastblock = bh->b_blocknr;
+		}
+		if (bio)
+			xfs_submit_ioend_bio(ioend, bio);
+		xfs_finish_ioend(ioend);
+	} while ((ioend = next) != NULL);
+}
+
+/*
+ * Cancel submission of all buffer_heads so far in this endio.
+ * Toss the endio too.  Only ever called for the initial page
+ * in a writepage request, so only ever one page.
+ */
+STATIC void
+xfs_cancel_ioend(
+	xfs_ioend_t		*ioend)
+{
+	xfs_ioend_t		*next;
+	struct buffer_head	*bh, *next_bh;
+
+	do {
+		next = ioend->io_list;
+		bh = ioend->io_buffer_head;
+		do {
+			next_bh = bh->b_private;
+			clear_buffer_async_write(bh);
+			unlock_buffer(bh);
+		} while ((bh = next_bh) != NULL);
+
+		vn_iowake(ioend->io_vnode);
+		mempool_free(ioend, xfs_ioend_pool);
+	} while ((ioend = next) != NULL);
+}
+
+/*
+ * Test to see if we've been building up a completion structure for
+ * earlier buffers -- if so, we try to append to this ioend if we
+ * can, otherwise we finish off any current ioend and start another.
+ * Return true if we've finished the given ioend.
+ */
+STATIC void
+xfs_add_to_ioend(
+	struct inode		*inode,
+	struct buffer_head	*bh,
+	xfs_off_t		offset,
+	unsigned int		type,
+	xfs_ioend_t		**result,
+	int			need_ioend)
 {
-	loff_t			full_offset;	/* offset from start of file */
+	xfs_ioend_t		*ioend = *result;
 
-	ASSERT(offset < PAGE_CACHE_SIZE);
+	if (!ioend || need_ioend || type != ioend->io_type) {
+		xfs_ioend_t	*previous = *result;
 
-	full_offset = page->index;		/* NB: using 64bit number */
-	full_offset <<= PAGE_CACHE_SHIFT;	/* offset from file start */
-	full_offset += offset;			/* offset from page start */
-
-	if (full_offset < iomapp->iomap_offset)
-		return NULL;
-	if (iomapp->iomap_offset + (iomapp->iomap_bsize - 1) >= full_offset)
-		return iomapp;
-	return NULL;
+		ioend = xfs_alloc_ioend(inode, type);
+		ioend->io_offset = offset;
+		ioend->io_buffer_head = bh;
+		ioend->io_buffer_tail = bh;
+		if (previous)
+			previous->io_list = ioend;
+		*result = ioend;
+	} else {
+		ioend->io_buffer_tail->b_private = bh;
+		ioend->io_buffer_tail = bh;
+	}
+
+	bh->b_private = NULL;
+	ioend->io_size += bh->b_size;
 }
 
 STATIC void
 xfs_map_at_offset(
-	struct page		*page,
 	struct buffer_head	*bh,
-	unsigned long		offset,
+	loff_t			offset,
 	int			block_bits,
 	xfs_iomap_t		*iomapp)
 {
 	xfs_daddr_t		bn;
-	loff_t			delta;
 	int			sector_shift;
 
 	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
 	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
 	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);
 
-	delta = page->index;
-	delta <<= PAGE_CACHE_SHIFT;
-	delta += offset;
-	delta -= iomapp->iomap_offset;
-	delta >>= block_bits;
-
 	sector_shift = block_bits - BBSHIFT;
-	bn = iomapp->iomap_bn >> sector_shift;
-	bn += delta;
-	BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
+	bn = (iomapp->iomap_bn >> sector_shift) +
+	      ((offset - iomapp->iomap_offset) >> block_bits);
+
+	ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME));
 	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);
 
 	lock_buffer(bh);
 	bh->b_blocknr = bn;
-	bh->b_bdev = iomapp->iomap_target->pbr_bdev;
+	bh->b_bdev = iomapp->iomap_target->bt_bdev;
 	set_buffer_mapped(bh);
 	clear_buffer_delay(bh);
+	clear_buffer_unwritten(bh);
 }
 
 /*
- * Look for a page at index which is unlocked and contains our
- * unwritten extent flagged buffers at its head.  Returns page
- * locked and with an extra reference count, and length of the
- * unwritten extent component on this page that we can write,
- * in units of filesystem blocks.
+ * Look for a page at index that is suitable for clustering.
  */
-STATIC struct page *
-xfs_probe_unwritten_page(
-	struct address_space	*mapping,
-	pgoff_t			index,
-	xfs_iomap_t		*iomapp,
-	xfs_ioend_t		*ioend,
-	unsigned long		max_offset,
-	unsigned long		*fsbs,
-	unsigned int		bbits)
-{
-	struct page		*page;
-
-	page = find_trylock_page(mapping, index);
-	if (!page)
-		return NULL;
-	if (PageWriteback(page))
-		goto out;
-
-	if (page->mapping && page_has_buffers(page)) {
-		struct buffer_head	*bh, *head;
-		unsigned long		p_offset = 0;
-
-		*fsbs = 0;
-		bh = head = page_buffers(page);
-		do {
-			if (!buffer_unwritten(bh) || !buffer_uptodate(bh))
-				break;
-			if (!xfs_offset_to_map(page, iomapp, p_offset))
-				break;
-			if (p_offset >= max_offset)
-				break;
-			xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
-			set_buffer_unwritten_io(bh);
-			bh->b_private = ioend;
-			p_offset += bh->b_size;
-			(*fsbs)++;
-		} while ((bh = bh->b_this_page) != head);
-
-		if (p_offset)
-			return page;
-	}
-
-out:
-	unlock_page(page);
-	return NULL;
-}
-
-/*
- * Look for a page at index which is unlocked and not mapped
- * yet - clustering for mmap write case.
- */
 STATIC unsigned int
-xfs_probe_unmapped_page(
-	struct address_space	*mapping,
-	pgoff_t			index,
-	unsigned int		pg_offset)
+xfs_probe_page(
+	struct page		*page,
+	unsigned int		pg_offset,
+	int			mapped)
 {
-	struct page		*page;
 	int			ret = 0;
 
-	page = find_trylock_page(mapping, index);
-	if (!page)
-		return 0;
-
 	if (PageWriteback(page))
-		goto out;
+		return 0;
 
 	if (page->mapping && PageDirty(page)) {
 		if (page_has_buffers(page)) {
@@ -357,79 +487,101 @@ xfs_probe_unmapped_page(
 			bh = head = page_buffers(page);
 			do {
-				if (buffer_mapped(bh) || !buffer_uptodate(bh))
+				if (!buffer_uptodate(bh))
+					break;
+				if (mapped != buffer_mapped(bh))
 					break;
 				ret += bh->b_size;
 				if (ret >= pg_offset)
 					break;
 			} while ((bh = bh->b_this_page) != head);
 		} else
-			ret = PAGE_CACHE_SIZE;
+			ret = mapped ? 0 : PAGE_CACHE_SIZE;
 	}
 
-out:
-	unlock_page(page);
 	return ret;
 }
 
-STATIC unsigned int
-xfs_probe_unmapped_cluster(
+STATIC size_t
+xfs_probe_cluster(
 	struct inode		*inode,
 	struct page		*startpage,
 	struct buffer_head	*bh,
-	struct buffer_head	*head)
+	struct buffer_head	*head,
+	int			mapped)
 {
+	struct pagevec		pvec;
 	pgoff_t			tindex, tlast, tloff;
-	unsigned int		pg_offset, len, total = 0;
-	struct address_space	*mapping = inode->i_mapping;
+	size_t			total = 0;
+	int			done = 0, i;
 
 	/* First sum forwards in this page */
 	do {
-		if (buffer_mapped(bh))
-			break;
+		if (mapped != buffer_mapped(bh))
+			return total;
 		total += bh->b_size;
 	} while ((bh = bh->b_this_page) != head);
 
-	/* If we reached the end of the page, sum forwards in
-	 * following pages.
-	 */
-	if (bh == head) {
-		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-		/* Prune this back to avoid pathological behavior */
-		tloff = min(tlast, startpage->index + 64);
-		for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
-			len = xfs_probe_unmapped_page(mapping, tindex,
-							PAGE_CACHE_SIZE);
-			if (!len)
-				return total;
+	/* if we reached the end of the page, sum forwards in following pages */
+	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+	tindex = startpage->index + 1;
+
+	/* Prune this back to avoid pathological behavior */
+	tloff = min(tlast, startpage->index + 64);
+
+	pagevec_init(&pvec, 0);
+	while (!done && tindex <= tloff) {
+		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
+			break;
+
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
+			size_t pg_offset, len = 0;
+
+			if (tindex == tlast) {
+				pg_offset =
+				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
+				if (!pg_offset) {
+					done = 1;
+					break;
+				}
+			} else
+				pg_offset = PAGE_CACHE_SIZE;
+
+			if (page->index == tindex && !TestSetPageLocked(page)) {
+				len = xfs_probe_page(page, pg_offset, mapped);
+				unlock_page(page);
+			}
+
+			if (!len) {
+				done = 1;
+				break;
+			}
+
 			total += len;
+			tindex++;
 		}
-		if (tindex == tlast &&
-		    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
-			total += xfs_probe_unmapped_page(mapping,
-							tindex, pg_offset);
-		}
+
+		pagevec_release(&pvec);
+		cond_resched();
 	}
+
 	return total;
 }
 
 /*
- * Probe for a given page (index) in the inode and test if it is delayed
- * and without unwritten buffers.  Returns page locked and with an extra
- * reference count.
+ * Test if a given page is suitable for writing as part of an unwritten
+ * or delayed allocate extent.
  */
-STATIC struct page *
-xfs_probe_delalloc_page(
-	struct inode		*inode,
-	pgoff_t			index)
+STATIC int
+xfs_is_delayed_page(
+	struct page		*page,
+	unsigned int		type)
 {
-	struct page		*page;
-
-	page = find_trylock_page(inode->i_mapping, index);
-	if (!page)
-		return NULL;
 	if (PageWriteback(page))
-		goto out;
+		return 0;
 
 	if (page->mapping && page_has_buffers(page)) {
 		struct buffer_head	*bh, *head;
@@ -437,243 +589,156 @@ xfs_probe_delalloc_page(
 		bh = head = page_buffers(page);
 		do {
-			if (buffer_unwritten(bh)) {
-				acceptable = 0;
+			if (buffer_unwritten(bh))
+				acceptable = (type == IOMAP_UNWRITTEN);
+			else if (buffer_delay(bh))
+				acceptable = (type == IOMAP_DELAY);
+			else if (buffer_mapped(bh))
+				acceptable = (type == 0);
+			else
 				break;
-			} else if (buffer_delay(bh)) {
-				acceptable = 1;
-			}
 		} while ((bh = bh->b_this_page) != head);
 
 		if (acceptable)
-			return page;
+			return 1;
 	}
 
-out:
-	unlock_page(page);
-	return NULL;
-}
-
-STATIC int
-xfs_map_unwritten(
-	struct inode		*inode,
-	struct page		*start_page,
-	struct buffer_head	*head,
-	struct buffer_head	*curr,
-	unsigned long		p_offset,
-	int			block_bits,
-	xfs_iomap_t		*iomapp,
-	struct writeback_control *wbc,
-	int			startio,
-	int			all_bh)
-{
-	struct buffer_head	*bh = curr;
-	xfs_iomap_t		*tmp;
-	xfs_ioend_t		*ioend;
-	loff_t			offset;
-	unsigned long		nblocks = 0;
-
-	offset = start_page->index;
-	offset <<= PAGE_CACHE_SHIFT;
-	offset += p_offset;
-
-	ioend = xfs_alloc_ioend(inode);
-
-	/* First map forwards in the page consecutive buffers
-	 * covering this unwritten extent
-	 */
-	do {
-		if (!buffer_unwritten(bh))
-			break;
-		tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
-		if (!tmp)
-			break;
-		xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
-		set_buffer_unwritten_io(bh);
-		bh->b_private = ioend;
-		p_offset += bh->b_size;
-		nblocks++;
-	} while ((bh = bh->b_this_page) != head);
-
-	atomic_add(nblocks, &ioend->io_remaining);
-
-	/* If we reached the end of the page, map forwards in any
-	 * following pages which are also covered by this extent.
-	 */
-	if (bh == head) {
-		struct address_space	*mapping = inode->i_mapping;
-		pgoff_t			tindex, tloff, tlast;
-		unsigned long		bs;
-		unsigned int		pg_offset, bbits = inode->i_blkbits;
-		struct page		*page;
-
-		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-		tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
-		tloff = min(tlast, tloff);
-		for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
-			page = xfs_probe_unwritten_page(mapping,
-						tindex, iomapp, ioend,
-						PAGE_CACHE_SIZE, &bs, bbits);
-			if (!page)
-				break;
-			nblocks += bs;
-			atomic_add(bs, &ioend->io_remaining);
-			xfs_convert_page(inode, page, iomapp, wbc, ioend,
-							startio, all_bh);
-			/* stop if converting the next page might add
-			 * enough blocks that the corresponding byte
-			 * count won't fit in our ulong page buf length */
-			if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
-				goto enough;
-		}
-
-		if (tindex == tlast &&
-		    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE-1)))) {
-			page = xfs_probe_unwritten_page(mapping,
-							tindex, iomapp, ioend,
-							pg_offset, &bs, bbits);
-			if (page) {
-				nblocks += bs;
-				atomic_add(bs, &ioend->io_remaining);
-				xfs_convert_page(inode, page, iomapp, wbc, ioend,
-							startio, all_bh);
-				if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
-					goto enough;
-			}
-		}
-	}
-
-enough:
-	ioend->io_size = (xfs_off_t)nblocks << block_bits;
-	ioend->io_offset = offset;
-	xfs_finish_ioend(ioend);
 	return 0;
 }
 
-STATIC void
-xfs_submit_page(
-	struct page		*page,
-	struct writeback_control *wbc,
-	struct buffer_head	*bh_arr[],
-	int			bh_count,
-	int			probed_page,
-	int			clear_dirty)
-{
-	struct buffer_head	*bh;
-	int			i;
-
-	BUG_ON(PageWriteback(page));
-	if (bh_count)
-		set_page_writeback(page);
-	if (clear_dirty)
-		clear_page_dirty(page);
-	unlock_page(page);
-
-	if (bh_count) {
-		for (i = 0; i < bh_count; i++) {
-			bh = bh_arr[i];
-			mark_buffer_async_write(bh);
-			if (buffer_unwritten(bh))
-				set_buffer_unwritten_io(bh);
-			set_buffer_uptodate(bh);
-			clear_buffer_dirty(bh);
-		}
-
-		for (i = 0; i < bh_count; i++)
-			submit_bh(WRITE, bh_arr[i]);
-
-		if (probed_page && clear_dirty)
-			wbc->nr_to_write--;	/* Wrote an "extra" page */
-	}
-}
-
 /*
  * Allocate & map buffers for page given the extent map. Write it out.
  * except for the original page of a writepage, this is called on
  * delalloc/unwritten pages only, for the original page it is possible
  * that the page has no mapping at all.
  */
-STATIC void
+STATIC int
 xfs_convert_page(
 	struct inode		*inode,
 	struct page		*page,
-	xfs_iomap_t		*iomapp,
+	loff_t			tindex,
+	xfs_iomap_t		*mp,
+	xfs_ioend_t		**ioendp,
 	struct writeback_control *wbc,
-	void			*private,
 	int			startio,
 	int			all_bh)
 {
-	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
-	xfs_iomap_t		*mp = iomapp, *tmp;
-	unsigned long		offset, end_offset;
-	int			index = 0;
+	struct buffer_head	*bh, *head;
+	xfs_off_t		end_offset;
+	unsigned long		p_offset;
+	unsigned int		type;
 	int			bbits = inode->i_blkbits;
 	int			len, page_dirty;
+	int			count = 0, done = 0, uptodate = 1;
+	xfs_off_t		offset = page_offset(page);
 
-	end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));
+	if (page->index != tindex)
+		goto fail;
+	if (TestSetPageLocked(page))
+		goto fail;
+	if (PageWriteback(page))
+		goto fail_unlock_page;
+	if (page->mapping != inode->i_mapping)
+		goto fail_unlock_page;
+	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
+		goto fail_unlock_page;
 
 	/*
 	 * page_dirty is initially a count of buffers on the page before
 	 * EOF and is decrememted as we move each into a cleanable state.
+	 *
+	 * Derivation:
+	 *
+	 * End offset is the highest offset that this page should represent.
+	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
+	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
+	 * hence give us the correct page_dirty count. On any other page,
+	 * it will be zero and in that case we need page_dirty to be the
+	 * count of buffers on the page.
 	 */
+	end_offset = min_t(unsigned long long,
+			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
+			i_size_read(inode));
+
 	len = 1 << inode->i_blkbits;
-	end_offset = max(end_offset, PAGE_CACHE_SIZE);
-	end_offset = roundup(end_offset, len);
-	page_dirty = end_offset / len;
+	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
+					PAGE_CACHE_SIZE);
+	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
+	page_dirty = p_offset / len;
 
-	offset = 0;
 	bh = head = page_buffers(page);
 	do {
 		if (offset >= end_offset)
 			break;
-		if (!(PageUptodate(page) || buffer_uptodate(bh)))
+		if (!buffer_uptodate(bh))
+			uptodate = 0;
+		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
+			done = 1;
 			continue;
-		if (buffer_mapped(bh) && all_bh &&
-		    !(buffer_unwritten(bh) || buffer_delay(bh))) {
+		}
+
+		if (buffer_unwritten(bh) || buffer_delay(bh)) {
+			if (buffer_unwritten(bh))
+				type = IOMAP_UNWRITTEN;
+			else
+				type = IOMAP_DELAY;
+
+			if (!xfs_iomap_valid(mp, offset)) {
+				done = 1;
+				continue;
+			}
+
+			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
+			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
+
+			xfs_map_at_offset(bh, offset, bbits, mp);
 			if (startio) {
+				xfs_add_to_ioend(inode, bh, offset,
+						type, ioendp, done);
+			} else {
+				set_buffer_dirty(bh);
+				unlock_buffer(bh);
+				mark_buffer_dirty(bh);
+			}
+			page_dirty--;
+			count++;
+		} else {
+			type = 0;
+			if (buffer_mapped(bh) && all_bh && startio) {
 				lock_buffer(bh);
-				bh_arr[index++] = bh;
+				xfs_add_to_ioend(inode, bh, offset,
+						type, ioendp, done);
+				count++;
 				page_dirty--;
+			} else {
+				done = 1;
 			}
-			continue;
 		}
-		tmp = xfs_offset_to_map(page, mp, offset);
-		if (!tmp)
-			continue;
-		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
-		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));
-
-		/* If this is a new unwritten extent buffer (i.e. one
-		 * that we haven't passed in private data for, we must
-		 * now map this buffer too.
-		 */
-		if (buffer_unwritten(bh) && !bh->b_end_io) {
-			ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
-			xfs_map_unwritten(inode, page, head, bh, offset,
-					bbits, tmp, wbc, startio, all_bh);
-		} else if (! (buffer_unwritten(bh) && buffer_locked(bh))) {
-			xfs_map_at_offset(page, bh, offset, bbits, tmp);
-			if (buffer_unwritten(bh)) {
-				set_buffer_unwritten_io(bh);
-				bh->b_private = private;
-				ASSERT(private);
-			}
-		}
-		if (startio) {
-			bh_arr[index++] = bh;
-		} else {
-			set_buffer_dirty(bh);
-			unlock_buffer(bh);
-			mark_buffer_dirty(bh);
-		}
-		page_dirty--;
 	} while (offset += len, (bh = bh->b_this_page) != head);
 
-	if (startio && index) {
-		xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
-	} else {
-		unlock_page(page);
-	}
+	if (uptodate && bh == head)
+		SetPageUptodate(page);
+
+	if (startio) {
+		if (count) {
+			struct backing_dev_info *bdi;
+
+			bdi = inode->i_mapping->backing_dev_info;
+			if (bdi_write_congested(bdi)) {
+				wbc->encountered_congestion = 1;
+				done = 1;
+			} else if (--wbc->nr_to_write <= 0) {
+				done = 1;
+			}
+		}
+		xfs_start_page_writeback(page, wbc, !page_dirty, count);
+	}
+
+	return done;
+ fail_unlock_page:
+	unlock_page(page);
+ fail:
+	return 1;
 }
 
 /*
@@ -685,19 +750,31 @@ xfs_cluster_write(
 	struct inode		*inode,
 	pgoff_t			tindex,
 	xfs_iomap_t		*iomapp,
+	xfs_ioend_t		**ioendp,
 	struct writeback_control *wbc,
 	int			startio,
 	int			all_bh,
 	pgoff_t			tlast)
 {
-	struct page		*page;
+	struct pagevec		pvec;
+	int			done = 0, i;
 
-	for (; tindex <= tlast; tindex++) {
-		page = xfs_probe_delalloc_page(inode, tindex);
-		if (!page)
-			break;
-		xfs_convert_page(inode, page, iomapp, wbc, NULL,
-				startio, all_bh);
+	pagevec_init(&pvec, 0);
+	while (!done && tindex <= tlast) {
+		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
+			break;
+
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
+					iomapp, ioendp, wbc, startio, all_bh);
+			if (done)
+				break;
+		}
+
+		pagevec_release(&pvec);
+		cond_resched();
 	}
 }
@@ -728,18 +805,22 @@ xfs_page_state_convert(
 	int		startio,
 	int		unmapped) /* also implies page uptodate */
 {
-	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
-	xfs_iomap_t		*iomp, iomap;
+	struct buffer_head	*bh, *head;
+	xfs_iomap_t		iomap;
+	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
 	loff_t			offset;
 	unsigned long		p_offset = 0;
+	unsigned int		type;
 	__uint64_t		end_offset;
 	pgoff_t			end_index, last_index, tlast;
-	int			len, err, i, cnt = 0, uptodate = 1;
-	int			flags;
-	int			page_dirty;
+	ssize_t			size, len;
+	int			flags, err, iomap_valid = 0, uptodate = 1;
+	int			page_dirty, count = 0, trylock_flag = 0;
+	int			all_bh = unmapped;
 
 	/* wait for other IO threads? */
-	flags = (startio && wbc->sync_mode != WB_SYNC_NONE) ? 0 : BMAPI_TRYLOCK;
+	if (startio && (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking))
+		trylock_flag |= BMAPI_TRYLOCK;
 
 	/* Is this page beyond the end of the file? */
 	offset = i_size_read(inode);
@@ -754,161 +835,173 @@ xfs_page_state_convert(
 	}
 	}
 
-	end_offset = min_t(unsigned long long,
-			(loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
-	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
-
 	/*
 	 * page_dirty is initially a count of buffers on the page before
 	 * EOF and is decrememted as we move each into a cleanable state.
+	 *
+	 * Derivation:
+	 *
+	 * End offset is the highest offset that this page should represent.
+	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
+	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
+	 * hence give us the correct page_dirty count. On any other page,
+	 * it will be zero and in that case we need page_dirty to be the
+	 * count of buffers on the page.
 	 */
+	end_offset = min_t(unsigned long long,
+			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
 	len = 1 << inode->i_blkbits;
-	p_offset = max(p_offset, PAGE_CACHE_SIZE);
-	p_offset = roundup(p_offset, len);
+	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
+					PAGE_CACHE_SIZE);
+	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
 	page_dirty = p_offset / len;
 
-	iomp = NULL;
-	p_offset = 0;
 	bh = head = page_buffers(page);
+	offset = page_offset(page);
+	flags = -1;
+	type = 0;
+
+	/* TODO: cleanup count and page_dirty */
 
 	do {
 		if (offset >= end_offset)
 			break;
 		if (!buffer_uptodate(bh))
 			uptodate = 0;
-		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
+		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
+			/*
+			 * the iomap is actually still valid, but the ioend
+			 * isn't.  shouldn't happen too often.
+			 */
+			iomap_valid = 0;
 			continue;
+		}
 
-		if (iomp) {
-			iomp = xfs_offset_to_map(page, &iomap, p_offset);
-		}
+		if (iomap_valid)
+			iomap_valid = xfs_iomap_valid(&iomap, offset);
 
 		/*
 		 * First case, map an unwritten extent and prepare for
 		 * extent state conversion transaction on completion.
-		 */
-		if (buffer_unwritten(bh)) {
-			if (!startio)
-				continue;
-			if (!iomp) {
-				err = xfs_map_blocks(inode, offset, len, &iomap,
-						BMAPI_WRITE|BMAPI_IGNSTATE);
-				if (err) {
-					goto error;
-				}
-				iomp = xfs_offset_to_map(page, &iomap,
-								p_offset);
-			}
-			if (iomp) {
-				if (!bh->b_end_io) {
-					err = xfs_map_unwritten(inode, page,
-							head, bh, p_offset,
-							inode->i_blkbits, iomp,
-							wbc, startio, unmapped);
-					if (err) {
-						goto error;
-					}
-				} else {
-					set_bit(BH_Lock, &bh->b_state);
-				}
-				BUG_ON(!buffer_locked(bh));
-				bh_arr[cnt++] = bh;
-				page_dirty--;
-			}
-		/*
+		 *
 		 * Second case, allocate space for a delalloc buffer.
 		 * We can return EAGAIN here in the release page case.
-		 */
-		} else if (buffer_delay(bh)) {
-			if (!iomp) {
-				err = xfs_map_blocks(inode, offset, len, &iomap,
-						BMAPI_ALLOCATE | flags);
-				if (err) {
-					goto error;
-				}
-				iomp = xfs_offset_to_map(page, &iomap,
-								p_offset);
+		 *
+		 * Third case, an unmapped buffer was found, and we are
+		 * in a path where we need to write the whole page out.
+		 */
+		if (buffer_unwritten(bh) || buffer_delay(bh) ||
+		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
+		     !buffer_mapped(bh) && (unmapped || startio))) {
+			/*
+			 * Make sure we don't use a read-only iomap
+			 */
+			if (flags == BMAPI_READ)
+				iomap_valid = 0;
+
+			if (buffer_unwritten(bh)) {
+				type = IOMAP_UNWRITTEN;
+				flags = BMAPI_WRITE|BMAPI_IGNSTATE;
+			} else if (buffer_delay(bh)) {
+				type = IOMAP_DELAY;
+				flags = BMAPI_ALLOCATE;
+				if (!startio)
+					flags |= trylock_flag;
+			} else {
+				type = IOMAP_NEW;
+				flags = BMAPI_WRITE|BMAPI_MMAP;
 			}
-			if (iomp) {
-				xfs_map_at_offset(page, bh, p_offset,
-						inode->i_blkbits, iomp);
+
+			if (!iomap_valid) {
+				if (type == IOMAP_NEW) {
+					size = xfs_probe_cluster(inode,
+							page, bh, head, 0);
+				} else {
+					size = len;
+				}
+
+				err = xfs_map_blocks(inode, offset, size,
+						&iomap, flags);
+				if (err)
+					goto error;
+				iomap_valid = xfs_iomap_valid(&iomap, offset);
+			}
+			if (iomap_valid) {
+				xfs_map_at_offset(bh, offset,
+						inode->i_blkbits, &iomap);
 				if (startio) {
-					bh_arr[cnt++] = bh;
+					xfs_add_to_ioend(inode, bh, offset,
+							type, &ioend,
+							!iomap_valid);
 				} else {
 					set_buffer_dirty(bh);
 					unlock_buffer(bh);
 					mark_buffer_dirty(bh);
 				}
 				page_dirty--;
+				count++;
+			}
+		} else if (buffer_uptodate(bh) && startio) {
+			/*
+			 * we got here because the buffer is already mapped.
+			 * That means it must already have extents allocated
+			 * underneath it. Map the extent by reading it.
+			 */
+			if (!iomap_valid || type != 0) {
+				flags = BMAPI_READ;
+				size = xfs_probe_cluster(inode, page, bh,
+								head, 1);
+				err = xfs_map_blocks(inode, offset, size,
+						&iomap, flags);
+				if (err)
+					goto error;
+				iomap_valid = xfs_iomap_valid(&iomap, offset);
 			}
-		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
-			   (unmapped || startio)) {
 
-			if (!buffer_mapped(bh)) {
-				int	size;
-
-				/*
-				 * Getting here implies an unmapped buffer
-				 * was found, and we are in a path where we
-				 * need to write the whole page out.
-				 */
-				if (!iomp) {
-					size = xfs_probe_unmapped_cluster(
-							inode, page, bh, head);
-					err = xfs_map_blocks(inode, offset,
-							size, &iomap,
-							BMAPI_WRITE|BMAPI_MMAP);
-					if (err) {
-						goto error;
-					}
-					iomp = xfs_offset_to_map(page, &iomap,
-								     p_offset);
-				}
-				if (iomp) {
-					xfs_map_at_offset(page,
-							bh, p_offset,
-							inode->i_blkbits, iomp);
-					if (startio) {
-						bh_arr[cnt++] = bh;
-					} else {
-						set_buffer_dirty(bh);
-						unlock_buffer(bh);
-						mark_buffer_dirty(bh);
-					}
-					page_dirty--;
-				}
-			} else if (startio) {
-				if (buffer_uptodate(bh) &&
-				    !test_and_set_bit(BH_Lock, &bh->b_state)) {
-					bh_arr[cnt++] = bh;
-					page_dirty--;
-				}
-			}
+			type = 0;
+			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
+				ASSERT(buffer_mapped(bh));
+				if (iomap_valid)
+					all_bh = 1;
+				xfs_add_to_ioend(inode, bh, offset, type,
+						&ioend, !iomap_valid);
+				page_dirty--;
+				count++;
+			} else {
+				iomap_valid = 0;
+			}
+		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
+			   (unmapped || startio)) {
+			iomap_valid = 0;
 		}
-	} while (offset += len, p_offset += len,
-		((bh = bh->b_this_page) != head));
+
+		if (!iohead)
+			iohead = ioend;
+
+	} while (offset += len, ((bh = bh->b_this_page) != head));
 
 	if (uptodate && bh == head)
 		SetPageUptodate(page);
 
-	if (startio) {
-		xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);
-	}
+	if (startio)
+		xfs_start_page_writeback(page, wbc, 1, count);
 
-	if (iomp) {
-		offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
+	if (ioend && iomap_valid) {
+		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
 					PAGE_CACHE_SHIFT;
 		tlast = min_t(pgoff_t, offset, last_index);
-		xfs_cluster_write(inode, page->index + 1, iomp, wbc,
-					startio, unmapped, tlast);
+		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
+					wbc, startio, all_bh, tlast);
 	}
 
+	if (iohead)
+		xfs_submit_ioend(iohead);
+
 	return page_dirty;
 
 error:
-	for (i = 0; i < cnt; i++) {
-		unlock_buffer(bh_arr[i]);
-	}
+	if (iohead)
+		xfs_cancel_ioend(iohead);
 
 	/*
 	 * If it's delalloc and we have nowhere to put it,
@@ -916,9 +1009,8 @@ xfs_page_state_convert(
 	 * us to try again.
 	 */
 	if (err != -EAGAIN) {
-		if (!unmapped) {
+		if (!unmapped)
 			block_invalidatepage(page, 0);
-		}
 		ClearPageUptodate(page);
 	}
 	return err;
@@ -982,7 +1074,7 @@ __linvfs_get_block(
 	}
 
 	/* If this is a realtime file, data might be on a new device */
-	bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
+	bh_result->b_bdev = iomap.iomap_target->bt_bdev;
 
 	/* If we previously allocated a block out beyond eof and
 	 * we are now coming back to use it then we will need to
@@ -1094,10 +1186,10 @@ linvfs_direct_IO(
 	if (error)
 		return -error;
 
-	iocb->private = xfs_alloc_ioend(inode);
+	iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
 
 	ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-		iomap.iomap_target->pbr_bdev,
+		iomap.iomap_target->bt_bdev,
 		iov, offset, nr_segs,
 		linvfs_get_blocks_direct,
 		linvfs_end_io_direct);
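Editor's note: the xfs_aops.c rework above replaces the old per-page bh_arr[]/xfs_submit_page() scheme with chained ioends that xfs_submit_ioend() turns into large bios. As an illustration of the chaining that xfs_add_to_ioend() performs, here is a minimal stand-alone C sketch; struct buf and struct ioend below are simplified, hypothetical stand-ins for the kernel's buffer_head and xfs_ioend, not the real API:

#include <stdlib.h>

/* Hypothetical, simplified stand-ins for buffer_head and xfs_ioend. */
struct buf {
	unsigned long	blocknr;
	struct buf	*private;	/* models bh->b_private: next buf in the ioend */
};

struct ioend {
	unsigned int	type;		/* models io_type: delalloc/unwritten/0 */
	struct buf	*head, *tail;	/* models io_buffer_head/io_buffer_tail */
	struct ioend	*next;		/* models io_list: next ioend in the chain */
};

/* Same shape as xfs_add_to_ioend(): append to the current ioend while the
 * buffer type matches; otherwise allocate a new ioend and chain it behind. */
static void add_to_ioend(struct buf *b, unsigned int type, struct ioend **result)
{
	struct ioend *io = *result;

	if (!io || type != io->type) {
		struct ioend *prev = *result;

		io = calloc(1, sizeof(*io));
		io->type = type;
		io->head = io->tail = b;
		if (prev)
			prev->next = io;	/* chain behind the previous ioend */
		*result = io;
	} else {
		io->tail->private = b;		/* append to the buffer list */
		io->tail = b;
	}
	b->private = NULL;			/* terminate the buffer list */
}

xfs_submit_ioend() then walks each ioend's buffer list exactly once, cutting a new bio whenever two consecutive buffers are not disk-contiguous.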
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -23,14 +23,24 @@ extern mempool_t *xfs_ioend_pool;
 
 typedef void (*xfs_ioend_func_t)(void *);
 
+/*
+ * xfs_ioend struct manages large extent writes for XFS.
+ * It can manage several multi-page bio's at once.
+ */
 typedef struct xfs_ioend {
+	struct xfs_ioend	*io_list;	/* next ioend in chain */
+	unsigned int		io_type;	/* delalloc / unwritten */
 	unsigned int		io_uptodate;	/* I/O status register */
 	atomic_t		io_remaining;	/* hold count */
 	struct vnode		*io_vnode;	/* file being written to */
 	struct buffer_head	*io_buffer_head;/* buffer linked list head */
+	struct buffer_head	*io_buffer_tail;/* buffer linked list tail */
 	size_t			io_size;	/* size of the extent */
 	xfs_off_t		io_offset;	/* offset in the file */
 	struct work_struct	io_work;	/* xfsdatad work queue */
 } xfs_ioend_t;
 
+extern struct address_space_operations linvfs_aops;
+extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
+
 #endif	/* __XFS_IOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -31,76 +31,77 @@
 #include <linux/kthread.h>
 #include "xfs_linux.h"
 
-STATIC kmem_cache_t *pagebuf_zone;
-STATIC kmem_shaker_t pagebuf_shake;
+STATIC kmem_zone_t *xfs_buf_zone;
+STATIC kmem_shaker_t xfs_buf_shake;
+STATIC int xfsbufd(void *);
 STATIC int xfsbufd_wakeup(int, gfp_t);
-STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
+STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
 
 STATIC struct workqueue_struct *xfslogd_workqueue;
 struct workqueue_struct *xfsdatad_workqueue;
 
-#ifdef PAGEBUF_TRACE
+#ifdef XFS_BUF_TRACE
 void
-pagebuf_trace(
-	xfs_buf_t	*pb,
+xfs_buf_trace(
	xfs_buf_t	*bp,
 	char		*id,
 	void		*data,
 	void		*ra)
 {
-	ktrace_enter(pagebuf_trace_buf,
-		pb, id,
-		(void *)(unsigned long)pb->pb_flags,
-		(void *)(unsigned long)pb->pb_hold.counter,
-		(void *)(unsigned long)pb->pb_sema.count.counter,
+	ktrace_enter(xfs_buf_trace_buf,
+		bp, id,
+		(void *)(unsigned long)bp->b_flags,
+		(void *)(unsigned long)bp->b_hold.counter,
+		(void *)(unsigned long)bp->b_sema.count.counter,
 		(void *)current,
 		data, ra,
-		(void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
-		(void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
-		(void *)(unsigned long)pb->pb_buffer_length,
+		(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
+		(void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
+		(void *)(unsigned long)bp->b_buffer_length,
 		NULL, NULL, NULL, NULL, NULL);
 }
-ktrace_t *pagebuf_trace_buf;
-#define PAGEBUF_TRACE_SIZE	4096
-#define PB_TRACE(pb, id, data)	\
-	pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
+ktrace_t *xfs_buf_trace_buf;
+#define XFS_BUF_TRACE_SIZE	4096
+#define XB_TRACE(bp, id, data)	\
+	xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
 #else
-#define PB_TRACE(pb, id, data)	do { } while (0)
+#define XB_TRACE(bp, id, data)	do { } while (0)
 #endif
 
-#ifdef PAGEBUF_LOCK_TRACKING
-# define PB_SET_OWNER(pb)	((pb)->pb_last_holder = current->pid)
-# define PB_CLEAR_OWNER(pb)	((pb)->pb_last_holder = -1)
-# define PB_GET_OWNER(pb)	((pb)->pb_last_holder)
+#ifdef XFS_BUF_LOCK_TRACKING
+# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
+# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
+# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
 #else
-# define PB_SET_OWNER(pb)	do { } while (0)
-# define PB_CLEAR_OWNER(pb)	do { } while (0)
-# define PB_GET_OWNER(pb)	do { } while (0)
+# define XB_SET_OWNER(bp)	do { } while (0)
+# define XB_CLEAR_OWNER(bp)	do { } while (0)
+# define XB_GET_OWNER(bp)	do { } while (0)
 #endif
 
-#define pb_to_gfp(flags) \
-	((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \
-	  ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
+#define xb_to_gfp(flags) \
+	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
+	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
 
-#define pb_to_km(flags) \
-	 (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
+#define xb_to_km(flags) \
+	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
 
-#define pagebuf_allocate(flags) \
-	kmem_zone_alloc(pagebuf_zone, pb_to_km(flags))
-#define pagebuf_deallocate(pb) \
-	kmem_zone_free(pagebuf_zone, (pb));
+#define xfs_buf_allocate(flags) \
+	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
+#define xfs_buf_deallocate(bp) \
+	kmem_zone_free(xfs_buf_zone, (bp));
 
 /*
  *	Page Region interfaces.
  *
  *	For pages in filesystems where the blocksize is smaller than the
  *	pagesize, we use the page->private field (long) to hold a bitmap
  *	of uptodate regions within the page.
 *
 *	Each such region is "bytes per page / bits per long" bytes long.
 *
 *	NBPPR == number-of-bytes-per-page-region
 *	BTOPR == bytes-to-page-region (rounded up)
 *	BTOPRT == bytes-to-page-region-truncated (rounded down)
 */
 #if (BITS_PER_LONG == 32)
 #define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
@@ -159,7 +160,7 @@ test_page_region(
 }
 
 /*
- * Mapping of multi-page buffers into contiguous virtual space
+ *	Mapping of multi-page buffers into contiguous virtual space
  */
 
 typedef struct a_list {
@@ -172,7 +173,7 @@ STATIC int as_list_len;
 STATIC DEFINE_SPINLOCK(as_lock);
 
 /*
- * Try to batch vunmaps because they are costly.
+ *	Try to batch vunmaps because they are costly.
 */
 STATIC void
 free_address(
@@ -215,83 +216,83 @@ purge_addresses(void)
 }
 
 /*
- *	Internal pagebuf object manipulation
+ *	Internal xfs_buf_t object manipulation
 */
 
 STATIC void
-_pagebuf_initialize(
-	xfs_buf_t		*pb,
+_xfs_buf_initialize(
+	xfs_buf_t		*bp,
 	xfs_buftarg_t		*target,
-	loff_t			range_base,
+	xfs_off_t		range_base,
 	size_t			range_length,
-	page_buf_flags_t	flags)
+	xfs_buf_flags_t		flags)
 {
 	/*
-	 * We don't want certain flags to appear in pb->pb_flags.
+	 * We don't want certain flags to appear in b_flags.
 	 */
-	flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
-
-	memset(pb, 0, sizeof(xfs_buf_t));
-	atomic_set(&pb->pb_hold, 1);
-	init_MUTEX_LOCKED(&pb->pb_iodonesema);
-	INIT_LIST_HEAD(&pb->pb_list);
-	INIT_LIST_HEAD(&pb->pb_hash_list);
-	init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
-	PB_SET_OWNER(pb);
-	pb->pb_target = target;
-	pb->pb_file_offset = range_base;
+	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
+
+	memset(bp, 0, sizeof(xfs_buf_t));
+	atomic_set(&bp->b_hold, 1);
+	init_MUTEX_LOCKED(&bp->b_iodonesema);
+	INIT_LIST_HEAD(&bp->b_list);
+	INIT_LIST_HEAD(&bp->b_hash_list);
+	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
+	XB_SET_OWNER(bp);
+	bp->b_target = target;
+	bp->b_file_offset = range_base;
 	/*
 	 * Set buffer_length and count_desired to the same value initially.
 	 * I/O routines should use count_desired, which will be the same in
 	 * most cases but may be reset (e.g. XFS recovery).
 	 */
-	pb->pb_buffer_length = pb->pb_count_desired = range_length;
-	pb->pb_flags = flags;
-	pb->pb_bn = XFS_BUF_DADDR_NULL;
-	atomic_set(&pb->pb_pin_count, 0);
-	init_waitqueue_head(&pb->pb_waiters);
-
-	XFS_STATS_INC(pb_create);
-	PB_TRACE(pb, "initialize", target);
+	bp->b_buffer_length = bp->b_count_desired = range_length;
+	bp->b_flags = flags;
+	bp->b_bn = XFS_BUF_DADDR_NULL;
+	atomic_set(&bp->b_pin_count, 0);
+	init_waitqueue_head(&bp->b_waiters);
+
+	XFS_STATS_INC(xb_create);
+	XB_TRACE(bp, "initialize", target);
 }
 
 /*
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
 */
 STATIC int
-_pagebuf_get_pages(
-	xfs_buf_t		*pb,
+_xfs_buf_get_pages(
+	xfs_buf_t		*bp,
 	int			page_count,
-	page_buf_flags_t	flags)
+	xfs_buf_flags_t		flags)
 {
 	/* Make sure that we have a page list */
-	if (pb->pb_pages == NULL) {
-		pb->pb_offset = page_buf_poff(pb->pb_file_offset);
-		pb->pb_page_count = page_count;
-		if (page_count <= PB_PAGES) {
-			pb->pb_pages = pb->pb_page_array;
+	if (bp->b_pages == NULL) {
+		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
+		bp->b_page_count = page_count;
+		if (page_count <= XB_PAGES) {
+			bp->b_pages = bp->b_page_array;
 		} else {
-			pb->pb_pages = kmem_alloc(sizeof(struct page *) *
-					page_count, pb_to_km(flags));
-			if (pb->pb_pages == NULL)
+			bp->b_pages = kmem_alloc(sizeof(struct page *) *
+					page_count, xb_to_km(flags));
+			if (bp->b_pages == NULL)
 				return -ENOMEM;
 		}
-		memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);
+		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
 	}
 	return 0;
 }
 
 /*
- *	Frees pb_pages if it was malloced.
+ *	Frees b_pages if it was allocated.
 */
 STATIC void
-_pagebuf_free_pages(
+_xfs_buf_free_pages(
 	xfs_buf_t	*bp)
 {
-	if (bp->pb_pages != bp->pb_page_array) {
-		kmem_free(bp->pb_pages,
-			  bp->pb_page_count * sizeof(struct page *));
+	if (bp->b_pages != bp->b_page_array) {
+		kmem_free(bp->b_pages,
+			  bp->b_page_count * sizeof(struct page *));
 	}
 }
...
@@ -299,79 +300,79 @@ _pagebuf_free_pages(
  *	Releases the specified buffer.
  *
  *	The modification state of any associated pages is left unchanged.
- *	The buffer most not be on any hash - use pagebuf_rele instead for
+ *	The buffer most not be on any hash - use xfs_buf_rele instead for
  *	hashed and refcounted buffers
  */
 void
-pagebuf_free(
+xfs_buf_free(
 	xfs_buf_t		*bp)
 {
-	PB_TRACE(bp, "free", 0);
-	ASSERT(list_empty(&bp->pb_hash_list));
+	XB_TRACE(bp, "free", 0);
+	ASSERT(list_empty(&bp->b_hash_list));
 
-	if (bp->pb_flags & _PBF_PAGE_CACHE) {
+	if (bp->b_flags & _XBF_PAGE_CACHE) {
 		uint		i;
 
-		if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
-			free_address(bp->pb_addr - bp->pb_offset);
+		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
+			free_address(bp->b_addr - bp->b_offset);
 
-		for (i = 0; i < bp->pb_page_count; i++)
-			page_cache_release(bp->pb_pages[i]);
-		_pagebuf_free_pages(bp);
-	} else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
+		for (i = 0; i < bp->b_page_count; i++)
+			page_cache_release(bp->b_pages[i]);
+		_xfs_buf_free_pages(bp);
+	} else if (bp->b_flags & _XBF_KMEM_ALLOC) {
 		 /*
-		  * XXX(hch): bp->pb_count_desired might be incorrect (see
-		  * pagebuf_associate_memory for details), but fortunately
+		  * XXX(hch): bp->b_count_desired might be incorrect (see
+		  * xfs_buf_associate_memory for details), but fortunately
 		  * the Linux version of kmem_free ignores the len argument..
 		  */
-		kmem_free(bp->pb_addr, bp->pb_count_desired);
-		_pagebuf_free_pages(bp);
+		kmem_free(bp->b_addr, bp->b_count_desired);
+		_xfs_buf_free_pages(bp);
 	}
 
-	pagebuf_deallocate(bp);
+	xfs_buf_deallocate(bp);
 }
 /*
  *	Finds all pages for buffer in question and builds it's page list.
  */
 STATIC int
-_pagebuf_lookup_pages(
+_xfs_buf_lookup_pages(
 	xfs_buf_t		*bp,
 	uint			flags)
 {
-	struct address_space	*mapping = bp->pb_target->pbr_mapping;
-	size_t			blocksize = bp->pb_target->pbr_bsize;
-	size_t			size = bp->pb_count_desired;
+	struct address_space	*mapping = bp->b_target->bt_mapping;
+	size_t			blocksize = bp->b_target->bt_bsize;
+	size_t			size = bp->b_count_desired;
 	size_t			nbytes, offset;
-	gfp_t			gfp_mask = pb_to_gfp(flags);
+	gfp_t			gfp_mask = xb_to_gfp(flags);
 	unsigned short		page_count, i;
 	pgoff_t			first;
-	loff_t			end;
+	xfs_off_t		end;
 	int			error;
 
-	end = bp->pb_file_offset + bp->pb_buffer_length;
-	page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
+	end = bp->b_file_offset + bp->b_buffer_length;
+	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
 
-	error = _pagebuf_get_pages(bp, page_count, flags);
+	error = _xfs_buf_get_pages(bp, page_count, flags);
 	if (unlikely(error))
 		return error;
-	bp->pb_flags |= _PBF_PAGE_CACHE;
+	bp->b_flags |= _XBF_PAGE_CACHE;
 
-	offset = bp->pb_offset;
-	first = bp->pb_file_offset >> PAGE_CACHE_SHIFT;
+	offset = bp->b_offset;
+	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
 
-	for (i = 0; i < bp->pb_page_count; i++) {
+	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page;
 		uint		retries = 0;
 
 	      retry:
 		page = find_or_create_page(mapping, first + i, gfp_mask);
 		if (unlikely(page == NULL)) {
-			if (flags & PBF_READ_AHEAD) {
-				bp->pb_page_count = i;
-				for (i = 0; i < bp->pb_page_count; i++)
-					unlock_page(bp->pb_pages[i]);
+			if (flags & XBF_READ_AHEAD) {
+				bp->b_page_count = i;
+				for (i = 0; i < bp->b_page_count; i++)
+					unlock_page(bp->b_pages[i]);
 				return -ENOMEM;
 			}
...
@@ -387,13 +388,13 @@ _pagebuf_lookup_pages(
 					"deadlock in %s (mode:0x%x)\n",
 					__FUNCTION__, gfp_mask);
 
-			XFS_STATS_INC(pb_page_retries);
+			XFS_STATS_INC(xb_page_retries);
 			xfsbufd_wakeup(0, gfp_mask);
 			blk_congestion_wait(WRITE, HZ/50);
 			goto retry;
 		}
 
-		XFS_STATS_INC(pb_page_found);
+		XFS_STATS_INC(xb_page_found);
 
 		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
 		size -= nbytes;
...
@@ -401,27 +402,27 @@ _pagebuf_lookup_pages(
 		if (!PageUptodate(page)) {
 			page_count--;
 			if (blocksize >= PAGE_CACHE_SIZE) {
-				if (flags & PBF_READ)
-					bp->pb_locked = 1;
+				if (flags & XBF_READ)
+					bp->b_locked = 1;
 			} else if (!PagePrivate(page)) {
 				if (test_page_region(page, offset, nbytes))
 					page_count++;
 			}
 		}
 
-		bp->pb_pages[i] = page;
+		bp->b_pages[i] = page;
 		offset = 0;
 	}
 
-	if (!bp->pb_locked) {
-		for (i = 0; i < bp->pb_page_count; i++)
-			unlock_page(bp->pb_pages[i]);
+	if (!bp->b_locked) {
+		for (i = 0; i < bp->b_page_count; i++)
+			unlock_page(bp->b_pages[i]);
 	}
 
-	if (page_count == bp->pb_page_count)
-		bp->pb_flags |= PBF_DONE;
+	if (page_count == bp->b_page_count)
+		bp->b_flags |= XBF_DONE;
 
-	PB_TRACE(bp, "lookup_pages", (long)page_count);
+	XB_TRACE(bp, "lookup_pages", (long)page_count);
 	return error;
 }
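The retry path in _xfs_buf_lookup_pages() wakes the flush daemon, logs a warning every so often, and backs off briefly before retrying a failed page allocation. A hedged user-space model of that retry-and-backoff shape (the message text, retry threshold, and sleep interval are illustrative, not the kernel's exact values):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Retry an allocation that may fail transiently, warning every 100
 * attempts and backing off, as the page lookup above does with
 * xfsbufd_wakeup() + blk_congestion_wait(). */
static void *alloc_with_retry(size_t size)
{
        unsigned retries = 0;
        void *p;

        while ((p = malloc(size)) == NULL) {
                if (++retries % 100 == 0)
                        fprintf(stderr, "possible deadlock: %u retries\n",
                                retries);
                usleep(1000 * 20);      /* rough HZ/50 stand-in */
        }
        return p;
}

int main(void)
{
        free(alloc_with_retry(4096));
        return 0;
}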
...
@@ -429,23 +430,23 @@ _pagebuf_lookup_pages(
  *	Map buffer into kernel address-space if nessecary.
  */
 STATIC int
-_pagebuf_map_pages(
+_xfs_buf_map_pages(
 	xfs_buf_t		*bp,
 	uint			flags)
 {
 	/* A single page buffer is always mappable */
-	if (bp->pb_page_count == 1) {
-		bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
-		bp->pb_flags |= PBF_MAPPED;
-	} else if (flags & PBF_MAPPED) {
+	if (bp->b_page_count == 1) {
+		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
+		bp->b_flags |= XBF_MAPPED;
+	} else if (flags & XBF_MAPPED) {
 		if (as_list_len > 64)
 			purge_addresses();
-		bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
-				VM_MAP, PAGE_KERNEL);
-		if (unlikely(bp->pb_addr == NULL))
+		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
+				VM_MAP, PAGE_KERNEL);
+		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
-		bp->pb_addr += bp->pb_offset;
-		bp->pb_flags |= PBF_MAPPED;
+		bp->b_addr += bp->b_offset;
+		bp->b_flags |= XBF_MAPPED;
 	}
 
 	return 0;
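_xfs_buf_map_pages() only pays for a vmap() when the buffer actually spans more than one page; a single page is addressed directly. A small stand-alone model of that decision, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Which mapping path would a buffer of 'len' bytes starting at byte
 * 'offset' inside its first page take? One page can be addressed
 * directly; more than one needs a contiguous virtual mapping. */
static const char *map_path(unsigned offset, unsigned len)
{
        unsigned page_count = (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;

        return page_count == 1 ? "direct page_address()" : "vmap()";
}

int main(void)
{
        printf("%s\n", map_path(512, 1024));    /* direct */
        printf("%s\n", map_path(512, 8192));    /* vmap */
        return 0;
}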
...
@@ -456,9 +457,7 @@ _pagebuf_map_pages(
  */
 
 /*
- *	_pagebuf_find
- *
- *	Looks up, and creates if absent, a lockable buffer for
+ *	Look up, and creates if absent, a lockable buffer for
  *	a given range of an inode.  The buffer is returned
  *	locked.  If other overlapping buffers exist, they are
  *	released before the new buffer is created and locked,
...
@@ -466,55 +465,55 @@ _pagebuf_map_pages(
  *	are unlocked.  No I/O is implied by this call.
  */
 xfs_buf_t *
-_pagebuf_find(
+_xfs_buf_find(
 	xfs_buftarg_t		*btp,	/* block device target		*/
-	loff_t			ioff,	/* starting offset of range	*/
+	xfs_off_t		ioff,	/* starting offset of range	*/
 	size_t			isize,	/* length of range		*/
-	page_buf_flags_t	flags,	/* PBF_TRYLOCK			*/
-	xfs_buf_t		*new_pb)/* newly allocated buffer	*/
+	xfs_buf_flags_t		flags,
+	xfs_buf_t		*new_bp)
 {
-	loff_t			range_base;
+	xfs_off_t		range_base;
 	size_t			range_length;
 	xfs_bufhash_t		*hash;
-	xfs_buf_t		*pb, *n;
+	xfs_buf_t		*bp, *n;
 
 	range_base = (ioff << BBSHIFT);
 	range_length = (isize << BBSHIFT);
 
 	/* Check for IOs smaller than the sector size / not sector aligned */
-	ASSERT(!(range_length < (1 << btp->pbr_sshift)));
-	ASSERT(!(range_base & (loff_t)btp->pbr_smask));
+	ASSERT(!(range_length < (1 << btp->bt_sshift)));
+	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
 
 	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
 
 	spin_lock(&hash->bh_lock);
 
-	list_for_each_entry_safe(pb, n, &hash->bh_list, pb_hash_list) {
-		ASSERT(btp == pb->pb_target);
-		if (pb->pb_file_offset == range_base &&
-		    pb->pb_buffer_length == range_length) {
+	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
+		ASSERT(btp == bp->b_target);
+		if (bp->b_file_offset == range_base &&
+		    bp->b_buffer_length == range_length) {
 			/*
-			 * If we look at something bring it to the
+			 * If we look at something, bring it to the
 			 * front of the list for next time.
 			 */
-			atomic_inc(&pb->pb_hold);
-			list_move(&pb->pb_hash_list, &hash->bh_list);
+			atomic_inc(&bp->b_hold);
+			list_move(&bp->b_hash_list, &hash->bh_list);
 			goto found;
 		}
 	}
 
 	/* No match found */
-	if (new_pb) {
-		_pagebuf_initialize(new_pb, btp, range_base,
+	if (new_bp) {
+		_xfs_buf_initialize(new_bp, btp, range_base,
 				range_length, flags);
-		new_pb->pb_hash = hash;
-		list_add(&new_pb->pb_hash_list, &hash->bh_list);
+		new_bp->b_hash = hash;
+		list_add(&new_bp->b_hash_list, &hash->bh_list);
 	} else {
-		XFS_STATS_INC(pb_miss_locked);
+		XFS_STATS_INC(xb_miss_locked);
 	}
 
 	spin_unlock(&hash->bh_lock);
-	return new_pb;
+	return new_bp;
 
 found:
 	spin_unlock(&hash->bh_lock);
...
@@ -523,74 +522,72 @@ _pagebuf_find(
 	 * if this does not work then we need to drop the
 	 * spinlock and do a hard attempt on the semaphore.
 	 */
-	if (down_trylock(&pb->pb_sema)) {
-		if (!(flags & PBF_TRYLOCK)) {
+	if (down_trylock(&bp->b_sema)) {
+		if (!(flags & XBF_TRYLOCK)) {
 			/* wait for buffer ownership */
-			PB_TRACE(pb, "get_lock", 0);
-			pagebuf_lock(pb);
-			XFS_STATS_INC(pb_get_locked_waited);
+			XB_TRACE(bp, "get_lock", 0);
+			xfs_buf_lock(bp);
+			XFS_STATS_INC(xb_get_locked_waited);
 		} else {
 			/* We asked for a trylock and failed, no need
 			 * to look at file offset and length here, we
-			 * know that this pagebuf at least overlaps our
-			 * pagebuf and is locked, therefore our buffer
-			 * either does not exist, or is this buffer
+			 * know that this buffer at least overlaps our
+			 * buffer and is locked, therefore our buffer
+			 * either does not exist, or is this buffer.
 			 */
-			pagebuf_rele(pb);
-			XFS_STATS_INC(pb_busy_locked);
-			return (NULL);
+			xfs_buf_rele(bp);
+			XFS_STATS_INC(xb_busy_locked);
+			return NULL;
 		}
 	} else {
 		/* trylock worked */
-		PB_SET_OWNER(pb);
+		XB_SET_OWNER(bp);
 	}
 
-	if (pb->pb_flags & PBF_STALE) {
-		ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0);
-		pb->pb_flags &= PBF_MAPPED;
+	if (bp->b_flags & XBF_STALE) {
+		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
+		bp->b_flags &= XBF_MAPPED;
 	}
-	PB_TRACE(pb, "got_lock", 0);
-	XFS_STATS_INC(pb_get_locked);
-	return (pb);
+	XB_TRACE(bp, "got_lock", 0);
+	XFS_STATS_INC(xb_get_locked);
+	return bp;
 }
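_xfs_buf_find() keys buffers by (file offset, length) within one hash bucket and moves a hit to the front of the bucket list so hot buffers are found quickly next time. A self-contained model of that move-to-front lookup, simplified to a singly linked list with no locking:

#include <stdio.h>

struct buf {
        long            offset, length;
        struct buf      *next;
};

/* Find (offset, length) in a bucket; on a hit, unlink the node and push
 * it at the front, like the list_move() in _xfs_buf_find(). */
static struct buf *bucket_find(struct buf **head, long offset, long length)
{
        struct buf **pp, *bp;

        for (pp = head; (bp = *pp) != NULL; pp = &bp->next) {
                if (bp->offset == offset && bp->length == length) {
                        *pp = bp->next;         /* unlink */
                        bp->next = *head;       /* move to front */
                        *head = bp;
                        return bp;
                }
        }
        return NULL;
}

int main(void)
{
        struct buf a = { 0, 8, NULL }, b = { 8, 8, NULL };
        struct buf *head = &a;

        a.next = &b;
        bucket_find(&head, 8, 8);
        printf("front: offset %ld\n", head->offset);    /* prints 8 */
        return 0;
}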
 /*
- *	xfs_buf_get_flags assembles a buffer covering the specified range.
- *
+ *	Assembles a buffer covering the specified range.
  *	Storage in memory for all portions of the buffer will be allocated,
  *	although backing storage may not be.
  */
 xfs_buf_t *
-xfs_buf_get_flags(			/* allocate a buffer		*/
+xfs_buf_get_flags(
 	xfs_buftarg_t		*target,/* target for buffer		*/
-	loff_t			ioff,	/* starting offset of range	*/
+	xfs_off_t		ioff,	/* starting offset of range	*/
 	size_t			isize,	/* length of range		*/
-	page_buf_flags_t	flags)	/* PBF_TRYLOCK			*/
+	xfs_buf_flags_t		flags)
 {
-	xfs_buf_t		*pb, *new_pb;
+	xfs_buf_t		*bp, *new_bp;
 	int			error = 0, i;
 
-	new_pb = pagebuf_allocate(flags);
-	if (unlikely(!new_pb))
+	new_bp = xfs_buf_allocate(flags);
+	if (unlikely(!new_bp))
 		return NULL;
 
-	pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
-	if (pb == new_pb) {
-		error = _pagebuf_lookup_pages(pb, flags);
+	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
+	if (bp == new_bp) {
+		error = _xfs_buf_lookup_pages(bp, flags);
 		if (error)
 			goto no_buffer;
 	} else {
-		pagebuf_deallocate(new_pb);
-		if (unlikely(pb == NULL))
+		xfs_buf_deallocate(new_bp);
+		if (unlikely(bp == NULL))
 			return NULL;
 	}
 
-	for (i = 0; i < pb->pb_page_count; i++)
-		mark_page_accessed(pb->pb_pages[i]);
+	for (i = 0; i < bp->b_page_count; i++)
+		mark_page_accessed(bp->b_pages[i]);
 
-	if (!(pb->pb_flags & PBF_MAPPED)) {
-		error = _pagebuf_map_pages(pb, flags);
+	if (!(bp->b_flags & XBF_MAPPED)) {
+		error = _xfs_buf_map_pages(bp, flags);
 		if (unlikely(error)) {
 			printk(KERN_WARNING "%s: failed to map pages\n",
 					__FUNCTION__);
...
@@ -598,97 +595,97 @@ xfs_buf_get_flags( /* allocate a buffer */
 		}
 	}
 
-	XFS_STATS_INC(pb_get);
+	XFS_STATS_INC(xb_get);
 
 	/*
 	 * Always fill in the block number now, the mapped cases can do
 	 * their own overlay of this later.
 	 */
-	pb->pb_bn = ioff;
-	pb->pb_count_desired = pb->pb_buffer_length;
+	bp->b_bn = ioff;
+	bp->b_count_desired = bp->b_buffer_length;
 
-	PB_TRACE(pb, "get", (unsigned long)flags);
-	return pb;
+	XB_TRACE(bp, "get", (unsigned long)flags);
+	return bp;
 
  no_buffer:
-	if (flags & (PBF_LOCK | PBF_TRYLOCK))
-		pagebuf_unlock(pb);
-	pagebuf_rele(pb);
+	if (flags & (XBF_LOCK | XBF_TRYLOCK))
+		xfs_buf_unlock(bp);
+	xfs_buf_rele(bp);
 	return NULL;
 }
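xfs_buf_get_flags() allocates a candidate buffer before taking the bucket lock and discards it when _xfs_buf_find() returns an existing one instead. A minimal single-bucket model of that optimistic allocate-then-insert-or-discard pattern (locking elided, names are illustrative):

#include <stdlib.h>

struct buf { long off, len; struct buf *next; };
struct cache { struct buf *head; /* one bucket; lock elided here */ };

/* Look up (off, len); on a miss, publish the caller's candidate. */
static struct buf *cache_find_or_insert(struct cache *c, long off, long len,
                                        struct buf *new_bp)
{
        struct buf *bp;

        for (bp = c->head; bp; bp = bp->next)
                if (bp->off == off && bp->len == len)
                        return bp;              /* hit: candidate unused */
        new_bp->next = c->head;                 /* miss: insert candidate */
        c->head = new_bp;
        return new_bp;
}

/* Optimistic allocation as in xfs_buf_get_flags(): allocate outside the
 * bucket lock, insert-or-discard under it. */
static struct buf *buf_get(struct cache *c, long off, long len)
{
        struct buf *new_bp = calloc(1, sizeof(*new_bp));
        struct buf *bp;

        if (!new_bp)
                return NULL;
        new_bp->off = off;
        new_bp->len = len;
        bp = cache_find_or_insert(c, off, len, new_bp);
        if (bp != new_bp)
                free(new_bp);                   /* existing buffer found */
        return bp;
}

int main(void)
{
        struct cache c = { 0 };

        buf_get(&c, 0, 8);
        buf_get(&c, 0, 8);      /* second call hits, candidate discarded */
        return 0;
}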
 xfs_buf_t *
 xfs_buf_read_flags(
 	xfs_buftarg_t		*target,
-	loff_t			ioff,
+	xfs_off_t		ioff,
 	size_t			isize,
-	page_buf_flags_t	flags)
+	xfs_buf_flags_t		flags)
 {
-	xfs_buf_t		*pb;
+	xfs_buf_t		*bp;
 
-	flags |= PBF_READ;
+	flags |= XBF_READ;
 
-	pb = xfs_buf_get_flags(target, ioff, isize, flags);
-	if (pb) {
-		if (!XFS_BUF_ISDONE(pb)) {
-			PB_TRACE(pb, "read", (unsigned long)flags);
-			XFS_STATS_INC(pb_get_read);
-			pagebuf_iostart(pb, flags);
-		} else if (flags & PBF_ASYNC) {
-			PB_TRACE(pb, "read_async", (unsigned long)flags);
+	bp = xfs_buf_get_flags(target, ioff, isize, flags);
+	if (bp) {
+		if (!XFS_BUF_ISDONE(bp)) {
+			XB_TRACE(bp, "read", (unsigned long)flags);
+			XFS_STATS_INC(xb_get_read);
+			xfs_buf_iostart(bp, flags);
+		} else if (flags & XBF_ASYNC) {
+			XB_TRACE(bp, "read_async", (unsigned long)flags);
 			/*
 			 * Read ahead call which is already satisfied,
 			 * drop the buffer
 			 */
 			goto no_buffer;
 		} else {
-			PB_TRACE(pb, "read_done", (unsigned long)flags);
+			XB_TRACE(bp, "read_done", (unsigned long)flags);
 			/* We do not want read in the flags */
-			pb->pb_flags &= ~PBF_READ;
+			bp->b_flags &= ~XBF_READ;
 		}
 	}
 
-	return pb;
+	return bp;
 
  no_buffer:
-	if (flags & (PBF_LOCK | PBF_TRYLOCK))
-		pagebuf_unlock(pb);
-	pagebuf_rele(pb);
+	if (flags & (XBF_LOCK | XBF_TRYLOCK))
+		xfs_buf_unlock(bp);
+	xfs_buf_rele(bp);
 	return NULL;
 }
 /*
  *	If we are not low on memory then do the readahead in a deadlock
  *	safe manner.
  */
 void
-pagebuf_readahead(
+xfs_buf_readahead(
 	xfs_buftarg_t		*target,
-	loff_t			ioff,
+	xfs_off_t		ioff,
 	size_t			isize,
-	page_buf_flags_t	flags)
+	xfs_buf_flags_t		flags)
 {
 	struct backing_dev_info *bdi;
 
-	bdi = target->pbr_mapping->backing_dev_info;
+	bdi = target->bt_mapping->backing_dev_info;
 	if (bdi_read_congested(bdi))
 		return;
 
-	flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD);
+	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
 	xfs_buf_read_flags(target, ioff, isize, flags);
 }
 
 xfs_buf_t *
-pagebuf_get_empty(
+xfs_buf_get_empty(
 	size_t			len,
 	xfs_buftarg_t		*target)
 {
-	xfs_buf_t		*pb;
+	xfs_buf_t		*bp;
 
-	pb = pagebuf_allocate(0);
-	if (pb)
-		_pagebuf_initialize(pb, target, 0, len, 0);
-	return pb;
+	bp = xfs_buf_allocate(0);
+	if (bp)
+		_xfs_buf_initialize(bp, target, 0, len, 0);
+	return bp;
 }
 static inline struct page *
...
@@ -704,8 +701,8 @@ mem_to_page(
 }
 
 int
-pagebuf_associate_memory(
-	xfs_buf_t		*pb,
+xfs_buf_associate_memory(
+	xfs_buf_t		*bp,
 	void			*mem,
 	size_t			len)
 {
...
@@ -722,40 +719,40 @@ pagebuf_associate_memory(
 		page_count++;
 
 	/* Free any previous set of page pointers */
-	if (pb->pb_pages)
-		_pagebuf_free_pages(pb);
+	if (bp->b_pages)
+		_xfs_buf_free_pages(bp);
 
-	pb->pb_pages = NULL;
-	pb->pb_addr = mem;
+	bp->b_pages = NULL;
+	bp->b_addr = mem;
 
-	rval = _pagebuf_get_pages(pb, page_count, 0);
+	rval = _xfs_buf_get_pages(bp, page_count, 0);
 	if (rval)
 		return rval;
 
-	pb->pb_offset = offset;
+	bp->b_offset = offset;
 	ptr = (size_t) mem & PAGE_CACHE_MASK;
 	end = PAGE_CACHE_ALIGN((size_t) mem + len);
 	end_cur = end;
 	/* set up first page */
-	pb->pb_pages[0] = mem_to_page(mem);
+	bp->b_pages[0] = mem_to_page(mem);
 
 	ptr += PAGE_CACHE_SIZE;
-	pb->pb_page_count = ++i;
+	bp->b_page_count = ++i;
 	while (ptr < end) {
-		pb->pb_pages[i] = mem_to_page((void *)ptr);
-		pb->pb_page_count = ++i;
+		bp->b_pages[i] = mem_to_page((void *)ptr);
+		bp->b_page_count = ++i;
 		ptr += PAGE_CACHE_SIZE;
 	}
-	pb->pb_locked = 0;
+	bp->b_locked = 0;
 
-	pb->pb_count_desired = pb->pb_buffer_length = len;
-	pb->pb_flags |= PBF_MAPPED;
+	bp->b_count_desired = bp->b_buffer_length = len;
+	bp->b_flags |= XBF_MAPPED;
 
 	return 0;
 }
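xfs_buf_associate_memory() must size its page array for a region that need not start on a page boundary, so the intra-page offset of the start can push the span across one extra page. The same arithmetic extracted into a runnable sketch (4 KiB pages assumed):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/* How many page slots does an arbitrary (mem, len) region need? */
static unsigned pages_spanned(uintptr_t mem, size_t len)
{
        uintptr_t first = mem & ~(uintptr_t)(PAGE_SIZE - 1);
        uintptr_t end   = (mem + len + PAGE_SIZE - 1) &
                          ~(uintptr_t)(PAGE_SIZE - 1);

        return (unsigned)((end - first) / PAGE_SIZE);
}

int main(void)
{
        printf("%u\n", pages_spanned(0x1000, 4096));    /* 1 */
        printf("%u\n", pages_spanned(0x1800, 4096));    /* 2: offset spills */
        return 0;
}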
 xfs_buf_t *
-pagebuf_get_no_daddr(
+xfs_buf_get_noaddr(
 	size_t			len,
 	xfs_buftarg_t		*target)
 {
...
@@ -764,10 +761,10 @@ pagebuf_get_no_daddr(
 	void			*data;
 	int			error;
 
-	bp = pagebuf_allocate(0);
+	bp = xfs_buf_allocate(0);
 	if (unlikely(bp == NULL))
 		goto fail;
-	_pagebuf_initialize(bp, target, 0, len, 0);
+	_xfs_buf_initialize(bp, target, 0, len, 0);
 
  try_again:
 	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
...
@@ -776,78 +773,73 @@ pagebuf_get_no_daddr(
 
 	/* check whether alignment matches.. */
 	if ((__psunsigned_t)data !=
-	    ((__psunsigned_t)data & ~target->pbr_smask)) {
+	    ((__psunsigned_t)data & ~target->bt_smask)) {
 		/* .. else double the size and try again */
 		kmem_free(data, malloc_len);
 		malloc_len <<= 1;
 		goto try_again;
 	}
 
-	error = pagebuf_associate_memory(bp, data, len);
+	error = xfs_buf_associate_memory(bp, data, len);
 	if (error)
 		goto fail_free_mem;
-	bp->pb_flags |= _PBF_KMEM_ALLOC;
+	bp->b_flags |= _XBF_KMEM_ALLOC;
 
-	pagebuf_unlock(bp);
+	xfs_buf_unlock(bp);
 
-	PB_TRACE(bp, "no_daddr", data);
+	XB_TRACE(bp, "no_daddr", data);
 	return bp;
 
  fail_free_mem:
 	kmem_free(data, malloc_len);
 fail_free_buf:
-	pagebuf_free(bp);
+	xfs_buf_free(bp);
 fail:
 	return NULL;
 }
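The alignment dance in xfs_buf_get_noaddr() doubles the allocation size until the allocator happens to return a sector-aligned block, relying on larger allocations tending to be better aligned. A user-space model of the same trick; like the original it has no explicit retry bound, so this is a sketch of the shape, not production code:

#include <stdlib.h>
#include <stdint.h>

/* Keep allocating until the block starts on a sector boundary.
 * smask is sectorsize - 1, mirroring bt_smask above. */
static void *alloc_sector_aligned(size_t len, uintptr_t smask)
{
        size_t try_len = len;
        void *p;

        for (;;) {
                p = malloc(try_len);
                if (p == NULL)
                        return NULL;
                if (((uintptr_t)p & smask) == 0)
                        return p;       /* aligned - use it */
                free(p);
                try_len <<= 1;          /* bigger blocks align better */
        }
}

int main(void)
{
        free(alloc_sector_aligned(512, 511));
        return 0;
}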
 /*
- *	pagebuf_hold
- *
  *	Increment reference count on buffer, to hold the buffer concurrently
  *	with another thread which may release (free) the buffer asynchronously.
- *
 *	Must hold the buffer already to call this function.
 */
 void
-pagebuf_hold(
-	xfs_buf_t		*pb)
+xfs_buf_hold(
+	xfs_buf_t		*bp)
 {
-	atomic_inc(&pb->pb_hold);
-	PB_TRACE(pb, "hold", 0);
+	atomic_inc(&bp->b_hold);
+	XB_TRACE(bp, "hold", 0);
 }
 /*
- *	pagebuf_rele
- *
- *	pagebuf_rele releases a hold on the specified buffer.  If the
- *	the hold count is 1, pagebuf_rele calls pagebuf_free.
+ *	Releases a hold on the specified buffer.  If the
+ *	the hold count is 1, calls xfs_buf_free.
  */
 void
-pagebuf_rele(
-	xfs_buf_t		*pb)
+xfs_buf_rele(
+	xfs_buf_t		*bp)
 {
-	xfs_bufhash_t		*hash = pb->pb_hash;
+	xfs_bufhash_t		*hash = bp->b_hash;
 
-	PB_TRACE(pb, "rele", pb->pb_relse);
+	XB_TRACE(bp, "rele", bp->b_relse);
 
-	if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) {
-		if (pb->pb_relse) {
-			atomic_inc(&pb->pb_hold);
+	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
+		if (bp->b_relse) {
+			atomic_inc(&bp->b_hold);
 			spin_unlock(&hash->bh_lock);
-			(*(pb->pb_relse)) (pb);
-		} else if (pb->pb_flags & PBF_FS_MANAGED) {
+			(*(bp->b_relse)) (bp);
+		} else if (bp->b_flags & XBF_FS_MANAGED) {
 			spin_unlock(&hash->bh_lock);
 		} else {
-			ASSERT(!(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)));
-			list_del_init(&pb->pb_hash_list);
+			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
+			list_del_init(&bp->b_hash_list);
 			spin_unlock(&hash->bh_lock);
-			pagebuf_free(pb);
+			xfs_buf_free(bp);
 		}
 	} else {
 		/*
 		 * Catch reference count leaks
 		 */
-		ASSERT(atomic_read(&pb->pb_hold) >= 0);
+		ASSERT(atomic_read(&bp->b_hold) >= 0);
 	}
 }
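xfs_buf_rele() uses atomic_dec_and_lock() so that only the release taking the count to zero acquires the hash lock; dropping one of several references stays lock-free. A sketch of that primitive built from C11 atomics and a pthread mutex, with the same contract as the kernel helper (returns nonzero with the lock held only when the count hit zero):

#include <pthread.h>
#include <stdatomic.h>

static int dec_and_lock(atomic_int *cnt, pthread_mutex_t *lk)
{
        int old = atomic_load(cnt);

        while (old > 1)         /* fast path: not the last reference */
                if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                        return 0;

        pthread_mutex_lock(lk); /* slow path: might be the last one */
        if (atomic_fetch_sub(cnt, 1) == 1)
                return 1;       /* zero reached: caller frees, then unlocks */
        pthread_mutex_unlock(lk);
        return 0;
}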
...
@@ -863,168 +855,122 @@ pagebuf_rele(
  */
 
 /*
- *	pagebuf_cond_lock
- *
- *	pagebuf_cond_lock locks a buffer object, if it is not already locked.
- *	Note that this in no way
- *	locks the underlying pages, so it is only useful for synchronizing
- *	concurrent use of page buffer objects, not for synchronizing independent
- *	access to the underlying pages.
+ *	Locks a buffer object, if it is not already locked.
+ *	Note that this in no way locks the underlying pages, so it is only
+ *	useful for synchronizing concurrent use of buffer objects, not for
+ *	synchronizing independent access to the underlying pages.
  */
 int
-pagebuf_cond_lock(			/* lock buffer, if not locked	*/
-					/* returns -EBUSY if locked)	*/
-	xfs_buf_t		*pb)
+xfs_buf_cond_lock(
+	xfs_buf_t		*bp)
 {
 	int			locked;
 
-	locked = down_trylock(&pb->pb_sema) == 0;
+	locked = down_trylock(&bp->b_sema) == 0;
 	if (locked) {
-		PB_SET_OWNER(pb);
+		XB_SET_OWNER(bp);
 	}
-	PB_TRACE(pb, "cond_lock", (long)locked);
-	return(locked ? 0 : -EBUSY);
+	XB_TRACE(bp, "cond_lock", (long)locked);
+	return locked ? 0 : -EBUSY;
 }
 #if defined(DEBUG) || defined(XFS_BLI_TRACE)
-/*
- *	pagebuf_lock_value
- *
- *	Return lock value for a pagebuf
- */
 int
-pagebuf_lock_value(
-	xfs_buf_t		*pb)
+xfs_buf_lock_value(
+	xfs_buf_t		*bp)
 {
-	return(atomic_read(&pb->pb_sema.count));
+	return atomic_read(&bp->b_sema.count);
 }
 #endif
 /*
- *	pagebuf_lock
- *
- *	pagebuf_lock locks a buffer object.  Note that this in no way
- *	locks the underlying pages, so it is only useful for synchronizing
- *	concurrent use of page buffer objects, not for synchronizing independent
- *	access to the underlying pages.
+ *	Locks a buffer object.
+ *	Note that this in no way locks the underlying pages, so it is only
+ *	useful for synchronizing concurrent use of buffer objects, not for
+ *	synchronizing independent access to the underlying pages.
  */
-int
-pagebuf_lock(
-	xfs_buf_t		*pb)
+void
+xfs_buf_lock(
+	xfs_buf_t		*bp)
 {
-	PB_TRACE(pb, "lock", 0);
-	if (atomic_read(&pb->pb_io_remaining))
-		blk_run_address_space(pb->pb_target->pbr_mapping);
-	down(&pb->pb_sema);
-	PB_SET_OWNER(pb);
-	PB_TRACE(pb, "locked", 0);
-	return 0;
+	XB_TRACE(bp, "lock", 0);
+	if (atomic_read(&bp->b_io_remaining))
+		blk_run_address_space(bp->b_target->bt_mapping);
+	down(&bp->b_sema);
+	XB_SET_OWNER(bp);
+	XB_TRACE(bp, "locked", 0);
 }
 /*
- *	pagebuf_unlock
- *
- *	pagebuf_unlock releases the lock on the buffer object created by
- *	pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
- *	created by pagebuf_pin).
- *
+ *	Releases the lock on the buffer object.
 *	If the buffer is marked delwri but is not queued, do so before we
 *	unlock the buffer as we need to set flags correctly.  We also need to
 *	take a reference for the delwri queue because the unlocker is going to
 *	drop their's and they don't know we just queued it.
 */
 void
-pagebuf_unlock(				/* unlock buffer		*/
-	xfs_buf_t		*pb)	/* buffer to unlock		*/
+xfs_buf_unlock(
+	xfs_buf_t		*bp)
 {
-	if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) {
-		atomic_inc(&pb->pb_hold);
-		pb->pb_flags |= PBF_ASYNC;
-		pagebuf_delwri_queue(pb, 0);
+	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
+		atomic_inc(&bp->b_hold);
+		bp->b_flags |= XBF_ASYNC;
+		xfs_buf_delwri_queue(bp, 0);
 	}
 
-	PB_CLEAR_OWNER(pb);
-	up(&pb->pb_sema);
-	PB_TRACE(pb, "unlock", 0);
+	XB_CLEAR_OWNER(bp);
+	up(&bp->b_sema);
+	XB_TRACE(bp, "unlock", 0);
 }
 /*
  *	Pinning Buffer Storage in Memory
- */
-
-/*
- *	pagebuf_pin
- *
- *	pagebuf_pin locks all of the memory represented by a buffer in
- *	memory.  Multiple calls to pagebuf_pin and pagebuf_unpin, for
- *	the same or different buffers affecting a given page, will
- *	properly count the number of outstanding "pin" requests.  The
- *	buffer may be released after the pagebuf_pin and a different
- *	buffer used when calling pagebuf_unpin, if desired.
- *	pagebuf_pin should be used by the file system when it wants be
- *	assured that no attempt will be made to force the affected
- *	memory to disk.  It does not assure that a given logical page
- *	will not be moved to a different physical page.
+ *	Ensure that no attempt to force a buffer to disk will succeed.
  */
 void
-pagebuf_pin(
-	xfs_buf_t		*pb)
+xfs_buf_pin(
+	xfs_buf_t		*bp)
 {
-	atomic_inc(&pb->pb_pin_count);
-	PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
+	atomic_inc(&bp->b_pin_count);
+	XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
 }
 
-/*
- *	pagebuf_unpin
- *
- *	pagebuf_unpin reverses the locking of memory performed by
- *	pagebuf_pin.  Note that both functions affected the logical
- *	pages associated with the buffer, not the buffer itself.
- */
 void
-pagebuf_unpin(
-	xfs_buf_t		*pb)
+xfs_buf_unpin(
+	xfs_buf_t		*bp)
 {
-	if (atomic_dec_and_test(&pb->pb_pin_count)) {
-		wake_up_all(&pb->pb_waiters);
-	}
-	PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
+	if (atomic_dec_and_test(&bp->b_pin_count))
+		wake_up_all(&bp->b_waiters);
+	XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
 }
 
 int
-pagebuf_ispin(
-	xfs_buf_t		*pb)
+xfs_buf_ispin(
+	xfs_buf_t		*bp)
 {
-	return atomic_read(&pb->pb_pin_count);
+	return atomic_read(&bp->b_pin_count);
 }
 
-/*
- *	pagebuf_wait_unpin
- *
- *	pagebuf_wait_unpin waits until all of the memory associated
- *	with the buffer is not longer locked in memory.  It returns
- *	immediately if none of the affected pages are locked.
- */
-static inline void
-_pagebuf_wait_unpin(
-	xfs_buf_t		*pb)
+STATIC void
+xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
 {
 	DECLARE_WAITQUEUE(wait, current);
 
-	if (atomic_read(&pb->pb_pin_count) == 0)
+	if (atomic_read(&bp->b_pin_count) == 0)
 		return;
 
-	add_wait_queue(&pb->pb_waiters, &wait);
+	add_wait_queue(&bp->b_waiters, &wait);
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (atomic_read(&pb->pb_pin_count) == 0)
+		if (atomic_read(&bp->b_pin_count) == 0)
 			break;
-		if (atomic_read(&pb->pb_io_remaining))
-			blk_run_address_space(pb->pb_target->pbr_mapping);
+		if (atomic_read(&bp->b_io_remaining))
+			blk_run_address_space(bp->b_target->bt_mapping);
 		schedule();
 	}
-	remove_wait_queue(&pb->pb_waiters, &wait);
+	remove_wait_queue(&bp->b_waiters, &wait);
 	set_current_state(TASK_RUNNING);
 }
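The pin counter and wait queue above let writers wait out buffers that the log still holds pinned. A condition-variable model of the same pin/unpin/wait contract (pinners bump a count, the last unpinner wakes everyone, writers block until the count is zero):

#include <pthread.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
static int pin_count;

static void buf_pin(void)
{
        pthread_mutex_lock(&lk);
        pin_count++;
        pthread_mutex_unlock(&lk);
}

static void buf_unpin(void)
{
        pthread_mutex_lock(&lk);
        if (--pin_count == 0)
                pthread_cond_broadcast(&cv);    /* wake_up_all() */
        pthread_mutex_unlock(&lk);
}

static void buf_wait_unpin(void)
{
        pthread_mutex_lock(&lk);
        while (pin_count != 0)
                pthread_cond_wait(&cv, &lk);
        pthread_mutex_unlock(&lk);
}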
...
@@ -1032,241 +978,216 @@ _pagebuf_wait_unpin(
  *	Buffer Utility Routines
  */
 
-/*
- *	pagebuf_iodone
- *
- *	pagebuf_iodone marks a buffer for which I/O is in progress
- *	done with respect to that I/O.  The pb_iodone routine, if
- *	present, will be called as a side-effect.
- */
 STATIC void
-pagebuf_iodone_work(
+xfs_buf_iodone_work(
 	void			*v)
 {
 	xfs_buf_t		*bp = (xfs_buf_t *)v;
 
-	if (bp->pb_iodone)
-		(*(bp->pb_iodone))(bp);
-	else if (bp->pb_flags & PBF_ASYNC)
+	if (bp->b_iodone)
+		(*(bp->b_iodone))(bp);
+	else if (bp->b_flags & XBF_ASYNC)
 		xfs_buf_relse(bp);
 }
 
 void
-pagebuf_iodone(
-	xfs_buf_t		*pb,
+xfs_buf_ioend(
+	xfs_buf_t		*bp,
 	int			schedule)
 {
-	pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
-	if (pb->pb_error == 0)
-		pb->pb_flags |= PBF_DONE;
+	bp->b_flags &= ~(XBF_READ | XBF_WRITE);
+	if (bp->b_error == 0)
+		bp->b_flags |= XBF_DONE;
 
-	PB_TRACE(pb, "iodone", pb->pb_iodone);
+	XB_TRACE(bp, "iodone", bp->b_iodone);
 
-	if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
+	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
 		if (schedule) {
-			INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
-			queue_work(xfslogd_workqueue, &pb->pb_iodone_work);
+			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
+			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
 		} else {
-			pagebuf_iodone_work(pb);
+			xfs_buf_iodone_work(bp);
 		}
 	} else {
-		up(&pb->pb_iodonesema);
+		up(&bp->b_iodonesema);
 	}
 }
-/*
- *	pagebuf_ioerror
- *
- *	pagebuf_ioerror sets the error code for a buffer.
- */
 void
-pagebuf_ioerror(			/* mark/clear buffer error flag */
-	xfs_buf_t		*pb,	/* buffer to mark		*/
-	int			error)	/* error to store (0 if none)	*/
+xfs_buf_ioerror(
+	xfs_buf_t		*bp,
+	int			error)
 {
 	ASSERT(error >= 0 && error <= 0xffff);
-	pb->pb_error = (unsigned short)error;
-	PB_TRACE(pb, "ioerror", (unsigned long)error);
+	bp->b_error = (unsigned short)error;
+	XB_TRACE(bp, "ioerror", (unsigned long)error);
 }
 /*
- *	pagebuf_iostart
- *
- *	pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
- *	If necessary, it will arrange for any disk space allocation required,
- *	and it will break up the request if the block mappings require it.
- *	The pb_iodone routine in the buffer supplied will only be called
+ *	Initiate I/O on a buffer, based on the flags supplied.
+ *	The b_iodone routine in the buffer supplied will only be called
  *	when all of the subsidiary I/O requests, if any, have been completed.
- *	pagebuf_iostart calls the pagebuf_ioinitiate routine or
- *	pagebuf_iorequest, if the former routine is not defined, to start
- *	the I/O on a given low-level request.
  */
 int
-pagebuf_iostart(			/* start I/O on a buffer	  */
-	xfs_buf_t		*pb,	/* buffer to start		  */
-	page_buf_flags_t	flags)	/* PBF_LOCK, PBF_ASYNC, PBF_READ, */
-					/* PBF_WRITE, PBF_DELWRI,	  */
-					/* PBF_DONT_BLOCK		  */
+xfs_buf_iostart(
+	xfs_buf_t		*bp,
+	xfs_buf_flags_t		flags)
 {
 	int			status = 0;
 
-	PB_TRACE(pb, "iostart", (unsigned long)flags);
+	XB_TRACE(bp, "iostart", (unsigned long)flags);
 
-	if (flags & PBF_DELWRI) {
-		pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
-		pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC);
-		pagebuf_delwri_queue(pb, 1);
+	if (flags & XBF_DELWRI) {
+		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
+		bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
+		xfs_buf_delwri_queue(bp, 1);
 		return status;
 	}
 
-	pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \
-			PBF_READ_AHEAD | _PBF_RUN_QUEUES);
-	pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
-			PBF_READ_AHEAD | _PBF_RUN_QUEUES);
+	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
+			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+	bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
+			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
 
-	BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);
+	BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
 
 	/* For writes allow an alternate strategy routine to precede
 	 * the actual I/O request (which may not be issued at all in
 	 * a shutdown situation, for example).
 	 */
-	status = (flags & PBF_WRITE) ?
-		pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);
+	status = (flags & XBF_WRITE) ?
+		xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
 
 	/* Wait for I/O if we are not an async request.
 	 * Note: async I/O request completion will release the buffer,
 	 * and that can already be done by this point.  So using the
 	 * buffer pointer from here on, after async I/O, is invalid.
 	 */
-	if (!status && !(flags & PBF_ASYNC))
-		status = pagebuf_iowait(pb);
+	if (!status && !(flags & XBF_ASYNC))
+		status = xfs_buf_iowait(bp);
 
 	return status;
 }
-/*
- * Helper routine for pagebuf_iorequest
- */
-
 STATIC __inline__ int
-_pagebuf_iolocked(
-	xfs_buf_t		*pb)
+_xfs_buf_iolocked(
	xfs_buf_t		*bp)
 {
-	ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
-	if (pb->pb_flags & PBF_READ)
-		return pb->pb_locked;
+	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
+	if (bp->b_flags & XBF_READ)
+		return bp->b_locked;
 	return 0;
 }
 
 STATIC __inline__ void
-_pagebuf_iodone(
-	xfs_buf_t		*pb,
+_xfs_buf_ioend(
+	xfs_buf_t		*bp,
 	int			schedule)
 {
-	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
-		pb->pb_locked = 0;
-		pagebuf_iodone(pb, schedule);
+	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
+		bp->b_locked = 0;
+		xfs_buf_ioend(bp, schedule);
 	}
 }
 STATIC int
-bio_end_io_pagebuf(
+xfs_buf_bio_end_io(
 	struct bio		*bio,
 	unsigned int		bytes_done,
 	int			error)
 {
-	xfs_buf_t		*pb = (xfs_buf_t *)bio->bi_private;
-	unsigned int		blocksize = pb->pb_target->pbr_bsize;
+	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
+	unsigned int		blocksize = bp->b_target->bt_bsize;
 	struct bio_vec		*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
 
 	if (bio->bi_size)
 		return 1;
 
 	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		pb->pb_error = EIO;
+		bp->b_error = EIO;
 
 	do {
 		struct page	*page = bvec->bv_page;
 
-		if (unlikely(pb->pb_error)) {
-			if (pb->pb_flags & PBF_READ)
+		if (unlikely(bp->b_error)) {
+			if (bp->b_flags & XBF_READ)
 				ClearPageUptodate(page);
 			SetPageError(page);
-		} else if (blocksize == PAGE_CACHE_SIZE) {
+		} else if (blocksize >= PAGE_CACHE_SIZE) {
 			SetPageUptodate(page);
 		} else if (!PagePrivate(page) &&
-				(pb->pb_flags & _PBF_PAGE_CACHE)) {
+				(bp->b_flags & _XBF_PAGE_CACHE)) {
 			set_page_region(page, bvec->bv_offset, bvec->bv_len);
 		}
 
 		if (--bvec >= bio->bi_io_vec)
 			prefetchw(&bvec->bv_page->flags);
 
-		if (_pagebuf_iolocked(pb)) {
+		if (_xfs_buf_iolocked(bp)) {
 			unlock_page(page);
 		}
 	} while (bvec >= bio->bi_io_vec);
 
-	_pagebuf_iodone(pb, 1);
+	_xfs_buf_ioend(bp, 1);
 	bio_put(bio);
 	return 0;
 }
 STATIC void
-_pagebuf_ioapply(
-	xfs_buf_t		*pb)
+_xfs_buf_ioapply(
	xfs_buf_t		*bp)
 {
 	int			i, rw, map_i, total_nr_pages, nr_pages;
 	struct bio		*bio;
-	int			offset = pb->pb_offset;
-	int			size = pb->pb_count_desired;
-	sector_t		sector = pb->pb_bn;
-	unsigned int		blocksize = pb->pb_target->pbr_bsize;
-	int			locking = _pagebuf_iolocked(pb);
+	int			offset = bp->b_offset;
+	int			size = bp->b_count_desired;
+	sector_t		sector = bp->b_bn;
+	unsigned int		blocksize = bp->b_target->bt_bsize;
+	int			locking = _xfs_buf_iolocked(bp);
 
-	total_nr_pages = pb->pb_page_count;
+	total_nr_pages = bp->b_page_count;
 	map_i = 0;
 
-	if (pb->pb_flags & _PBF_RUN_QUEUES) {
-		pb->pb_flags &= ~_PBF_RUN_QUEUES;
-		rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC;
+	if (bp->b_flags & _XBF_RUN_QUEUES) {
+		bp->b_flags &= ~_XBF_RUN_QUEUES;
+		rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC;
 	} else {
-		rw = (pb->pb_flags & PBF_READ) ? READ : WRITE;
+		rw = (bp->b_flags & XBF_READ) ? READ : WRITE;
 	}
 
-	if (pb->pb_flags & PBF_ORDERED) {
-		ASSERT(!(pb->pb_flags & PBF_READ));
+	if (bp->b_flags & XBF_ORDERED) {
+		ASSERT(!(bp->b_flags & XBF_READ));
 		rw = WRITE_BARRIER;
 	}
 
-	/* Special code path for reading a sub page size pagebuf in --
+	/* Special code path for reading a sub page size buffer in --
 	 * we populate up the whole page, and hence the other metadata
 	 * in the same page.  This optimization is only valid when the
-	 * filesystem block size and the page size are equal.
+	 * filesystem block size is not smaller than the page size.
	 */
-	if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) &&
-	    (pb->pb_flags & PBF_READ) && locking &&
-	    (blocksize == PAGE_CACHE_SIZE)) {
+	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
+	    (bp->b_flags & XBF_READ) && locking &&
+	    (blocksize >= PAGE_CACHE_SIZE)) {
 		bio = bio_alloc(GFP_NOIO, 1);
 
-		bio->bi_bdev = pb->pb_target->pbr_bdev;
+		bio->bi_bdev = bp->b_target->bt_bdev;
 		bio->bi_sector = sector - (offset >> BBSHIFT);
-		bio->bi_end_io = bio_end_io_pagebuf;
-		bio->bi_private = pb;
+		bio->bi_end_io = xfs_buf_bio_end_io;
+		bio->bi_private = bp;
 
-		bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0);
+		bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
 		size = 0;
 
-		atomic_inc(&pb->pb_io_remaining);
+		atomic_inc(&bp->b_io_remaining);
 
 		goto submit_io;
 	}
 
 	/* Lock down the pages which we need to for the request */
-	if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) {
+	if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
 		for (i = 0; size; i++) {
 			int		nbytes = PAGE_CACHE_SIZE - offset;
-			struct page	*page = pb->pb_pages[i];
+			struct page	*page = bp->b_pages[i];
 
 			if (nbytes > size)
 				nbytes = size;
...
@@ -1276,30 +1197,30 @@ _pagebuf_ioapply(
 			size -= nbytes;
 			offset = 0;
 		}
 
-		offset = pb->pb_offset;
-		size = pb->pb_count_desired;
+		offset = bp->b_offset;
+		size = bp->b_count_desired;
 	}
 
 next_chunk:
-	atomic_inc(&pb->pb_io_remaining);
+	atomic_inc(&bp->b_io_remaining);
 	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
 	if (nr_pages > total_nr_pages)
 		nr_pages = total_nr_pages;
 
 	bio = bio_alloc(GFP_NOIO, nr_pages);
-	bio->bi_bdev = pb->pb_target->pbr_bdev;
+	bio->bi_bdev = bp->b_target->bt_bdev;
 	bio->bi_sector = sector;
-	bio->bi_end_io = bio_end_io_pagebuf;
-	bio->bi_private = pb;
+	bio->bi_end_io = xfs_buf_bio_end_io;
+	bio->bi_private = bp;
 
 	for (; size && nr_pages; nr_pages--, map_i++) {
-		int	nbytes = PAGE_CACHE_SIZE - offset;
+		int	rbytes, nbytes = PAGE_CACHE_SIZE - offset;
 
 		if (nbytes > size)
 			nbytes = size;
 
-		if (bio_add_page(bio, pb->pb_pages[map_i],
-					nbytes, offset) < nbytes)
+		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
+		if (rbytes < nbytes)
 			break;
 
 		offset = 0;
...
@@ -1315,107 +1236,102 @@ _pagebuf_ioapply(
 		goto next_chunk;
 	} else {
 		bio_put(bio);
-		pagebuf_ioerror(pb, EIO);
+		xfs_buf_ioerror(bp, EIO);
 	}
 }
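_xfs_buf_ioapply() splits a large buffer into multiple bios, capping each at BIO_MAX_SECTORS worth of pages and looping via next_chunk until everything has been issued. A stand-alone sketch of just the chunking arithmetic; the constant below is illustrative, the real value comes from the block layer:

#include <stdio.h>

#define BIO_MAX_SECTORS 256             /* illustrative value */
#define PAGE_SHIFT      12
#define BBSHIFT         9               /* 512-byte basic blocks */

int main(void)
{
        int total_nr_pages = 40, issued = 0, chunks = 0;

        while (issued < total_nr_pages) {
                int nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);

                if (nr_pages > total_nr_pages - issued)
                        nr_pages = total_nr_pages - issued;
                issued += nr_pages;     /* one bio per iteration */
                chunks++;
        }
        printf("%d pages in %d bios\n", total_nr_pages, chunks);
        return 0;
}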
-/*
- * pagebuf_iorequest -- the core I/O request routine.
- */
 int
-pagebuf_iorequest(			/* start real I/O		*/
-	xfs_buf_t		*pb)	/* buffer to convey to device	*/
+xfs_buf_iorequest(
+	xfs_buf_t		*bp)
 {
-	PB_TRACE(pb, "iorequest", 0);
+	XB_TRACE(bp, "iorequest", 0);
 
-	if (pb->pb_flags & PBF_DELWRI) {
-		pagebuf_delwri_queue(pb, 1);
+	if (bp->b_flags & XBF_DELWRI) {
+		xfs_buf_delwri_queue(bp, 1);
 		return 0;
 	}
 
-	if (pb->pb_flags & PBF_WRITE) {
-		_pagebuf_wait_unpin(pb);
+	if (bp->b_flags & XBF_WRITE) {
+		xfs_buf_wait_unpin(bp);
 	}
 
-	pagebuf_hold(pb);
+	xfs_buf_hold(bp);
 
 	/* Set the count to 1 initially, this will stop an I/O
 	 * completion callout which happens before we have started
-	 * all the I/O from calling pagebuf_iodone too early.
+	 * all the I/O from calling xfs_buf_ioend too early.
	 */
-	atomic_set(&pb->pb_io_remaining, 1);
-	_pagebuf_ioapply(pb);
-	_pagebuf_iodone(pb, 0);
+	atomic_set(&bp->b_io_remaining, 1);
+	_xfs_buf_ioapply(bp);
+	_xfs_buf_ioend(bp, 0);
 
-	pagebuf_rele(pb);
+	xfs_buf_rele(bp);
 	return 0;
 }
 /*
- *	pagebuf_iowait
- *
- *	pagebuf_iowait waits for I/O to complete on the buffer supplied.
- *	It returns immediately if no I/O is pending.  In any case, it returns
- *	the error code, if any, or 0 if there is no error.
+ *	Waits for I/O to complete on the buffer supplied.
+ *	It returns immediately if no I/O is pending.
+ *	It returns the I/O error code, if any, or 0 if there was no error.
  */
 int
-pagebuf_iowait(
-	xfs_buf_t		*pb)
+xfs_buf_iowait(
	xfs_buf_t		*bp)
 {
-	PB_TRACE(pb, "iowait", 0);
-	if (atomic_read(&pb->pb_io_remaining))
-		blk_run_address_space(pb->pb_target->pbr_mapping);
-	down(&pb->pb_iodonesema);
-	PB_TRACE(pb, "iowaited", (long)pb->pb_error);
-	return pb->pb_error;
+	XB_TRACE(bp, "iowait", 0);
+	if (atomic_read(&bp->b_io_remaining))
+		blk_run_address_space(bp->b_target->bt_mapping);
+	down(&bp->b_iodonesema);
+	XB_TRACE(bp, "iowaited", (long)bp->b_error);
+	return bp->b_error;
 }
-caddr_t
-pagebuf_offset(
-	xfs_buf_t		*pb,
+xfs_caddr_t
+xfs_buf_offset(
	xfs_buf_t		*bp,
 	size_t			offset)
 {
 	struct page		*page;
 
-	offset += pb->pb_offset;
+	if (bp->b_flags & XBF_MAPPED)
+		return XFS_BUF_PTR(bp) + offset;
 
-	page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT];
-	return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1));
+	offset += bp->b_offset;
+	page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
+	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
 }
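The unmapped path of xfs_buf_offset() turns a byte offset into a page index plus an intra-page offset after folding in b_offset. The same arithmetic as a runnable check:

#include <assert.h>
#include <stddef.h>

#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1u << PAGE_CACHE_SHIFT)

static void locate(size_t b_offset, size_t offset,
                   size_t *page_index, size_t *page_off)
{
        offset += b_offset;                     /* fold in buffer offset */
        *page_index = offset >> PAGE_CACHE_SHIFT;
        *page_off   = offset & (PAGE_CACHE_SIZE - 1);
}

int main(void)
{
        size_t idx, off;

        locate(512, 4096, &idx, &off);
        assert(idx == 1 && off == 512);
        return 0;
}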
 /*
- *	pagebuf_iomove
- *
 *	Move data into or out of a buffer.
 */
 void
-pagebuf_iomove(
-	xfs_buf_t		*pb,	/* buffer to process		*/
+xfs_buf_iomove(
+	xfs_buf_t		*bp,	/* buffer to process		*/
 	size_t			boff,	/* starting buffer offset	*/
 	size_t			bsize,	/* length to copy		*/
 	caddr_t			data,	/* data address			*/
-	page_buf_rw_t		mode)	/* read/write flag		*/
+	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
 {
 	size_t			bend, cpoff, csize;
 	struct page		*page;
 
 	bend = boff + bsize;
 	while (boff < bend) {
-		page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)];
-		cpoff = page_buf_poff(boff + pb->pb_offset);
+		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
+		cpoff = xfs_buf_poff(boff + bp->b_offset);
 		csize = min_t(size_t,
-			      PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff);
+			      PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
 
 		ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
 
 		switch (mode) {
-		case PBRW_ZERO:
+		case XBRW_ZERO:
 			memset(page_address(page) + cpoff, 0, csize);
 			break;
-		case PBRW_READ:
+		case XBRW_READ:
 			memcpy(data, page_address(page) + cpoff, csize);
 			break;
-		case PBRW_WRITE:
+		case XBRW_WRITE:
 			memcpy(page_address(page) + cpoff, data, csize);
 		}
...
@@ -1425,12 +1341,12 @@ pagebuf_iomove(
 }
 /*
- *	Handling of buftargs.
+ *	Handling of buffer targets (buftargs).
  */
 
 /*
  *	Wait for any bufs with callbacks that have been submitted but
  *	have not yet returned... walk the hash list for the target.
  */
 void
 xfs_wait_buftarg(
...
@@ -1444,15 +1360,15 @@ xfs_wait_buftarg(
 		hash = &btp->bt_hash[i];
 again:
 		spin_lock(&hash->bh_lock);
-		list_for_each_entry_safe(bp, n, &hash->bh_list, pb_hash_list) {
-			ASSERT(btp == bp->pb_target);
-			if (!(bp->pb_flags & PBF_FS_MANAGED)) {
+		list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
+			ASSERT(btp == bp->b_target);
+			if (!(bp->b_flags & XBF_FS_MANAGED)) {
 				spin_unlock(&hash->bh_lock);
 				/*
 				 * Catch superblock reference count leaks
 				 * immediately
 				 */
-				BUG_ON(bp->pb_bn == 0);
+				BUG_ON(bp->b_bn == 0);
 				delay(100);
 				goto again;
 			}
...
@@ -1462,9 +1378,9 @@ xfs_wait_buftarg(
 }
 /*
  *	Allocate buffer hash table for a given target.
  *	For devices containing metadata (i.e. not the log/realtime devices)
  *	we need to allocate a much larger hash table.
  */
 STATIC void
 xfs_alloc_bufhash(
...
@@ -1487,11 +1403,34 @@ STATIC void
 xfs_free_bufhash(
 	xfs_buftarg_t		*btp)
 {
 	kmem_free(btp->bt_hash,
		  (1 << btp->bt_hashshift) * sizeof(xfs_bufhash_t));
 	btp->bt_hash = NULL;
 }
 
+/*
+ *	buftarg list for delwrite queue processing
+ */
+STATIC LIST_HEAD(xfs_buftarg_list);
+STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
+
+STATIC void
+xfs_register_buftarg(
+	xfs_buftarg_t	*btp)
+{
+	spin_lock(&xfs_buftarg_lock);
+	list_add(&btp->bt_list, &xfs_buftarg_list);
+	spin_unlock(&xfs_buftarg_lock);
+}
+
+STATIC void
+xfs_unregister_buftarg(
+	xfs_buftarg_t	*btp)
+{
+	spin_lock(&xfs_buftarg_lock);
+	list_del(&btp->bt_list);
+	spin_unlock(&xfs_buftarg_lock);
+}
 void
 xfs_free_buftarg(
 	xfs_buftarg_t		*btp,
...
@@ -1499,9 +1438,16 @@ xfs_free_buftarg(
 {
 	xfs_flush_buftarg(btp, 1);
 	if (external)
-		xfs_blkdev_put(btp->pbr_bdev);
+		xfs_blkdev_put(btp->bt_bdev);
 	xfs_free_bufhash(btp);
-	iput(btp->pbr_mapping->host);
+	iput(btp->bt_mapping->host);
 
+	/* Unregister the buftarg first so that we don't get a
+	 * wakeup finding a non-existent task
+	 */
+	xfs_unregister_buftarg(btp);
+	kthread_stop(btp->bt_task);
+
 	kmem_free(btp, sizeof(*btp));
 }
...
@@ -1512,11 +1458,11 @@ xfs_setsize_buftarg_flags(
 	unsigned int		sectorsize,
 	int			verbose)
 {
-	btp->pbr_bsize = blocksize;
-	btp->pbr_sshift = ffs(sectorsize) - 1;
-	btp->pbr_smask = sectorsize - 1;
+	btp->bt_bsize = blocksize;
+	btp->bt_sshift = ffs(sectorsize) - 1;
+	btp->bt_smask = sectorsize - 1;
 
-	if (set_blocksize(btp->pbr_bdev, sectorsize)) {
+	if (set_blocksize(btp->bt_bdev, sectorsize)) {
 		printk(KERN_WARNING
 			"XFS: Cannot set_blocksize to %u on device %s\n",
 			sectorsize, XFS_BUFTARG_NAME(btp));
...
@@ -1536,10 +1482,10 @@ xfs_setsize_buftarg_flags(
 }
 
 /*
  *	When allocating the initial buffer target we have not yet
  *	read in the superblock, so don't know what sized sectors
  *	are being used is at this early stage.  Play safe.
  */
 STATIC int
 xfs_setsize_buftarg_early(
 	xfs_buftarg_t		*btp,
...
@@ -1587,10 +1533,30 @@ xfs_mapping_buftarg(
 	mapping->a_ops = &mapping_aops;
 	mapping->backing_dev_info = bdi;
 	mapping_set_gfp_mask(mapping, GFP_NOFS);
-	btp->pbr_mapping = mapping;
+	btp->bt_mapping = mapping;
 	return 0;
 }
 
+STATIC int
+xfs_alloc_delwrite_queue(
+	xfs_buftarg_t		*btp)
+{
+	int	error = 0;
+
+	INIT_LIST_HEAD(&btp->bt_list);
+	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
+	spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
+	btp->bt_flags = 0;
+	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
+	if (IS_ERR(btp->bt_task)) {
+		error = PTR_ERR(btp->bt_task);
+		goto out_error;
+	}
+	xfs_register_buftarg(btp);
+out_error:
+	return error;
+}
 xfs_buftarg_t *
 xfs_alloc_buftarg(
 	struct block_device	*bdev,
...
@@ -1600,12 +1566,14 @@ xfs_alloc_buftarg(
 	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
 
-	btp->pbr_dev =  bdev->bd_dev;
-	btp->pbr_bdev = bdev;
+	btp->bt_dev =  bdev->bd_dev;
+	btp->bt_bdev = bdev;
 	if (xfs_setsize_buftarg_early(btp, bdev))
 		goto error;
 	if (xfs_mapping_buftarg(btp, bdev))
 		goto error;
+	if (xfs_alloc_delwrite_queue(btp))
+		goto error;
 	xfs_alloc_bufhash(btp, external);
 	return btp;
...
@@ -1616,83 +1584,81 @@ xfs_alloc_buftarg(
 /*
- *	Pagebuf delayed write buffer handling
+ *	Delayed write buffer handling
 */
-
-STATIC LIST_HEAD(pbd_delwrite_queue);
-STATIC DEFINE_SPINLOCK(pbd_delwrite_lock);
-
 STATIC void
-pagebuf_delwri_queue(
-	xfs_buf_t		*pb,
+xfs_buf_delwri_queue(
	xfs_buf_t		*bp,
 	int			unlock)
 {
-	PB_TRACE(pb, "delwri_q", (long)unlock);
-	ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) ==
-					(PBF_DELWRI|PBF_ASYNC));
+	struct list_head	*dwq = &bp->b_target->bt_delwrite_queue;
+	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;
 
-	spin_lock(&pbd_delwrite_lock);
+	XB_TRACE(bp, "delwri_q", (long)unlock);
+	ASSERT((bp->b_flags & (XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
+
+	spin_lock(dwlk);
 	/* If already in the queue, dequeue and place at tail */
-	if (!list_empty(&pb->pb_list)) {
-		ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
-		if (unlock) {
-			atomic_dec(&pb->pb_hold);
-		}
-		list_del(&pb->pb_list);
+	if (!list_empty(&bp->b_list)) {
+		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
+		if (unlock)
+			atomic_dec(&bp->b_hold);
+		list_del(&bp->b_list);
 	}
 
-	pb->pb_flags |= _PBF_DELWRI_Q;
-	list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
-	pb->pb_queuetime = jiffies;
-	spin_unlock(&pbd_delwrite_lock);
+	bp->b_flags |= _XBF_DELWRI_Q;
+	list_add_tail(&bp->b_list, dwq);
+	bp->b_queuetime = jiffies;
+	spin_unlock(dwlk);
 
 	if (unlock)
-		pagebuf_unlock(pb);
+		xfs_buf_unlock(bp);
 }
 void
-pagebuf_delwri_dequeue(
-	xfs_buf_t		*pb)
+xfs_buf_delwri_dequeue(
	xfs_buf_t		*bp)
 {
+	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;
 	int			dequeued = 0;
 
-	spin_lock(&pbd_delwrite_lock);
-	if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
-		ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
-		list_del_init(&pb->pb_list);
+	spin_lock(dwlk);
+	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
+		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
+		list_del_init(&bp->b_list);
 		dequeued = 1;
 	}
-	pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
-	spin_unlock(&pbd_delwrite_lock);
+	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
+	spin_unlock(dwlk);
 
 	if (dequeued)
-		pagebuf_rele(pb);
+		xfs_buf_rele(bp);
 
-	PB_TRACE(pb, "delwri_dq", (long)dequeued);
+	XB_TRACE(bp, "delwri_dq", (long)dequeued);
 }
 STATIC void
-pagebuf_runall_queues(
+xfs_buf_runall_queues(
 	struct workqueue_struct	*queue)
 {
 	flush_workqueue(queue);
 }
-/* Defines for pagebuf daemon */
-STATIC struct task_struct *xfsbufd_task;
-STATIC int xfsbufd_force_flush;
-STATIC int xfsbufd_force_sleep;
-
 STATIC int
 xfsbufd_wakeup(
 	int			priority,
 	gfp_t			mask)
 {
-	if (xfsbufd_force_sleep)
-		return 0;
-	xfsbufd_force_flush = 1;
-	barrier();
-	wake_up_process(xfsbufd_task);
+	xfs_buftarg_t		*btp;
+
+	spin_lock(&xfs_buftarg_lock);
+	list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
+		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
+			continue;
+		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
+		wake_up_process(btp->bt_task);
+	}
+	spin_unlock(&xfs_buftarg_lock);
 	return 0;
 }
 	...
@@ -1702,67 +1668,70 @@ xfsbufd(
 {
 	struct list_head	tmp;
 	unsigned long		age;
-	xfs_buftarg_t		*target;
-	xfs_buf_t		*pb, *n;
+	xfs_buftarg_t		*target = (xfs_buftarg_t *)data;
+	xfs_buf_t		*bp, *n;
+	struct list_head	*dwq = &target->bt_delwrite_queue;
+	spinlock_t		*dwlk = &target->bt_delwrite_lock;
 
 	current->flags |= PF_MEMALLOC;
 
 	INIT_LIST_HEAD(&tmp);
 	do {
 		if (unlikely(freezing(current))) {
-			xfsbufd_force_sleep = 1;
+			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
 			refrigerator();
 		} else {
-			xfsbufd_force_sleep = 0;
+			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
 		}
 
 		schedule_timeout_interruptible(
 			xfs_buf_timer_centisecs * msecs_to_jiffies(10));
 
 		age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
-		spin_lock(&pbd_delwrite_lock);
-		list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
-			PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
-			ASSERT(pb->pb_flags & PBF_DELWRI);
-
-			if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
-				if (!xfsbufd_force_flush &&
+		spin_lock(dwlk);
+		list_for_each_entry_safe(bp, n, dwq, b_list) {
+			XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
+			ASSERT(bp->b_flags & XBF_DELWRI);
+
+			if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
+				if (!test_bit(XBT_FORCE_FLUSH,
+						&target->bt_flags) &&
 				    time_before(jiffies,
-						pb->pb_queuetime + age)) {
-					pagebuf_unlock(pb);
+						bp->b_queuetime + age)) {
+					xfs_buf_unlock(bp);
 					break;
 				}
 
-				pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
-				pb->pb_flags |= PBF_WRITE;
-				list_move(&pb->pb_list, &tmp);
+				bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
+				bp->b_flags |= XBF_WRITE;
+				list_move(&bp->b_list, &tmp);
 			}
 		}
-		spin_unlock(&pbd_delwrite_lock);
+		spin_unlock(dwlk);
 
 		while (!list_empty(&tmp)) {
-			pb = list_entry(tmp.next, xfs_buf_t, pb_list);
-			target = pb->pb_target;
-
-			list_del_init(&pb->pb_list);
-			pagebuf_iostrategy(pb);
-
-			blk_run_address_space(target->pbr_mapping);
+			bp = list_entry(tmp.next, xfs_buf_t, b_list);
+			ASSERT(target == bp->b_target);
+
+			list_del_init(&bp->b_list);
+			xfs_buf_iostrategy(bp);
+
+			blk_run_address_space(target->bt_mapping);
 		}
 
 		if (as_list_len > 0)
 			purge_addresses();
 
-		xfsbufd_force_flush = 0;
+		clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
 	} while (!kthread_should_stop());
 
 	return 0;
 }
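The loop above flushes a buffer only once it has aged past xfs_buf_age_centisecs, or when XBT_FORCE_FLUSH is set. A minimal standalone sketch of the wrap-safe age test (names are illustrative; the kernel's time_before() works the same way on jiffies):

    #include <stdbool.h>
    #include <stdio.h>

    /* "a is before b", tolerant of counter wraparound, like time_before(). */
    static bool before(unsigned long a, unsigned long b)
    {
            return (long)(a - b) < 0;
    }

    int main(void)
    {
            unsigned long now = 1000, queuetime = 900, age = 50;
            bool force = false;

            /* Queued at tick 900 with a 50-tick age: flushable from tick 950. */
            puts(force || !before(now, queuetime + age) ? "flush" : "keep");
            return 0;
    }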
 /*
  *	Go through all incore buffers, and release buffers if they belong to
  *	the given device. This is used in filesystem error handling to
  *	preserve the consistency of its metadata.
  */
 int
 xfs_flush_buftarg(
 	...
@@ -1770,73 +1739,72 @@ xfs_flush_buftarg(
 	int			wait)
 {
 	struct list_head	tmp;
-	xfs_buf_t		*pb, *n;
+	xfs_buf_t		*bp, *n;
 	int			pincount = 0;
+	struct list_head	*dwq = &target->bt_delwrite_queue;
+	spinlock_t		*dwlk = &target->bt_delwrite_lock;
 
-	pagebuf_runall_queues(xfsdatad_workqueue);
-	pagebuf_runall_queues(xfslogd_workqueue);
+	xfs_buf_runall_queues(xfsdatad_workqueue);
+	xfs_buf_runall_queues(xfslogd_workqueue);
 
 	INIT_LIST_HEAD(&tmp);
-	spin_lock(&pbd_delwrite_lock);
-	list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
-
-		if (pb->pb_target != target)
-			continue;
-
-		ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q));
-		PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
-		if (pagebuf_ispin(pb)) {
+	spin_lock(dwlk);
+	list_for_each_entry_safe(bp, n, dwq, b_list) {
+		ASSERT(bp->b_target == target);
+		ASSERT(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q));
+		XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
+		if (xfs_buf_ispin(bp)) {
 			pincount++;
 			continue;
 		}
 
-		list_move(&pb->pb_list, &tmp);
+		list_move(&bp->b_list, &tmp);
 	}
-	spin_unlock(&pbd_delwrite_lock);
+	spin_unlock(dwlk);
 
 	/*
 	 * Dropped the delayed write list lock, now walk the temporary list
 	 */
-	list_for_each_entry_safe(pb, n, &tmp, pb_list) {
-		pagebuf_lock(pb);
-		pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
-		pb->pb_flags |= PBF_WRITE;
+	list_for_each_entry_safe(bp, n, &tmp, b_list) {
+		xfs_buf_lock(bp);
+		bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
+		bp->b_flags |= XBF_WRITE;
 		if (wait)
-			pb->pb_flags &= ~PBF_ASYNC;
+			bp->b_flags &= ~XBF_ASYNC;
 		else
-			list_del_init(&pb->pb_list);
+			list_del_init(&bp->b_list);
 
-		pagebuf_iostrategy(pb);
+		xfs_buf_iostrategy(bp);
 	}
 
 	/*
 	 * Remaining list items must be flushed before returning
 	 */
 	while (!list_empty(&tmp)) {
-		pb = list_entry(tmp.next, xfs_buf_t, pb_list);
+		bp = list_entry(tmp.next, xfs_buf_t, b_list);
 
-		list_del_init(&pb->pb_list);
-		xfs_iowait(pb);
-		xfs_buf_relse(pb);
+		list_del_init(&bp->b_list);
+		xfs_iowait(bp);
+		xfs_buf_relse(bp);
 	}
 
 	if (wait)
-		blk_run_address_space(target->pbr_mapping);
+		blk_run_address_space(target->bt_mapping);
 
 	return pincount;
 }
 int __init
-pagebuf_init(void)
+xfs_buf_init(void)
 {
 	int		error = -ENOMEM;
 
-#ifdef PAGEBUF_TRACE
-	pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
+#ifdef XFS_BUF_TRACE
+	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
 #endif
 
-	pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
-	if (!pagebuf_zone)
+	xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
+	if (!xfs_buf_zone)
 		goto out_free_trace_buf;
 
 	xfslogd_workqueue = create_workqueue("xfslogd");
 	...
@@ -1847,42 +1815,33 @@ pagebuf_init(void)
 	if (!xfsdatad_workqueue)
 		goto out_destroy_xfslogd_workqueue;
 
-	xfsbufd_task = kthread_run(xfsbufd, NULL, "xfsbufd");
-	if (IS_ERR(xfsbufd_task)) {
-		error = PTR_ERR(xfsbufd_task);
+	xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
+	if (!xfs_buf_shake)
 		goto out_destroy_xfsdatad_workqueue;
-	}
-
-	pagebuf_shake = kmem_shake_register(xfsbufd_wakeup);
-	if (!pagebuf_shake)
-		goto out_stop_xfsbufd;
 
 	return 0;
 
- out_stop_xfsbufd:
-	kthread_stop(xfsbufd_task);
  out_destroy_xfsdatad_workqueue:
 	destroy_workqueue(xfsdatad_workqueue);
  out_destroy_xfslogd_workqueue:
 	destroy_workqueue(xfslogd_workqueue);
  out_free_buf_zone:
-	kmem_zone_destroy(pagebuf_zone);
+	kmem_zone_destroy(xfs_buf_zone);
  out_free_trace_buf:
-#ifdef PAGEBUF_TRACE
-	ktrace_free(pagebuf_trace_buf);
+#ifdef XFS_BUF_TRACE
+	ktrace_free(xfs_buf_trace_buf);
 #endif
 	return error;
 }
 void
-pagebuf_terminate(void)
+xfs_buf_terminate(void)
 {
-	kmem_shake_deregister(pagebuf_shake);
-	kthread_stop(xfsbufd_task);
+	kmem_shake_deregister(xfs_buf_shake);
 	destroy_workqueue(xfsdatad_workqueue);
 	destroy_workqueue(xfslogd_workqueue);
-	kmem_zone_destroy(pagebuf_zone);
-#ifdef PAGEBUF_TRACE
-	ktrace_free(pagebuf_trace_buf);
+	kmem_zone_destroy(xfs_buf_zone);
+#ifdef XFS_BUF_TRACE
+	ktrace_free(xfs_buf_trace_buf);
#endif
 }
fs/xfs/linux-2.6/xfs_buf.h
 	...
@@ -32,44 +32,47 @@
  *	Base types
  */
 
 #define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))
 
-#define page_buf_ctob(pp)	((pp) * PAGE_CACHE_SIZE)
-#define page_buf_btoc(dd)	(((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
-#define page_buf_btoct(dd)	((dd) >> PAGE_CACHE_SHIFT)
-#define page_buf_poff(aa)	((aa) & ~PAGE_CACHE_MASK)
+#define xfs_buf_ctob(pp)	((pp) * PAGE_CACHE_SIZE)
+#define xfs_buf_btoc(dd)	(((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT)
+#define xfs_buf_btoct(dd)	((dd) >> PAGE_CACHE_SHIFT)
+#define xfs_buf_poff(aa)	((aa) & ~PAGE_CACHE_MASK)
 
-typedef enum page_buf_rw_e {
-	PBRW_READ = 1,			/* transfer into target memory */
-	PBRW_WRITE = 2,			/* transfer from target memory */
-	PBRW_ZERO = 3			/* Zero target memory */
-} page_buf_rw_t;
+typedef enum {
+	XBRW_READ = 1,			/* transfer into target memory */
+	XBRW_WRITE = 2,			/* transfer from target memory */
+	XBRW_ZERO = 3,			/* Zero target memory */
+} xfs_buf_rw_t;
 
-typedef enum page_buf_flags_e {		/* pb_flags values */
-	PBF_READ = (1 << 0),	/* buffer intended for reading from device */
-	PBF_WRITE = (1 << 1),	/* buffer intended for writing to device   */
-	PBF_MAPPED = (1 << 2),	/* buffer mapped (pb_addr valid)           */
-	PBF_ASYNC = (1 << 4),	/* initiator will not wait for completion  */
-	PBF_DONE = (1 << 5),	/* all pages in the buffer uptodate        */
-	PBF_DELWRI = (1 << 6),	/* buffer has dirty pages                  */
-	PBF_STALE = (1 << 7),	/* buffer has been staled, do not find it  */
-	PBF_FS_MANAGED = (1 << 8),  /* filesystem controls freeing memory  */
-	PBF_ORDERED = (1 << 11),    /* use ordered writes                  */
-	PBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead             */
+typedef enum {
+	XBF_READ = (1 << 0),	/* buffer intended for reading from device */
+	XBF_WRITE = (1 << 1),	/* buffer intended for writing to device   */
+	XBF_MAPPED = (1 << 2),	/* buffer mapped (b_addr valid)            */
+	XBF_ASYNC = (1 << 4),	/* initiator will not wait for completion  */
+	XBF_DONE = (1 << 5),	/* all pages in the buffer uptodate        */
+	XBF_DELWRI = (1 << 6),	/* buffer has dirty pages                  */
+	XBF_STALE = (1 << 7),	/* buffer has been staled, do not find it  */
+	XBF_FS_MANAGED = (1 << 8),  /* filesystem controls freeing memory  */
+	XBF_ORDERED = (1 << 11),    /* use ordered writes                  */
+	XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead             */
 
 	/* flags used only as arguments to access routines */
-	PBF_LOCK = (1 << 14),       /* lock requested                      */
-	PBF_TRYLOCK = (1 << 15),    /* lock requested, but do not wait     */
-	PBF_DONT_BLOCK = (1 << 16), /* do not block in current thread      */
+	XBF_LOCK = (1 << 14),       /* lock requested                      */
+	XBF_TRYLOCK = (1 << 15),    /* lock requested, but do not wait     */
+	XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread      */
 
 	/* flags used only internally */
-	_PBF_PAGE_CACHE = (1 << 17), /* backed by pagecache                */
-	_PBF_KMEM_ALLOC = (1 << 18), /* backed by kmem_alloc()             */
-	_PBF_RUN_QUEUES = (1 << 19), /* run block device task queue        */
-	_PBF_DELWRI_Q = (1 << 21),   /* buffer on delwri queue             */
-} page_buf_flags_t;
+	_XBF_PAGE_CACHE = (1 << 17), /* backed by pagecache                */
+	_XBF_KMEM_ALLOC = (1 << 18), /* backed by kmem_alloc()             */
+	_XBF_RUN_QUEUES = (1 << 19), /* run block device task queue        */
+	_XBF_DELWRI_Q = (1 << 21),   /* buffer on delwri queue             */
+} xfs_buf_flags_t;
+
+typedef enum {
+	XBT_FORCE_SLEEP = (0 << 1),
+	XBT_FORCE_FLUSH = (1 << 1),
+} xfs_buftarg_flags_t;
 
 typedef struct xfs_bufhash {
 	struct list_head	bh_list;
 	...
@@ -77,477 +80,350 @@ typedef struct xfs_bufhash {
 } xfs_bufhash_t;
 
 typedef struct xfs_buftarg {
-	dev_t			pbr_dev;
-	struct block_device	*pbr_bdev;
-	struct address_space	*pbr_mapping;
-	unsigned int		pbr_bsize;
-	unsigned int		pbr_sshift;
-	size_t			pbr_smask;
-
-	/* per-device buffer hash table */
+	dev_t			bt_dev;
+	struct block_device	*bt_bdev;
+	struct address_space	*bt_mapping;
+	unsigned int		bt_bsize;
+	unsigned int		bt_sshift;
+	size_t			bt_smask;
+
+	/* per device buffer hash table */
 	uint			bt_hashmask;
 	uint			bt_hashshift;
 	xfs_bufhash_t		*bt_hash;
+
+	/* per device delwri queue */
+	struct task_struct	*bt_task;
+	struct list_head	bt_list;
+	struct list_head	bt_delwrite_queue;
+	spinlock_t		bt_delwrite_lock;
+	unsigned long		bt_flags;
 } xfs_buftarg_t;
 
 /*
- * xfs_buf_t:  Buffer structure for page cache-based buffers
+ *	xfs_buf_t:  Buffer structure for pagecache-based buffers
+ *
+ * This buffer structure is used by the pagecache buffer management routines
+ * to refer to an assembly of pages forming a logical buffer.
  *
- * This buffer structure is used by the page cache buffer management routines
- * to refer to an assembly of pages forming a logical buffer.  The actual I/O
- * is performed with buffer_head structures, as required by drivers.
- *
- * The buffer structure is used on temporary basis only, and discarded when
- * released.  The real data storage is recorded in the page cache.  Metadata is
+ * The buffer structure is used on a temporary basis only, and discarded when
+ * released.  The real data storage is recorded in the pagecache. Buffers are
  * hashed to the block device on which the file system resides.
  */
 
 struct xfs_buf;
+typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
+typedef void (*xfs_buf_relse_t)(struct xfs_buf *);
+typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
 
-/* call-back function on I/O completion */
-typedef void (*page_buf_iodone_t)(struct xfs_buf *);
-/* call-back function on I/O completion */
-typedef void (*page_buf_relse_t)(struct xfs_buf *);
-/* pre-write function */
-typedef int (*page_buf_bdstrat_t)(struct xfs_buf *);
+#define XB_PAGES	2
 
-#define PB_PAGES	2
-
 typedef struct xfs_buf {
-	struct semaphore	pb_sema;	/* semaphore for lockables   */
-	unsigned long		pb_queuetime;	/* time buffer was queued    */
-	atomic_t		pb_pin_count;	/* pin count                 */
-	wait_queue_head_t	pb_waiters;	/* unpin waiters             */
-	struct list_head	pb_list;
-	page_buf_flags_t	pb_flags;	/* status flags */
-	struct list_head	pb_hash_list;	/* hash table list */
-	xfs_bufhash_t		*pb_hash;	/* hash table list start */
-	xfs_buftarg_t		*pb_target;	/* buffer target (device) */
-	atomic_t		pb_hold;	/* reference count */
-	xfs_daddr_t		pb_bn;		/* block number for I/O */
-	loff_t			pb_file_offset;	/* offset in file */
-	size_t			pb_buffer_length; /* size of buffer in bytes */
-	size_t			pb_count_desired; /* desired transfer size */
-	void			*pb_addr;	/* virtual address of buffer */
-	struct work_struct	pb_iodone_work;
-	atomic_t		pb_io_remaining; /* #outstanding I/O requests */
-	page_buf_iodone_t	pb_iodone;	/* I/O completion function */
-	page_buf_relse_t	pb_relse;	/* releasing function */
-	page_buf_bdstrat_t	pb_strat;	/* pre-write function */
-	struct semaphore	pb_iodonesema;	/* Semaphore for I/O waiters */
-	void			*pb_fspriv;
-	void			*pb_fspriv2;
-	void			*pb_fspriv3;
-	unsigned short		pb_error;	/* error code on I/O */
-	unsigned short		pb_locked;	/* page array is locked */
-	unsigned int		pb_page_count;	/* size of page array */
-	unsigned int		pb_offset;	/* page offset in first page */
-	struct page		**pb_pages;	/* array of page pointers */
-	struct page		*pb_page_array[PB_PAGES]; /* inline pages */
-#ifdef PAGEBUF_LOCK_TRACKING
-	int			pb_last_holder;
+	struct semaphore	b_sema;		/* semaphore for lockables   */
+	unsigned long		b_queuetime;	/* time buffer was queued    */
+	atomic_t		b_pin_count;	/* pin count                 */
+	wait_queue_head_t	b_waiters;	/* unpin waiters             */
+	struct list_head	b_list;
+	xfs_buf_flags_t		b_flags;	/* status flags */
+	struct list_head	b_hash_list;	/* hash table list */
+	xfs_bufhash_t		*b_hash;	/* hash table list start */
+	xfs_buftarg_t		*b_target;	/* buffer target (device) */
+	atomic_t		b_hold;		/* reference count */
+	xfs_daddr_t		b_bn;		/* block number for I/O */
+	xfs_off_t		b_file_offset;	/* offset in file */
+	size_t			b_buffer_length;/* size of buffer in bytes */
+	size_t			b_count_desired;/* desired transfer size */
+	void			*b_addr;	/* virtual address of buffer */
+	struct work_struct	b_iodone_work;
+	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
+	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
+	xfs_buf_relse_t		b_relse;	/* releasing function */
+	xfs_buf_bdstrat_t	b_strat;	/* pre-write function */
+	struct semaphore	b_iodonesema;	/* Semaphore for I/O waiters */
+	void			*b_fspriv;
+	void			*b_fspriv2;
+	void			*b_fspriv3;
+	unsigned short		b_error;	/* error code on I/O */
+	unsigned short		b_locked;	/* page array is locked */
+	unsigned int		b_page_count;	/* size of page array */
+	unsigned int		b_offset;	/* page offset in first page */
+	struct page		**b_pages;	/* array of page pointers */
+	struct page		*b_page_array[XB_PAGES]; /* inline pages */
+#ifdef XFS_BUF_LOCK_TRACKING
+	int			b_last_holder;
 #endif
 } xfs_buf_t;
 /* Finding and Reading Buffers */
-extern xfs_buf_t *_pagebuf_find(	/* find buffer for block if	*/
-					/* the block is in memory	*/
-		xfs_buftarg_t *,	/* inode for block		*/
-		loff_t,			/* starting offset of range	*/
-		size_t,			/* length of range		*/
-		page_buf_flags_t,	/* PBF_LOCK			*/
-		xfs_buf_t *);		/* newly allocated buffer	*/
-
+extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
+				xfs_buf_flags_t, xfs_buf_t *);
 #define xfs_incore(buftarg,blkno,len,lockit) \
-	_pagebuf_find(buftarg, blkno ,len, lockit, NULL)
-
-extern xfs_buf_t *xfs_buf_get_flags(	/* allocate a buffer		*/
-		xfs_buftarg_t *,	/* inode for buffer		*/
-		loff_t,			/* starting offset of range	*/
-		size_t,			/* length of range		*/
-		page_buf_flags_t);	/* PBF_LOCK, PBF_READ,		*/
-					/* PBF_ASYNC			*/
+	_xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
 
+extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t,
+				xfs_buf_flags_t);
 #define xfs_buf_get(target, blkno, len, flags) \
-	xfs_buf_get_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
-
-extern xfs_buf_t *xfs_buf_read_flags(	/* allocate and read a buffer	*/
-		xfs_buftarg_t *,	/* inode for buffer		*/
-		loff_t,			/* starting offset of range	*/
-		size_t,			/* length of range		*/
-		page_buf_flags_t);	/* PBF_LOCK, PBF_ASYNC		*/
+	xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
 
+extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t,
+				xfs_buf_flags_t);
 #define xfs_buf_read(target, blkno, len, flags) \
-	xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
+	xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
-extern xfs_buf_t *pagebuf_get_empty(	/* allocate pagebuf struct with	*/
-					/*  no memory or disk address	*/
-		size_t len,
-		xfs_buftarg_t *);	/* mount point "fake" inode	*/
-extern xfs_buf_t *pagebuf_get_no_daddr(	/* allocate pagebuf struct	*/
-					/* without disk address		*/
-		size_t len,
-		xfs_buftarg_t *);	/* mount point "fake" inode	*/
-extern int pagebuf_associate_memory(xfs_buf_t *, void *, size_t);
-extern void pagebuf_hold(		/* increment reference count	*/
-		xfs_buf_t *);		/* buffer to hold		*/
-extern void pagebuf_readahead(		/* read ahead into cache	*/
-		xfs_buftarg_t *,	/* target for buffer (or NULL)	*/
-		loff_t,			/* starting offset of range	*/
-		size_t,			/* length of range		*/
-		page_buf_flags_t);	/* additional read flags	*/
+extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
+extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *);
+extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
+extern void xfs_buf_hold(xfs_buf_t *);
+extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t,
+				xfs_buf_flags_t);
 /* Releasing Buffers */
-extern void pagebuf_free(		/* deallocate a buffer		*/
-		xfs_buf_t *);		/* buffer to deallocate		*/
-extern void pagebuf_rele(		/* release hold on a buffer	*/
-		xfs_buf_t *);		/* buffer to release		*/
+extern void xfs_buf_free(xfs_buf_t *);
+extern void xfs_buf_rele(xfs_buf_t *);
 /* Locking and Unlocking Buffers */
-extern int pagebuf_cond_lock(		/* lock buffer, if not locked	*/
-					/* (returns -EBUSY if locked)	*/
-		xfs_buf_t *);		/* buffer to lock		*/
-extern int pagebuf_lock_value(		/* return count on lock		*/
-		xfs_buf_t *);		/* buffer to check		*/
-extern int pagebuf_lock(		/* lock buffer			*/
-		xfs_buf_t *);		/* buffer to lock		*/
-extern void pagebuf_unlock(		/* unlock buffer		*/
-		xfs_buf_t *);		/* buffer to unlock		*/
+extern int xfs_buf_cond_lock(xfs_buf_t *);
+extern int xfs_buf_lock_value(xfs_buf_t *);
+extern void xfs_buf_lock(xfs_buf_t *);
+extern void xfs_buf_unlock(xfs_buf_t *);
 /* Buffer Read and Write Routines */
-extern void pagebuf_iodone(		/* mark buffer I/O complete	*/
-		xfs_buf_t *,		/* buffer to mark		*/
-		int);			/* run completion locally, or in
-					 * a helper thread.		*/
-extern void pagebuf_ioerror(		/* mark buffer in error (or not) */
-		xfs_buf_t *,		/* buffer to mark		*/
-		int);			/* error to store (0 if none)	*/
-extern int pagebuf_iostart(		/* start I/O on a buffer	*/
-		xfs_buf_t *,		/* buffer to start		*/
-		page_buf_flags_t);	/* PBF_LOCK, PBF_ASYNC,		*/
-					/* PBF_READ, PBF_WRITE,		*/
-					/* PBF_DELWRI			*/
-extern int pagebuf_iorequest(		/* start real I/O		*/
-		xfs_buf_t *);		/* buffer to convey to device	*/
-extern int pagebuf_iowait(		/* wait for buffer I/O done	*/
-		xfs_buf_t *);		/* buffer to wait on		*/
-extern void pagebuf_iomove(		/* move data in/out of pagebuf	*/
-		xfs_buf_t *,		/* buffer to manipulate		*/
-		size_t,			/* starting buffer offset	*/
-		size_t,			/* length in buffer		*/
-		caddr_t,		/* data pointer			*/
-		page_buf_rw_t);		/* direction			*/
-
-static inline int pagebuf_iostrategy(xfs_buf_t *pb)
+extern void xfs_buf_ioend(xfs_buf_t *,	int);
+extern void xfs_buf_ioerror(xfs_buf_t *, int);
+extern int xfs_buf_iostart(xfs_buf_t *, xfs_buf_flags_t);
+extern int xfs_buf_iorequest(xfs_buf_t *);
+extern int xfs_buf_iowait(xfs_buf_t *);
+extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t,
+				xfs_buf_rw_t);
+
+static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
 {
-	return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb);
+	return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp);
 }
-static inline int pagebuf_geterror(xfs_buf_t *pb)
+static inline int xfs_buf_geterror(xfs_buf_t *bp)
 {
-	return pb ? pb->pb_error : ENOMEM;
+	return bp ? bp->b_error : ENOMEM;
 }
 /* Buffer Utility Routines */
-extern caddr_t pagebuf_offset(		/* pointer at offset in buffer	*/
-		xfs_buf_t *,		/* buffer to offset into	*/
-		size_t);		/* offset			*/
+extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
 /* Pinning Buffer Storage in Memory */
-extern void pagebuf_pin(		/* pin buffer in memory		*/
-		xfs_buf_t *);		/* buffer to pin		*/
-extern void pagebuf_unpin(		/* unpin buffered data		*/
-		xfs_buf_t *);		/* buffer to unpin		*/
-extern int pagebuf_ispin(		/* check if buffer is pinned	*/
-		xfs_buf_t *);		/* buffer to check		*/
+extern void xfs_buf_pin(xfs_buf_t *);
+extern void xfs_buf_unpin(xfs_buf_t *);
+extern int xfs_buf_ispin(xfs_buf_t *);
 /* Delayed Write Buffer Routines */
-extern void pagebuf_delwri_dequeue(xfs_buf_t *);
+extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
 
 /* Buffer Daemon Setup Routines */
-extern int pagebuf_init(void);
-extern void pagebuf_terminate(void);
+extern int xfs_buf_init(void);
+extern void xfs_buf_terminate(void);
-#ifdef PAGEBUF_TRACE
-extern ktrace_t *pagebuf_trace_buf;
-extern void pagebuf_trace(
-		xfs_buf_t *,		/* buffer being traced		*/
-		char *,			/* description of operation	*/
-		void *,			/* arbitrary diagnostic value	*/
-		void *);		/* return address		*/
+#ifdef XFS_BUF_TRACE
+extern ktrace_t *xfs_buf_trace_buf;
+extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
 #else
-#define pagebuf_trace(pb, id, ptr, ra)	do { } while (0)
+#define xfs_buf_trace(bp,id,ptr,ra)	do { } while (0)
 #endif
 
-#define pagebuf_target_name(target)	\
-	({ char __b[BDEVNAME_SIZE]; bdevname((target)->pbr_bdev, __b); __b; })
+#define xfs_buf_target_name(target)	\
+	({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
-/* These are just for xfs_syncsub... it sets an internal variable
- * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
- */
-#define XFS_B_ASYNC		PBF_ASYNC
-#define XFS_B_DELWRI		PBF_DELWRI
-#define XFS_B_READ		PBF_READ
-#define XFS_B_WRITE		PBF_WRITE
-#define XFS_B_STALE		PBF_STALE
-
-#define XFS_BUF_TRYLOCK		PBF_TRYLOCK
-#define XFS_INCORE_TRYLOCK	PBF_TRYLOCK
-#define XFS_BUF_LOCK		PBF_LOCK
-#define XFS_BUF_MAPPED		PBF_MAPPED
-
-#define BUF_BUSY		PBF_DONT_BLOCK
-
-#define XFS_BUF_BFLAGS(x)	((x)->pb_flags)
-#define XFS_BUF_ZEROFLAGS(x)	\
-	((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI))
-
-#define XFS_BUF_STALE(x)	((x)->pb_flags |= XFS_B_STALE)
-#define XFS_BUF_UNSTALE(x)	((x)->pb_flags &= ~XFS_B_STALE)
-#define XFS_BUF_ISSTALE(x)	((x)->pb_flags & XFS_B_STALE)
-#define XFS_BUF_SUPER_STALE(x)	do {				\
-					XFS_BUF_STALE(x);	\
-					pagebuf_delwri_dequeue(x);	\
-					XFS_BUF_DONE(x);	\
-				} while (0)
-
-#define XFS_BUF_MANAGE		PBF_FS_MANAGED
-#define XFS_BUF_UNMANAGE(x)	((x)->pb_flags &= ~PBF_FS_MANAGED)
-
-#define XFS_BUF_DELAYWRITE(x)	((x)->pb_flags |= PBF_DELWRI)
-#define XFS_BUF_UNDELAYWRITE(x)	pagebuf_delwri_dequeue(x)
-#define XFS_BUF_ISDELAYWRITE(x)	((x)->pb_flags & PBF_DELWRI)
-
-#define XFS_BUF_ERROR(x,no)	pagebuf_ioerror(x,no)
-#define XFS_BUF_GETERROR(x)	pagebuf_geterror(x)
-#define XFS_BUF_ISERROR(x)	(pagebuf_geterror(x)?1:0)
-
-#define XFS_BUF_DONE(x)		((x)->pb_flags |= PBF_DONE)
-#define XFS_BUF_UNDONE(x)	((x)->pb_flags &= ~PBF_DONE)
-#define XFS_BUF_ISDONE(x)	((x)->pb_flags & PBF_DONE)
-
-#define XFS_BUF_BUSY(x)		do { } while (0)
-#define XFS_BUF_UNBUSY(x)	do { } while (0)
-#define XFS_BUF_ISBUSY(x)	(1)
-
-#define XFS_BUF_ASYNC(x)	((x)->pb_flags |= PBF_ASYNC)
-#define XFS_BUF_UNASYNC(x)	((x)->pb_flags &= ~PBF_ASYNC)
-#define XFS_BUF_ISASYNC(x)	((x)->pb_flags & PBF_ASYNC)
-
-#define XFS_BUF_ORDERED(x)	((x)->pb_flags |= PBF_ORDERED)
-#define XFS_BUF_UNORDERED(x)	((x)->pb_flags &= ~PBF_ORDERED)
-#define XFS_BUF_ISORDERED(x)	((x)->pb_flags & PBF_ORDERED)
-
-#define XFS_BUF_SHUT(x)		printk("XFS_BUF_SHUT not implemented yet\n")
-#define XFS_BUF_UNSHUT(x)	printk("XFS_BUF_UNSHUT not implemented yet\n")
-#define XFS_BUF_ISSHUT(x)	(0)
-
-#define XFS_BUF_HOLD(x)		pagebuf_hold(x)
-#define XFS_BUF_READ(x)		((x)->pb_flags |= PBF_READ)
-#define XFS_BUF_UNREAD(x)	((x)->pb_flags &= ~PBF_READ)
-#define XFS_BUF_ISREAD(x)	((x)->pb_flags & PBF_READ)
-
-#define XFS_BUF_WRITE(x)	((x)->pb_flags |= PBF_WRITE)
-#define XFS_BUF_UNWRITE(x)	((x)->pb_flags &= ~PBF_WRITE)
-#define XFS_BUF_ISWRITE(x)	((x)->pb_flags & PBF_WRITE)
-
-#define XFS_BUF_ISUNINITIAL(x)	(0)
-#define XFS_BUF_UNUNINITIAL(x)	(0)
-
-#define XFS_BUF_BP_ISMAPPED(bp)	1
-
-#define XFS_BUF_IODONE_FUNC(buf)	(buf)->pb_iodone
-#define XFS_BUF_SET_IODONE_FUNC(buf, func)	\
-			(buf)->pb_iodone = (func)
-#define XFS_BUF_CLR_IODONE_FUNC(buf)		\
-			(buf)->pb_iodone = NULL
-#define XFS_BUF_SET_BDSTRAT_FUNC(buf, func)	\
-			(buf)->pb_strat = (func)
-#define XFS_BUF_CLR_BDSTRAT_FUNC(buf)		\
-			(buf)->pb_strat = NULL
-
-#define XFS_BUF_FSPRIVATE(buf, type)		\
-			((type)(buf)->pb_fspriv)
-#define XFS_BUF_SET_FSPRIVATE(buf, value)	\
-			(buf)->pb_fspriv = (void *)(value)
-#define XFS_BUF_FSPRIVATE2(buf, type)		\
-			((type)(buf)->pb_fspriv2)
-#define XFS_BUF_SET_FSPRIVATE2(buf, value)	\
-			(buf)->pb_fspriv2 = (void *)(value)
-#define XFS_BUF_FSPRIVATE3(buf, type)		\
-			((type)(buf)->pb_fspriv3)
-#define XFS_BUF_SET_FSPRIVATE3(buf, value)	\
-			(buf)->pb_fspriv3 = (void *)(value)
-#define XFS_BUF_SET_START(buf)
-#define XFS_BUF_SET_BRELSE_FUNC(buf, value)	\
-			(buf)->pb_relse = (value)
-
-#define XFS_BUF_PTR(bp)		(xfs_caddr_t)((bp)->pb_addr)
-
-static inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
-{
-	if (bp->pb_flags & PBF_MAPPED)
-		return XFS_BUF_PTR(bp) + offset;
-	return (xfs_caddr_t) pagebuf_offset(bp, offset);
-}
-
-#define XFS_BUF_SET_PTR(bp, val, count)		\
-				pagebuf_associate_memory(bp, val, count)
-#define XFS_BUF_ADDR(bp)	((bp)->pb_bn)
-#define XFS_BUF_SET_ADDR(bp, blk)		\
-			((bp)->pb_bn = (xfs_daddr_t)(blk))
-#define XFS_BUF_OFFSET(bp)	((bp)->pb_file_offset)
-#define XFS_BUF_SET_OFFSET(bp, off)		\
-			((bp)->pb_file_offset = (off))
-#define XFS_BUF_COUNT(bp)	((bp)->pb_count_desired)
-#define XFS_BUF_SET_COUNT(bp, cnt)		\
-			((bp)->pb_count_desired = (cnt))
-#define XFS_BUF_SIZE(bp)	((bp)->pb_buffer_length)
-#define XFS_BUF_SET_SIZE(bp, cnt)		\
-			((bp)->pb_buffer_length = (cnt))
-#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
-#define XFS_BUF_SET_VTYPE(bp, type)
-#define XFS_BUF_SET_REF(bp, ref)
-
-#define XFS_BUF_ISPINNED(bp)	pagebuf_ispin(bp)
-
-#define XFS_BUF_VALUSEMA(bp)	pagebuf_lock_value(bp)
-#define XFS_BUF_CPSEMA(bp)	(pagebuf_cond_lock(bp) == 0)
-#define XFS_BUF_VSEMA(bp)	pagebuf_unlock(bp)
-#define XFS_BUF_PSEMA(bp,x)	pagebuf_lock(bp)
-#define XFS_BUF_V_IODONESEMA(bp) up(&bp->pb_iodonesema);
-
-/* setup the buffer target from a buftarg structure */
-#define XFS_BUF_SET_TARGET(bp, target)	\
-		(bp)->pb_target = (target)
-#define XFS_BUF_TARGET(bp)	((bp)->pb_target)
-#define XFS_BUFTARG_NAME(target)	\
-		pagebuf_target_name(target)
+#define XFS_B_ASYNC		XBF_ASYNC
+#define XFS_B_DELWRI		XBF_DELWRI
+#define XFS_B_READ		XBF_READ
+#define XFS_B_WRITE		XBF_WRITE
+#define XFS_B_STALE		XBF_STALE
+
+#define XFS_BUF_TRYLOCK		XBF_TRYLOCK
+#define XFS_INCORE_TRYLOCK	XBF_TRYLOCK
+#define XFS_BUF_LOCK		XBF_LOCK
+#define XFS_BUF_MAPPED		XBF_MAPPED
+
+#define BUF_BUSY		XBF_DONT_BLOCK
+
+#define XFS_BUF_BFLAGS(bp)	((bp)->b_flags)
+#define XFS_BUF_ZEROFLAGS(bp)	\
+	((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI))
+
+#define XFS_BUF_STALE(bp)	((bp)->b_flags |= XFS_B_STALE)
+#define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XFS_B_STALE)
+#define XFS_BUF_ISSTALE(bp)	((bp)->b_flags & XFS_B_STALE)
+#define XFS_BUF_SUPER_STALE(bp)	do {				\
+					XFS_BUF_STALE(bp);	\
+					xfs_buf_delwri_dequeue(bp);	\
+					XFS_BUF_DONE(bp);	\
+				} while (0)
+
+#define XFS_BUF_MANAGE		XBF_FS_MANAGED
+#define XFS_BUF_UNMANAGE(bp)	((bp)->b_flags &= ~XBF_FS_MANAGED)
+
+#define XFS_BUF_DELAYWRITE(bp)		((bp)->b_flags |= XBF_DELWRI)
+#define XFS_BUF_UNDELAYWRITE(bp)	xfs_buf_delwri_dequeue(bp)
+#define XFS_BUF_ISDELAYWRITE(bp)	((bp)->b_flags & XBF_DELWRI)
+
+#define XFS_BUF_ERROR(bp,no)	xfs_buf_ioerror(bp,no)
+#define XFS_BUF_GETERROR(bp)	xfs_buf_geterror(bp)
+#define XFS_BUF_ISERROR(bp)	(xfs_buf_geterror(bp) ? 1 : 0)
+
+#define XFS_BUF_DONE(bp)	((bp)->b_flags |= XBF_DONE)
+#define XFS_BUF_UNDONE(bp)	((bp)->b_flags &= ~XBF_DONE)
+#define XFS_BUF_ISDONE(bp)	((bp)->b_flags & XBF_DONE)
+
+#define XFS_BUF_BUSY(bp)	do { } while (0)
+#define XFS_BUF_UNBUSY(bp)	do { } while (0)
+#define XFS_BUF_ISBUSY(bp)	(1)
+
+#define XFS_BUF_ASYNC(bp)	((bp)->b_flags |= XBF_ASYNC)
+#define XFS_BUF_UNASYNC(bp)	((bp)->b_flags &= ~XBF_ASYNC)
+#define XFS_BUF_ISASYNC(bp)	((bp)->b_flags & XBF_ASYNC)
+
+#define XFS_BUF_ORDERED(bp)	((bp)->b_flags |= XBF_ORDERED)
+#define XFS_BUF_UNORDERED(bp)	((bp)->b_flags &= ~XBF_ORDERED)
+#define XFS_BUF_ISORDERED(bp)	((bp)->b_flags & XBF_ORDERED)
+
+#define XFS_BUF_SHUT(bp)	do { } while (0)
+#define XFS_BUF_UNSHUT(bp)	do { } while (0)
+#define XFS_BUF_ISSHUT(bp)	(0)
+
+#define XFS_BUF_HOLD(bp)	xfs_buf_hold(bp)
+#define XFS_BUF_READ(bp)	((bp)->b_flags |= XBF_READ)
+#define XFS_BUF_UNREAD(bp)	((bp)->b_flags &= ~XBF_READ)
+#define XFS_BUF_ISREAD(bp)	((bp)->b_flags & XBF_READ)
+
+#define XFS_BUF_WRITE(bp)	((bp)->b_flags |= XBF_WRITE)
+#define XFS_BUF_UNWRITE(bp)	((bp)->b_flags &= ~XBF_WRITE)
+#define XFS_BUF_ISWRITE(bp)	((bp)->b_flags & XBF_WRITE)
+
+#define XFS_BUF_ISUNINITIAL(bp)	(0)
+#define XFS_BUF_UNUNINITIAL(bp)	(0)
+
+#define XFS_BUF_BP_ISMAPPED(bp)	(1)
+
+#define XFS_BUF_IODONE_FUNC(bp)			((bp)->b_iodone)
+#define XFS_BUF_SET_IODONE_FUNC(bp, func)	((bp)->b_iodone = (func))
+#define XFS_BUF_CLR_IODONE_FUNC(bp)		((bp)->b_iodone = NULL)
+#define XFS_BUF_SET_BDSTRAT_FUNC(bp, func)	((bp)->b_strat = (func))
+#define XFS_BUF_CLR_BDSTRAT_FUNC(bp)		((bp)->b_strat = NULL)
+
+#define XFS_BUF_FSPRIVATE(bp, type)		((type)(bp)->b_fspriv)
+#define XFS_BUF_SET_FSPRIVATE(bp, val)		((bp)->b_fspriv = (void*)(val))
+#define XFS_BUF_FSPRIVATE2(bp, type)		((type)(bp)->b_fspriv2)
+#define XFS_BUF_SET_FSPRIVATE2(bp, val)		((bp)->b_fspriv2 = (void*)(val))
+#define XFS_BUF_FSPRIVATE3(bp, type)		((type)(bp)->b_fspriv3)
+#define XFS_BUF_SET_FSPRIVATE3(bp, val)		((bp)->b_fspriv3 = (void*)(val))
+#define XFS_BUF_SET_START(bp)			do { } while (0)
+#define XFS_BUF_SET_BRELSE_FUNC(bp, func)	((bp)->b_relse = (func))
+
+#define XFS_BUF_PTR(bp)			(xfs_caddr_t)((bp)->b_addr)
+#define XFS_BUF_SET_PTR(bp, val, cnt)	xfs_buf_associate_memory(bp, val, cnt)
+#define XFS_BUF_ADDR(bp)		((bp)->b_bn)
+#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_bn = (xfs_daddr_t)(bno))
+#define XFS_BUF_OFFSET(bp)		((bp)->b_file_offset)
+#define XFS_BUF_SET_OFFSET(bp, off)	((bp)->b_file_offset = (off))
+#define XFS_BUF_COUNT(bp)		((bp)->b_count_desired)
+#define XFS_BUF_SET_COUNT(bp, cnt)	((bp)->b_count_desired = (cnt))
+#define XFS_BUF_SIZE(bp)		((bp)->b_buffer_length)
+#define XFS_BUF_SET_SIZE(bp, cnt)	((bp)->b_buffer_length = (cnt))
+#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)	do { } while (0)
+#define XFS_BUF_SET_VTYPE(bp, type)		do { } while (0)
+#define XFS_BUF_SET_REF(bp, ref)		do { } while (0)
+
+#define XFS_BUF_ISPINNED(bp)	xfs_buf_ispin(bp)
+
+#define XFS_BUF_VALUSEMA(bp)	xfs_buf_lock_value(bp)
+#define XFS_BUF_CPSEMA(bp)	(xfs_buf_cond_lock(bp) == 0)
+#define XFS_BUF_VSEMA(bp)	xfs_buf_unlock(bp)
+#define XFS_BUF_PSEMA(bp,x)	xfs_buf_lock(bp)
+#define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema);
+
+#define XFS_BUF_SET_TARGET(bp, target)	((bp)->b_target = (target))
+#define XFS_BUF_TARGET(bp)		((bp)->b_target)
+#define XFS_BUFTARG_NAME(target)	xfs_buf_target_name(target)
 static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
 {
-	bp->pb_fspriv3 = mp;
-	bp->pb_strat = xfs_bdstrat_cb;
-	pagebuf_delwri_dequeue(bp);
-	return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | _PBF_RUN_QUEUES);
+	bp->b_fspriv3 = mp;
+	bp->b_strat = xfs_bdstrat_cb;
+	xfs_buf_delwri_dequeue(bp);
+	return xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
 }
 static inline void xfs_buf_relse(xfs_buf_t *bp)
 {
-	if (!bp->pb_relse)
-		pagebuf_unlock(bp);
-	pagebuf_rele(bp);
+	if (!bp->b_relse)
+		xfs_buf_unlock(bp);
+	xfs_buf_rele(bp);
 }
-#define xfs_bpin(bp)		pagebuf_pin(bp)
-#define xfs_bunpin(bp)		pagebuf_unpin(bp)
+#define xfs_bpin(bp)		xfs_buf_pin(bp)
+#define xfs_bunpin(bp)		xfs_buf_unpin(bp)
 
 #define xfs_buftrace(id, bp)	\
-	    pagebuf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
+	xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
 
-#define xfs_biodone(pb)		\
-	    pagebuf_iodone(pb, 0)
+#define xfs_biodone(bp)		xfs_buf_ioend(bp, 0)
 
-#define xfs_biomove(pb, off, len, data, rw) \
-	    pagebuf_iomove((pb), (off), (len), (data), \
-		((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ)
+#define xfs_biomove(bp, off, len, data, rw) \
+	    xfs_buf_iomove((bp), (off), (len), (data), \
+		((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)
 
-#define xfs_biozero(pb, off, len) \
-	    pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO)
+#define xfs_biozero(bp, off, len) \
+	    xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
-static inline int XFS_bwrite(xfs_buf_t *pb)
+static inline int XFS_bwrite(xfs_buf_t *bp)
 {
-	int	iowait = (pb->pb_flags & PBF_ASYNC) == 0;
+	int	iowait = (bp->b_flags & XBF_ASYNC) == 0;
 	int	error = 0;
 
 	if (!iowait)
-		pb->pb_flags |= _PBF_RUN_QUEUES;
+		bp->b_flags |= _XBF_RUN_QUEUES;
 
-	pagebuf_delwri_dequeue(pb);
-	pagebuf_iostrategy(pb);
+	xfs_buf_delwri_dequeue(bp);
+	xfs_buf_iostrategy(bp);
 	if (iowait) {
-		error = pagebuf_iowait(pb);
-		xfs_buf_relse(pb);
+		error = xfs_buf_iowait(bp);
+		xfs_buf_relse(bp);
 	}
 	return error;
 }
-#define XFS_bdwrite(pb)	\
-	    pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC)
+#define XFS_bdwrite(bp)		xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC)
 
 static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
 {
-	bp->pb_strat = xfs_bdstrat_cb;
-	bp->pb_fspriv3 = mp;
-	return pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC);
+	bp->b_strat = xfs_bdstrat_cb;
+	bp->b_fspriv3 = mp;
+	return xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
 }
-#define XFS_bdstrat(bp)	pagebuf_iorequest(bp)
+#define XFS_bdstrat(bp)	xfs_buf_iorequest(bp)
 
-#define xfs_iowait(pb)	pagebuf_iowait(pb)
+#define xfs_iowait(bp)	xfs_buf_iowait(bp)
 
 #define xfs_baread(target, rablkno, ralen)  \
-	pagebuf_readahead((target), (rablkno), (ralen), PBF_DONT_BLOCK)
-
-#define xfs_buf_get_empty(len, target)	pagebuf_get_empty((len), (target))
-#define xfs_buf_get_noaddr(len, target)	pagebuf_get_no_daddr((len), (target))
-#define xfs_buf_free(bp)		pagebuf_free(bp)
+	xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK)
 
 /*
  *	Handling of buftargs.
  */
 extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
 extern void xfs_free_buftarg(xfs_buftarg_t *, int);
 extern void xfs_wait_buftarg(xfs_buftarg_t *);
 extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
 extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
 
-#define xfs_getsize_buftarg(buftarg) \
-	block_size((buftarg)->pbr_bdev)
-#define xfs_readonly_buftarg(buftarg) \
-	bdev_read_only((buftarg)->pbr_bdev)
-#define xfs_binval(buftarg) \
-	xfs_flush_buftarg(buftarg, 1)
-#define XFS_bflush(buftarg) \
-	xfs_flush_buftarg(buftarg, 1)
+#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
+#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)
+#define xfs_binval(buftarg)		xfs_flush_buftarg(buftarg, 1)
+#define XFS_bflush(buftarg)		xfs_flush_buftarg(buftarg, 1)
 
 #endif	/* __XFS_BUF_H__ */
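To make the renamed interface concrete, here is a hedged lifecycle sketch composed only of calls and macros declared in this header; target, blkno, len and data are assumed to exist in the surrounding kernel context and error handling is trimmed, so read it as pseudocode against this header rather than code from the commit:

    /* Read one block, modify the mapped contents, queue a delayed write. */
    xfs_buf_t *bp = xfs_buf_read(target, blkno, len, 0); /* XBF_LOCK|XBF_MAPPED */
    if (!XFS_BUF_GETERROR(bp)) {
            memcpy(XFS_BUF_PTR(bp), data, len);  /* XBF_MAPPED => b_addr valid */
            XFS_bdwrite(bp);            /* start I/O as XBF_DELWRI | XBF_ASYNC */
    } else {
            xfs_buf_relse(bp);          /* drop the lock and the hold */
    }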
fs/xfs/linux-2.6/xfs_file.c
 	...
@@ -509,16 +509,14 @@ linvfs_open_exec(
 	vnode_t		*vp = LINVFS_GET_VP(inode);
 	xfs_mount_t	*mp = XFS_VFSTOM(vp->v_vfsp);
 	int		error = 0;
-	bhv_desc_t	*bdp;
 	xfs_inode_t	*ip;
 
 	if (vp->v_vfsp->vfs_flag & VFS_DMI) {
-		bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
-		if (!bdp) {
+		ip = xfs_vtoi(vp);
+		if (!ip) {
 			error = -EINVAL;
 			goto open_exec_out;
 		}
-		ip = XFS_BHVTOI(bdp);
 		if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)) {
 			error = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
 					       0, 0, 0, NULL);
 	...
fs/xfs/linux-2.6/xfs_ioctl.c
 	...
@@ -146,13 +146,10 @@ xfs_find_handle(
 	if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
 		xfs_inode_t	*ip;
-		bhv_desc_t	*bhv;
 		int		lock_mode;
 
 		/* need to get access to the xfs_inode to read the generation */
-		bhv = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
-		ASSERT(bhv);
-		ip = XFS_BHVTOI(bhv);
+		ip = xfs_vtoi(vp);
 		ASSERT(ip);
 
 		lock_mode = xfs_ilock_map_shared(ip);
 	...
@@ -751,9 +748,8 @@ xfs_ioctl(
 			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
 			mp->m_rtdev_targp : mp->m_ddev_targp;
 
-		da.d_mem = da.d_miniosz = 1 << target->pbr_sshift;
-		/* The size dio will do in one go */
-		da.d_maxiosz = 64 * PAGE_CACHE_SIZE;
+		da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
+		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
 
 		if (copy_to_user(arg, &da, sizeof(da)))
 			return -XFS_ERROR(EFAULT);
 	...
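The new d_maxiosz above is the largest multiple of d_miniosz not exceeding INT_MAX, replacing the fixed 64-page cap. Because d_miniosz is a power of two, rounding down is a single mask; a standalone check with an assumed 512-byte minimum:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            int miniosz = 1 << 9;                   /* assumed: 512-byte sectors */
            int maxiosz = INT_MAX & ~(miniosz - 1); /* round down to a multiple */

            printf("%d\n", maxiosz % miniosz);      /* prints 0 */
            return 0;
    }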
fs/xfs/linux-2.6/xfs_iops.c
 	...
@@ -54,10 +54,45 @@
 #include <linux/capability.h>
 #include <linux/xattr.h>
 #include <linux/namei.h>
+#include <linux/security.h>
 
 #define IS_NOATIME(inode) ((inode->i_sb->s_flags & MS_NOATIME) ||	\
 	(S_ISDIR(inode->i_mode) && inode->i_sb->s_flags & MS_NODIRATIME))
 
+/*
+ * Get a XFS inode from a given vnode.
+ */
+xfs_inode_t *
+xfs_vtoi(
+	struct vnode	*vp)
+{
+	bhv_desc_t	*bdp;
+
+	bdp = bhv_lookup_range(VN_BHV_HEAD(vp),
+			VNODE_POSITION_XFS, VNODE_POSITION_XFS);
+	if (unlikely(bdp == NULL))
+		return NULL;
+	return XFS_BHVTOI(bdp);
+}
+
+/*
+ * Bring the atime in the XFS inode uptodate.
+ * Used before logging the inode to disk or when the Linux inode goes away.
+ */
+void
+xfs_synchronize_atime(
+	xfs_inode_t	*ip)
+{
+	vnode_t		*vp;
+
+	vp = XFS_ITOV_NULL(ip);
+	if (vp) {
+		struct inode *inode = &vp->v_inode;
+		ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
+		ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
+	}
+}
+
 /*
  * Change the requested timestamp in the given inode.
  * We don't lock across timestamp updates, and we don't log them but
 	...
@@ -77,23 +112,6 @@ xfs_ichgtime(
 	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
 	timespec_t	tv;
 
-	/*
-	 * We're not supposed to change timestamps in readonly-mounted
-	 * filesystems.  Throw it away if anyone asks us.
-	 */
-	if (unlikely(IS_RDONLY(inode)))
-		return;
-
-	/*
-	 * Don't update access timestamps on reads if mounted "noatime".
-	 * Throw it away if anyone asks us.
-	 */
-	if (unlikely(
-	    (ip->i_mount->m_flags & XFS_MOUNT_NOATIME || IS_NOATIME(inode)) &&
-	    (flags & (XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD|XFS_ICHGTIME_CHG)) ==
-			XFS_ICHGTIME_ACC))
-		return;
-
 	nanotime(&tv);
 	if (flags & XFS_ICHGTIME_MOD) {
 		inode->i_mtime = tv;
 	...
@@ -130,8 +148,6 @@ xfs_ichgtime(
  * Variant on the above which avoids querying the system clock
  * in situations where we know the Linux inode timestamps have
  * just been updated (and so we can update our inode cheaply).
- * We also skip the readonly and noatime checks here, they are
- * also catered for already.
  */
 void
 xfs_ichgtime_fast(
 	...
@@ -142,20 +158,16 @@ xfs_ichgtime_fast(
 	timespec_t	*tvp;
 
 	/*
-	 * We're not supposed to change timestamps in readonly-mounted
-	 * filesystems.  Throw it away if anyone asks us.
+	 * Atime updates for read() & friends are handled lazily now, and
+	 * explicit updates must go through xfs_ichgtime()
 	 */
-	if (unlikely(IS_RDONLY(inode)))
-		return;
+	ASSERT((flags & XFS_ICHGTIME_ACC) == 0);
 
 	/*
-	 * Don't update access timestamps on reads if mounted "noatime".
-	 * Throw it away if anyone asks us.
+	 * We're not supposed to change timestamps in readonly-mounted
+	 * filesystems.  Throw it away if anyone asks us.
 	 */
-	if (unlikely(
-	    (ip->i_mount->m_flags & XFS_MOUNT_NOATIME || IS_NOATIME(inode)) &&
-	    ((flags & (XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD|XFS_ICHGTIME_CHG)) ==
-			XFS_ICHGTIME_ACC)))
+	if (unlikely(IS_RDONLY(inode)))
 		return;
 
 	if (flags & XFS_ICHGTIME_MOD) {
 	...
@@ -163,11 +175,6 @@ xfs_ichgtime_fast(
 		ip->i_d.di_mtime.t_sec = (__int32_t)tvp->tv_sec;
 		ip->i_d.di_mtime.t_nsec = (__int32_t)tvp->tv_nsec;
 	}
-	if (flags & XFS_ICHGTIME_ACC) {
-		tvp = &inode->i_atime;
-		ip->i_d.di_atime.t_sec = (__int32_t)tvp->tv_sec;
-		ip->i_d.di_atime.t_nsec = (__int32_t)tvp->tv_nsec;
-	}
 	if (flags & XFS_ICHGTIME_CHG) {
 		tvp = &inode->i_ctime;
 		ip->i_d.di_ctime.t_sec = (__int32_t)tvp->tv_sec;
 	...
@@ -213,6 +220,39 @@ validate_fields(
 	}
 }
 
+/*
+ * Hook in SELinux.  This is not quite correct yet, what we really need
+ * here (as we do for default ACLs) is a mechanism by which creation of
+ * these attrs can be journalled at inode creation time (along with the
+ * inode, of course, such that log replay can't cause these to be lost).
+ */
+STATIC int
+linvfs_init_security(
+	struct vnode	*vp,
+	struct inode	*dir)
+{
+	struct inode	*ip = LINVFS_GET_IP(vp);
+	size_t		length;
+	void		*value;
+	char		*name;
+	int		error;
+
+	error = security_inode_init_security(ip, dir, &name, &value, &length);
+	if (error) {
+		if (error == -EOPNOTSUPP)
+			return 0;
+		return -error;
+	}
+
+	VOP_ATTR_SET(vp, name, value, length, ATTR_SECURE, NULL, error);
+	if (!error)
+		VMODIFY(vp);
+
+	kfree(name);
+	kfree(value);
+	return error;
+}
+
 /*
  * Determine whether a process has a valid fs_struct (kernel daemons
  * like knfsd don't have an fs_struct).
 	...
@@ -278,6 +318,9 @@ linvfs_mknod(
 		break;
 	}
 
+	if (!error)
+		error = linvfs_init_security(vp, dir);
+
 	if (default_acl) {
 		if (!error) {
 			error = _ACL_INHERIT(vp, &va, default_acl);
 	...
@@ -294,8 +337,6 @@ linvfs_mknod(
 		teardown.d_inode = ip = LINVFS_GET_IP(vp);
 		teardown.d_name = dentry->d_name;
 
-		vn_mark_bad(vp);
-
 		if (S_ISDIR(mode))
 			VOP_RMDIR(dvp, &teardown, NULL, err2);
 		else
 	...
@@ -506,7 +547,7 @@ linvfs_follow_link(
 	ASSERT(dentry);
 	ASSERT(nd);
 
-	link = (char *)kmalloc(MAXNAMELEN+1, GFP_KERNEL);
+	link = (char *)kmalloc(MAXPATHLEN+1, GFP_KERNEL);
 	if (!link) {
 		nd_set_link(nd, ERR_PTR(-ENOMEM));
 		return NULL;
 	...
@@ -522,12 +563,12 @@ linvfs_follow_link(
 	vp = LINVFS_GET_VP(dentry->d_inode);
 
 	iov.iov_base = link;
-	iov.iov_len = MAXNAMELEN;
+	iov.iov_len = MAXPATHLEN;
 
 	uio->uio_iov = &iov;
 	uio->uio_offset = 0;
 	uio->uio_segflg = UIO_SYSSPACE;
-	uio->uio_resid = MAXNAMELEN;
+	uio->uio_resid = MAXPATHLEN;
 	uio->uio_iovcnt = 1;
 
 	VOP_READLINK(vp, uio, 0, NULL, error);
 	...
@@ -535,7 +576,7 @@ linvfs_follow_link(
 		kfree(link);
 		link = ERR_PTR(-error);
 	} else {
-		link[MAXNAMELEN - uio->uio_resid] = '\0';
+		link[MAXPATHLEN - uio->uio_resid] = '\0';
 	}
 
 	kfree(uio);
 	...
fs/xfs/linux-2.6/xfs_iops.h
 	...
@@ -26,11 +26,6 @@ extern struct file_operations linvfs_file_operations;
 extern struct file_operations linvfs_invis_file_operations;
 extern struct file_operations linvfs_dir_operations;
 
-extern struct address_space_operations linvfs_aops;
-
-extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
-extern void linvfs_unwritten_done(struct buffer_head *, int);
-
 extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *,
 			int, unsigned int, void __user *);
 	...
fs/xfs/linux-2.6/xfs_linux.h
 	...
@@ -110,10 +110,6 @@
  * delalloc and these ondisk-uninitialised buffers.
  */
 BUFFER_FNS(PrivateStart, unwritten);
-static inline void set_buffer_unwritten_io(struct buffer_head *bh)
-{
-	bh->b_end_io = linvfs_unwritten_done;
-}
 
 #define restricted_chown	xfs_params.restrict_chown.val
 #define irix_sgid_inherit	xfs_params.sgid_inherit.val
 	...
@@ -232,7 +228,7 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
 #define xfs_itruncate_data(ip, off)	\
 	(-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
 #define xfs_statvfs_fsid(statp, mp)	\
-	({ u64 id = huge_encode_dev((mp)->m_dev);	\
+	({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev);	\
 	   __kernel_fsid_t *fsid = &(statp)->f_fsid;	\
 	   (fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); })
 	...
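The fsid macro now takes the device number from the data buftarg instead of the removed m_dev field; the rest of the macro just splits the 64-bit encoded id into the two 32-bit fsid words. Standalone, with an example value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t id = 0x123456789abcdef0ULL;  /* example encoded dev number */
            uint32_t val0 = (uint32_t)id;         /* low word  -> fsid->val[0] */
            uint32_t val1 = (uint32_t)(id >> 32); /* high word -> fsid->val[1] */

            printf("%#x %#x\n", val0, val1);      /* 0x9abcdef0 0x12345678 */
            return 0;
    }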
fs/xfs/linux-2.6/xfs_lrw.c
 	...
@@ -233,8 +233,8 @@ xfs_read(
 		xfs_buftarg_t	*target =
 			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
 				mp->m_rtdev_targp : mp->m_ddev_targp;
-		if ((*offset & target->pbr_smask) ||
-		    (size & target->pbr_smask)) {
+		if ((*offset & target->bt_smask) ||
+		    (size & target->bt_smask)) {
 			if (*offset == ip->i_d.di_size) {
 				return (0);
 			}
 	...
@@ -281,9 +281,6 @@ xfs_read(
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
-	if (likely(!(ioflags & IO_INVIS)))
-		xfs_ichgtime_fast(ip, inode, XFS_ICHGTIME_ACC);
-
 unlock_isem:
 	if (unlikely(ioflags & IO_ISDIRECT))
 		mutex_unlock(&inode->i_mutex);
 	...
@@ -346,9 +343,6 @@ xfs_sendfile(
 	if (ret > 0)
 		XFS_STATS_ADD(xs_read_bytes, ret);
 
-	if (likely(!(ioflags & IO_INVIS)))
-		xfs_ichgtime_fast(ip, LINVFS_GET_IP(vp), XFS_ICHGTIME_ACC);
-
 	return ret;
 }
 	...
@@ -362,7 +356,6 @@ STATIC int				/* error (positive) */
 xfs_zero_last_block(
 	struct inode	*ip,
 	xfs_iocore_t	*io,
-	xfs_off_t	offset,
 	xfs_fsize_t	isize,
 	xfs_fsize_t	end_size)
 {
 	...
@@ -371,19 +364,16 @@ xfs_zero_last_block(
 	int		nimaps;
 	int		zero_offset;
 	int		zero_len;
-	int		isize_fsb_offset;
 	int		error = 0;
 	xfs_bmbt_irec_t	imap;
 	loff_t		loff;
-	size_t		lsize;
 
 	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
-	ASSERT(offset > isize);
 
 	mp = io->io_mount;
 
-	isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize);
-	if (isize_fsb_offset == 0) {
+	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
+	if (zero_offset == 0) {
 		/*
 		 * There are no extra bytes in the last block on disk to
 		 * zero, so return.
 	...
@@ -413,10 +403,8 @@ xfs_zero_last_block(
 	 */
 	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
 	loff = XFS_FSB_TO_B(mp, last_fsb);
-	lsize = XFS_FSB_TO_B(mp, 1);
 
-	zero_offset = isize_fsb_offset;
-	zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset;
+	zero_len = mp->m_sb.sb_blocksize - zero_offset;
 
 	error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);
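The simplification above computes zero_offset directly as the offset of isize within its filesystem block and zero_len as the remainder of that block. The same arithmetic, standalone, with an assumed 4096-byte block size:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long isize = 10000;  /* example file size */
            unsigned int blocksize = 4096;     /* assumed fs block size */
            unsigned int zero_offset = isize & (blocksize - 1);  /* 1808 */
            unsigned int zero_len = blocksize - zero_offset;     /* 2288 */

            /* Zero the tail of the last block: bytes 10000..12287. */
            printf("zero %u bytes at offset %u\n", zero_len, zero_offset);
            return 0;
    }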
...
@@ -447,20 +435,17 @@ xfs_zero_eof(
 	struct inode	*ip = LINVFS_GET_IP(vp);
 	xfs_fileoff_t	start_zero_fsb;
 	xfs_fileoff_t	end_zero_fsb;
-	xfs_fileoff_t	prev_zero_fsb;
 	xfs_fileoff_t	zero_count_fsb;
 	xfs_fileoff_t	last_fsb;
 	xfs_extlen_t	buf_len_fsb;
-	xfs_extlen_t	prev_zero_count;
 	xfs_mount_t	*mp;
 	int		nimaps;
 	int		error = 0;
 	xfs_bmbt_irec_t	imap;
-	loff_t		loff;
-	size_t		lsize;
 
 	ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
 	ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
+	ASSERT(offset > isize);
 
 	mp = io->io_mount;
...
@@ -468,7 +453,7 @@ xfs_zero_eof(
 	 * First handle zeroing the block on which isize resides.
 	 * We only zero a part of that block so it is handled specially.
 	 */
-	error = xfs_zero_last_block(ip, io, offset, isize, end_size);
+	error = xfs_zero_last_block(ip, io, isize, end_size);
 	if (error) {
 		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
 		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
...
@@ -496,8 +481,6 @@ xfs_zero_eof(
 	}
 
 	ASSERT(start_zero_fsb <= end_zero_fsb);
-	prev_zero_fsb = NULLFILEOFF;
-	prev_zero_count = 0;
 	while (start_zero_fsb <= end_zero_fsb) {
 		nimaps = 1;
 		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
...
@@ -519,10 +502,7 @@ xfs_zero_eof(
 		 * that sits on a hole and sets the page as P_HOLE
 		 * and calls remapf if it is a mapped file.
 		 */
-			prev_zero_fsb = NULLFILEOFF;
-			prev_zero_count = 0;
 			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
 			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
 			continue;
 		}
...
@@ -543,17 +523,15 @@ xfs_zero_eof(
 		 */
 		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
-		loff = XFS_FSB_TO_B(mp, start_zero_fsb);
-		lsize = XFS_FSB_TO_B(mp, buf_len_fsb);
 
-		error = xfs_iozero(ip, loff, lsize, end_size);
+		error = xfs_iozero(ip,
+				   XFS_FSB_TO_B(mp, start_zero_fsb),
+				   XFS_FSB_TO_B(mp, buf_len_fsb),
+				   end_size);
 		if (error) {
 			goto out_lock;
 		}
 
-		prev_zero_fsb = start_zero_fsb;
-		prev_zero_count = buf_len_fsb;
 		start_zero_fsb = imap.br_startoff + buf_len_fsb;
 		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
...
@@ -640,7 +618,7 @@ xfs_write(
 		(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
 			mp->m_rtdev_targp : mp->m_ddev_targp;
 
-	if ((pos & target->pbr_smask) || (count & target->pbr_smask))
+	if ((pos & target->bt_smask) || (count & target->bt_smask))
 		return XFS_ERROR(-EINVAL);
 
 	if (!VN_CACHED(vp) && pos < i_size_read(inode))
...
@@ -831,6 +809,10 @@ xfs_write(
 			goto retry;
 	}
 
+	isize = i_size_read(inode);
+	if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
+		*offset = isize;
+
 	if (*offset > xip->i_d.di_size) {
 		xfs_ilock(xip, XFS_ILOCK_EXCL);
 		if (*offset > xip->i_d.di_size) {
...
@@ -956,7 +938,7 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
 	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
 	if (!XFS_FORCED_SHUTDOWN(mp)) {
-		pagebuf_iorequest(bp);
+		xfs_buf_iorequest(bp);
 		return 0;
 	} else {
 		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
...
@@ -1009,7 +991,7 @@ xfsbdstrat(
 	 * if (XFS_BUF_IS_GRIO(bp)) {
 	 */
-	pagebuf_iorequest(bp);
+	xfs_buf_iorequest(bp);
 	return 0;
 }
...
fs/xfs/linux-2.6/xfs_stats.c
...
@@ -34,7 +34,7 @@ xfs_read_xfsstats(
 	__uint64_t	xs_write_bytes = 0;
 	__uint64_t	xs_read_bytes = 0;
 
-	static struct xstats_entry {
+	static const struct xstats_entry {
 		char	*desc;
 		int	endpoint;
 	} xstats[] = {
...
fs/xfs/linux-2.6/xfs_stats.h
...
@@ -109,15 +109,15 @@ struct xfsstats {
 	__uint32_t		vn_remove;	/* # times vn_remove called */
 	__uint32_t		vn_free;	/* # times vn_free called */
 #define XFSSTAT_END_BUF			(XFSSTAT_END_VNODE_OPS+9)
-	__uint32_t		pb_get;
-	__uint32_t		pb_create;
-	__uint32_t		pb_get_locked;
-	__uint32_t		pb_get_locked_waited;
-	__uint32_t		pb_busy_locked;
-	__uint32_t		pb_miss_locked;
-	__uint32_t		pb_page_retries;
-	__uint32_t		pb_page_found;
-	__uint32_t		pb_get_read;
+	__uint32_t		xb_get;
+	__uint32_t		xb_create;
+	__uint32_t		xb_get_locked;
+	__uint32_t		xb_get_locked_waited;
+	__uint32_t		xb_busy_locked;
+	__uint32_t		xb_miss_locked;
+	__uint32_t		xb_page_retries;
+	__uint32_t		xb_page_found;
+	__uint32_t		xb_get_read;
 /* Extra precision counters */
 	__uint64_t		xs_xstrat_bytes;
 	__uint64_t		xs_write_bytes;
...
fs/xfs/linux-2.6/xfs_super.c
...
@@ -306,13 +306,15 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
 		xfs_fs_cmn_err(CE_NOTE, mp,
 		  "Disabling barriers, not supported with external log device");
 		mp->m_flags &= ~XFS_MOUNT_BARRIER;
+		return;
 	}
 
-	if (mp->m_ddev_targp->pbr_bdev->bd_disk->queue->ordered ==
+	if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
 					QUEUE_ORDERED_NONE) {
 		xfs_fs_cmn_err(CE_NOTE, mp,
 		  "Disabling barriers, not supported by the underlying device");
 		mp->m_flags &= ~XFS_MOUNT_BARRIER;
+		return;
 	}
 
 	error = xfs_barrier_test(mp);
...
@@ -320,6 +322,7 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
 		xfs_fs_cmn_err(CE_NOTE, mp,
 		  "Disabling barriers, trial barrier write failed");
 		mp->m_flags &= ~XFS_MOUNT_BARRIER;
+		return;
 	}
 }
...
@@ -327,7 +330,7 @@ void
 xfs_blkdev_issue_flush(
 	xfs_buftarg_t		*buftarg)
 {
-	blkdev_issue_flush(buftarg->pbr_bdev, NULL);
+	blkdev_issue_flush(buftarg->bt_bdev, NULL);
 }
 
 STATIC struct inode *
...
@@ -576,7 +579,7 @@ xfssyncd(
 		timeleft = schedule_timeout_interruptible(timeleft);
 		/* swsusp */
 		try_to_freeze();
-		if (kthread_should_stop())
+		if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list))
 			break;
 
 		spin_lock(&vfsp->vfs_sync_lock);
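The added list_empty() condition makes xfssyncd drain its work list before honouring a stop request, so items queued during unmount are not leaked. A sketch of the pattern (the worklist type and drain step are hypothetical; kthread_should_stop(), list_empty(), and schedule_timeout_interruptible() are the real kernel APIs used above):

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched.h>

struct worklist {			/* hypothetical */
	struct list_head	items;
};

static void process_pending(struct worklist *wl)
{
	/* hypothetical: pop and handle queued items */
}

static int syncd_like_thread(void *data)
{
	struct worklist *wl = data;

	for (;;) {
		schedule_timeout_interruptible(HZ);
		/* exit only when asked to stop AND nothing is queued */
		if (kthread_should_stop() && list_empty(&wl->items))
			break;
		process_pending(wl);
	}
	return 0;
}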
...
@@ -966,9 +969,9 @@ init_xfs_fs( void )
 	if (error < 0)
 		goto undo_zones;
 
-	error = pagebuf_init();
+	error = xfs_buf_init();
 	if (error < 0)
-		goto undo_pagebuf;
+		goto undo_buffers;
 
 	vn_init();
 	xfs_init();
...
@@ -982,9 +985,9 @@ init_xfs_fs( void )
 	return 0;
 
 undo_register:
-	pagebuf_terminate();
+	xfs_buf_terminate();
 
-undo_pagebuf:
+undo_buffers:
 	linvfs_destroy_zones();
 
 undo_zones:
...
@@ -998,7 +1001,7 @@ exit_xfs_fs( void )
 	XFS_DM_EXIT(&xfs_fs_type);
 	unregister_filesystem(&xfs_fs_type);
 	xfs_cleanup();
-	pagebuf_terminate();
+	xfs_buf_terminate();
 	linvfs_destroy_zones();
 	ktrace_uninit();
 }
...
fs/xfs/linux-2.6/xfs_vnode.c
...
@@ -106,7 +106,6 @@ vn_revalidate_core(
 	inode->i_blocks	    = vap->va_nblocks;
 	inode->i_mtime	    = vap->va_mtime;
 	inode->i_ctime	    = vap->va_ctime;
-	inode->i_atime	    = vap->va_atime;
 	inode->i_blksize    = vap->va_blocksize;
 	if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
 		inode->i_flags |= S_IMMUTABLE;
...
fs/xfs/linux-2.6/xfs_vnode.h
...
@@ -565,6 +565,25 @@ static inline int VN_BAD(struct vnode *vp)
 	return is_bad_inode(LINVFS_GET_IP(vp));
 }
 
+/*
+ * Extracting atime values in various formats
+ */
+static inline void vn_atime_to_bstime(struct vnode *vp, xfs_bstime_t *bs_atime)
+{
+	bs_atime->tv_sec = vp->v_inode.i_atime.tv_sec;
+	bs_atime->tv_nsec = vp->v_inode.i_atime.tv_nsec;
+}
+
+static inline void vn_atime_to_timespec(struct vnode *vp, struct timespec *ts)
+{
+	*ts = vp->v_inode.i_atime;
+}
+
+static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt)
+{
+	*tt = vp->v_inode.i_atime.tv_sec;
+}
+
 /*
  * Some useful predicates.
  */
...
fs/xfs/quota/xfs_dquot_item.c
...
@@ -239,7 +239,7 @@ xfs_qm_dquot_logitem_pushbuf(
 	 * trying to duplicate our effort.
 	 */
 	ASSERT(qip->qli_pushbuf_flag != 0);
-	ASSERT(qip->qli_push_owner == get_thread_id());
+	ASSERT(qip->qli_push_owner == current_pid());
 
 	/*
 	 * If flushlock isn't locked anymore, chances are that the
...
@@ -333,7 +333,7 @@ xfs_qm_dquot_logitem_trylock(
 		qip->qli_pushbuf_flag = 1;
 		ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
 #ifdef DEBUG
-		qip->qli_push_owner = get_thread_id();
+		qip->qli_push_owner = current_pid();
 #endif
 		/*
 		 * The dquot is left locked.
...
fs/xfs/quota/xfs_qm.c
...
@@ -1392,11 +1392,12 @@ xfs_qm_qino_alloc(
 {
 	xfs_trans_t	*tp;
 	int		error;
 	unsigned long	s;
 	cred_t		zerocr;
+	xfs_inode_t	zeroino;
 	int		committed;
 
 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
 	if ((error = xfs_trans_reserve(tp,
 				      XFS_QM_QINOCREATE_SPACE_RES(mp),
 				      XFS_CREATE_LOG_RES(mp), 0,
...
@@ -1406,8 +1407,9 @@ xfs_qm_qino_alloc(
 		return (error);
 	}
 	memset(&zerocr, 0, sizeof(zerocr));
+	memset(&zeroino, 0, sizeof(zeroino));
 
-	if ((error = xfs_dir_ialloc(&tp, mp->m_rootip, S_IFREG, 1, 0,
+	if ((error = xfs_dir_ialloc(&tp, &zeroino, S_IFREG, 1, 0,
 				   &zerocr, 0, 1, ip, &committed))) {
 		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
 				 XFS_TRANS_ABORT);
...
@@ -1918,9 +1920,7 @@ xfs_qm_quotacheck(
 	 * at this point (because we intentionally didn't in dqget_noattach).
 	 */
 	if (error) {
-		xfs_qm_dqpurge_all(mp,
-				   XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA|
-				   XFS_QMOPT_PQUOTA|XFS_QMOPT_QUOTAOFF);
+		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
 		goto error_return;
 	}
 	/*
...
@@ -2743,6 +2743,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
 		xfs_dqunlock(udqp);
 		ASSERT(ip->i_udquot == NULL);
 		ip->i_udquot = udqp;
+		ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp));
 		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
 		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
 	}
...
@@ -2752,7 +2753,10 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
 		xfs_dqunlock(gdqp);
 		ASSERT(ip->i_gdquot == NULL);
 		ip->i_gdquot = gdqp;
-		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
+		ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp));
+		ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ?
+			ip->i_d.di_gid : ip->i_d.di_projid) ==
+			be32_to_cpu(gdqp->q_core.d_id));
 		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
 	}
 }
...
fs/xfs/support/debug.c
...
@@ -27,44 +27,11 @@ static DEFINE_SPINLOCK(xfs_err_lock);
 /* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */
 #define XFS_MAX_ERR_LEVEL	7
 #define XFS_ERR_MASK		((1 << 3) - 1)
-static char		*err_level[XFS_MAX_ERR_LEVEL+1] =
+static const char * const	err_level[XFS_MAX_ERR_LEVEL+1] =
 					{KERN_EMERG, KERN_ALERT, KERN_CRIT,
 					 KERN_ERR, KERN_WARNING, KERN_NOTICE,
 					 KERN_INFO, KERN_DEBUG};
 
-void
-assfail(char *a, char *f, int l)
-{
-	printk("XFS assertion failed: %s, file: %s, line: %d\n", a, f, l);
-	BUG();
-}
-
-#if ((defined(DEBUG) || defined(INDUCE_IO_ERRROR)) && !defined(NO_WANT_RANDOM))
-unsigned long random(void)
-{
-	static unsigned long	RandomValue = 1;
-	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
-	register long	rv = RandomValue;
-	register long	lo;
-	register long	hi;
-
-	hi = rv / 127773;
-	lo = rv % 127773;
-	rv = 16807 * lo - 2836 * hi;
-	if (rv <= 0) rv += 2147483647;
-	return( RandomValue = rv );
-}
-
-int get_thread_id(void)
-{
-	return current->pid;
-}
-#endif /* DEBUG || INDUCE_IO_ERRROR || !NO_WANT_RANDOM */
-
 void
 cmn_err(register int level, char *fmt, ...)
...
@@ -90,7 +57,6 @@ cmn_err(register int level, char *fmt, ...)
 	BUG();
 }
 
-
 void
 icmn_err(register int level, char *fmt, va_list ap)
...
@@ -109,3 +75,27 @@ icmn_err(register int level, char *fmt, va_list ap)
 	if (level == CE_PANIC)
 		BUG();
 }
+
+void
+assfail(char *expr, char *file, int line)
+{
+	printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line);
+	BUG();
+}
+
+#if ((defined(DEBUG) || defined(INDUCE_IO_ERRROR)) && !defined(NO_WANT_RANDOM))
+unsigned long random(void)
+{
+	static unsigned long	RandomValue = 1;
+	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
+	register long	rv = RandomValue;
+	register long	lo;
+	register long	hi;
+
+	hi = rv / 127773;
+	lo = rv % 127773;
+	rv = 16807 * lo - 2836 * hi;
+	if (rv <= 0) rv += 2147483647;
+	return RandomValue = rv;
+}
+#endif /* DEBUG || INDUCE_IO_ERRROR || !NO_WANT_RANDOM */
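The random() routine being moved here is the classic Park-Miller "minimal standard" generator: x' = 16807 * x mod (2^31 - 1), cycling through all values in 1..2^31-2. The 127773/2836 split is Schrage's decomposition (quotient and remainder of 2^31-1 divided by 16807), which computes the product modulo 2^31-1 without overflowing a 32-bit long. A stand-alone demo of the identical recurrence:

#include <stdio.h>

static long seed = 1;

/* Park-Miller with Schrage's trick, as in the kernel code above. */
static long park_miller(void)
{
	long hi = seed / 127773;
	long lo = seed % 127773;
	long rv = 16807 * lo - 2836 * hi;

	if (rv <= 0)
		rv += 2147483647;
	return seed = rv;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("%ld\n", park_miller());	/* first value is 16807 */
	return 0;
}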
fs/xfs/support/debug.h
...
@@ -31,24 +31,23 @@ extern void icmn_err(int, char *, va_list)
 	__attribute__ ((format (printf, 2, 0)));
 extern void cmn_err(int, char *, ...)
 	__attribute__ ((format (printf, 2, 3)));
+extern void assfail(char *expr, char *f, int l);
 
-#ifndef STATIC
-# define STATIC static
-#endif
+#define prdev(fmt,targ,args...) \
+	printk("Device %s - " fmt "\n", XFS_BUFTARG_NAME(targ), ## args)
 
-#ifdef DEBUG
-# define ASSERT(EX) ((EX) ? ((void)0) : assfail(#EX, __FILE__, __LINE__))
-#else
-# define ASSERT(x)	((void)0)
-#endif
+#define ASSERT_ALWAYS(expr)	\
+	(unlikely((expr) != 0) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
-extern void assfail(char *, char *, int);
-#ifdef DEBUG
+#ifndef DEBUG
+# define ASSERT(expr)	((void)0)
+#else
+# define ASSERT(expr)	ASSERT_ALWAYS(expr)
 extern unsigned long random(void);
-extern int get_thread_id(void);
 #endif
 
-#define ASSERT_ALWAYS(EX)  ((EX)?((void)0):assfail(#EX, __FILE__, __LINE__))
-#define	debug_stop_all_cpus(param)	/* param is "cpumask_t *" */
+#ifndef STATIC
+# define STATIC static
+#endif
 
 #endif  /* __XFS_SUPPORT_DEBUG_H__ */
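After this rework ASSERT is simply ASSERT_ALWAYS on DEBUG builds and a no-op otherwise, so the failure path lives in one place. A user-space sketch of the same two-macro pattern (assfail is re-created here with printf/abort; the kernel version calls printk/BUG):

#include <stdio.h>
#include <stdlib.h>

#define assfail(expr, file, line) \
	(printf("Assertion failed: %s, file: %s, line: %d\n", \
		expr, file, line), abort())

/* unconditional check */
#define ASSERT_ALWAYS(expr) \
	((expr) != 0 ? (void)0 : assfail(#expr, __FILE__, __LINE__))

/* debug-only check: compiles away entirely without -DDEBUG */
#ifdef DEBUG
# define ASSERT(expr)	ASSERT_ALWAYS(expr)
#else
# define ASSERT(expr)	((void)0)
#endif

int main(void)
{
	ASSERT(1 + 1 == 2);		/* checked only under -DDEBUG */
	ASSERT_ALWAYS(sizeof(int) >= 2);/* always checked */
	return 0;
}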
fs/xfs/support/uuid.c
...
@@ -27,6 +27,16 @@ uuid_init(void)
 	mutex_init(&uuid_monitor);
 }
 
+/* IRIX interpretation of an uuid_t */
+typedef struct {
+	__be32	uu_timelow;
+	__be16	uu_timemid;
+	__be16	uu_timehi;
+	__be16	uu_clockseq;
+	__be16	uu_node[3];
+} xfs_uu_t;
+
 /*
  * uuid_getnodeuniq - obtain the node unique fields of a UUID.
  *
...
@@ -36,16 +46,11 @@ uuid_init(void)
 void
 uuid_getnodeuniq(uuid_t *uuid, int fsid[2])
 {
-	char	*uu = (char *)uuid;
-
-	/* on IRIX, this function assumes big-endian fields within
-	 * the uuid, so we use INT_GET to get the same result on
-	 * little-endian systems
-	 */
+	xfs_uu_t *uup = (xfs_uu_t *)uuid;
 
-	fsid[0] = (INT_GET(*(u_int16_t*)(uu+8), ARCH_CONVERT) << 16) +
-		   INT_GET(*(u_int16_t*)(uu+4), ARCH_CONVERT);
-	fsid[1] = INT_GET(*(u_int32_t*)(uu  ), ARCH_CONVERT);
+	fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) |
+		   be16_to_cpu(uup->uu_timemid);
+	fsid[1] = be16_to_cpu(uup->uu_timelow);
 }
 
 void
...
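The rewrite replaces raw byte-offset arithmetic with a typed big-endian view of the UUID, which is both clearer and endian-safe. A user-space analogue (be16toh/be32toh stand in for the kernel's be16_to_cpu/be32_to_cpu; field names mirror xfs_uu_t but the demo values are invented, and the full 32-bit read of timelow is shown for clarity where the kernel line above narrows it through be16_to_cpu):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct uu_view {
	uint32_t timelow;
	uint16_t timemid;
	uint16_t timehi;
	uint16_t clockseq;
	uint16_t node[3];
};

int main(void)
{
	unsigned char uuid[16] = {
		0x12, 0x34, 0x56, 0x78,		/* timelow  */
		0x9a, 0xbc,			/* timemid  */
		0xde, 0xf0,			/* timehi   */
		0x11, 0x22,			/* clockseq */
	};
	struct uu_view uu;
	int fsid[2];

	memcpy(&uu, uuid, sizeof(uu));		/* avoids aliasing issues */
	fsid[0] = (be16toh(uu.clockseq) << 16) | be16toh(uu.timemid);
	fsid[1] = be32toh(uu.timelow);
	printf("fsid = %08x %08x\n", fsid[0], fsid[1]);
	return 0;
}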
fs/xfs/xfs_arch.h
...
@@ -40,6 +40,22 @@
 #undef XFS_NATIVE_HOST
 #endif
 
+#ifdef XFS_NATIVE_HOST
+#define cpu_to_be16(val)	((__be16)(val))
+#define cpu_to_be32(val)	((__be32)(val))
+#define cpu_to_be64(val)	((__be64)(val))
+#define be16_to_cpu(val)	((__uint16_t)(val))
+#define be32_to_cpu(val)	((__uint32_t)(val))
+#define be64_to_cpu(val)	((__uint64_t)(val))
+#else
+#define cpu_to_be16(val)	(__swab16((__uint16_t)(val)))
+#define cpu_to_be32(val)	(__swab32((__uint32_t)(val)))
+#define cpu_to_be64(val)	(__swab64((__uint64_t)(val)))
+#define be16_to_cpu(val)	(__swab16((__be16)(val)))
+#define be32_to_cpu(val)	(__swab32((__be32)(val)))
+#define be64_to_cpu(val)	(__swab64((__be64)(val)))
+#endif
+
 #endif	/* __KERNEL__ */
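On a big-endian (XFS_NATIVE_HOST) build these conversions are identity casts; everywhere else they byte-swap. A user-space analogue using GCC's predefined byte-order macro and __builtin_bswap32 in place of the kernel's __swab32 (a sketch, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define cpu_to_be32(v)	((uint32_t)(v))		/* already big-endian */
#define be32_to_cpu(v)	((uint32_t)(v))
#else
#define cpu_to_be32(v)	__builtin_bswap32((uint32_t)(v))
#define be32_to_cpu(v)	__builtin_bswap32((uint32_t)(v))
#endif

int main(void)
{
	uint32_t disk = cpu_to_be32(0x11223344);	/* on-disk order */

	printf("on disk: %08x, back in cpu order: %08x\n",
	       disk, be32_to_cpu(disk));
	return 0;
}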
 /* do we need conversion? */
...
@@ -186,7 +202,7 @@ static inline void be64_add(__be64 *a, __s64 b)
 */
 
 #define XFS_GET_DIR_INO4(di) \
-	(((u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
+	(((__u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
 
 #define XFS_PUT_DIR_INO4(from, di) \
 do { \
...
@@ -197,9 +213,9 @@ do { \
 } while (0)
 
 #define XFS_DI_HI(di) \
-	(((u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
+	(((__u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
 #define XFS_DI_LO(di) \
-	(((u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7]))
+	(((__u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7]))
 
 #define XFS_GET_DIR_INO8(di)	\
 	(((xfs_ino_t)XFS_DI_LO(di) & 0xffffffffULL) | \
...
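The cast on the first byte matters: a byte promoted to (signed) int and shifted left by 24 can land in the sign bit, which is undefined behaviour. Casting to an unsigned 32-bit type first keeps the shift well-defined. A user-space re-creation of the macro (name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Assemble a 4-byte big-endian inode number, as XFS_GET_DIR_INO4
 * does above; the (uint32_t) cast keeps 0xfe << 24 well-defined. */
#define GET_DIR_INO4(b) \
	(((uint32_t)(b)[0] << 24) | ((b)[1] << 16) | ((b)[2] << 8) | (b)[3])

int main(void)
{
	unsigned char ino[4] = { 0xfe, 0xdc, 0xba, 0x98 };

	printf("ino = %#x\n", GET_DIR_INO4(ino));	/* 0xfedcba98 */
	return 0;
}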
fs/xfs/xfs_attr_leaf.c
...
@@ -128,7 +128,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
 		return (offset >= minforkoff) ? minforkoff : 0;
 	}
 
-	if (unlikely(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) {
+	if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
 		if (bytes <= XFS_IFORK_ASIZE(dp))
 			return mp->m_attroffset >> 3;
 		return 0;
...
@@ -157,7 +157,7 @@ xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
 {
 	unsigned long s;
 
-	if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR) &&
+	if ((mp->m_flags & XFS_MOUNT_ATTR2) &&
 	    !(XFS_SB_VERSION_HASATTR2(&mp->m_sb))) {
 		s = XFS_SB_LOCK(mp);
 		if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
...
@@ -311,7 +311,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
 	 */
 	totsize -= size;
 	if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname &&
-	    !(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) {
+	    (mp->m_flags & XFS_MOUNT_ATTR2)) {
 		/*
 		 * Last attribute now removed, revert to original
 		 * inode format making all literal area available
...
@@ -330,7 +330,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
 		dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
 		ASSERT(dp->i_d.di_forkoff);
 		ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname ||
-			(mp->m_flags & XFS_MOUNT_COMPAT_ATTR));
+			!(mp->m_flags & XFS_MOUNT_ATTR2));
 		dp->i_afp->if_ext_max =
 			XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
 		dp->i_df.if_ext_max =
...
@@ -739,7 +739,7 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
 				+ name_loc->namelen
 				+ INT_GET(name_loc->valuelen, ARCH_CONVERT);
 	}
-	if (!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR) &&
+	if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
 	    (bytes == sizeof(struct xfs_attr_sf_hdr)))
 		return(-1);
 	return(xfs_attr_shortform_bytesfit(dp, bytes));
...
@@ -778,7 +778,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
 		goto out;
 
 	if (forkoff == -1) {
-		ASSERT(!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR));
+		ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
 
 		/*
 		 * Last attribute was removed, revert to original
...
fs/xfs/xfs_attr_leaf.h
...
@@ -63,7 +63,7 @@ struct xfs_trans;
 * the leaf_entry.  The namespaces are independent only because we also look
 * at the namespace bit when we are looking for a matching attribute name.
 *
- * We also store a "incomplete" bit in the leaf_entry.  It shows that an
+ * We also store an "incomplete" bit in the leaf_entry.  It shows that an
 * attribute is in the middle of being created and should not be shown to
 * the user if we crash during the time that the bit is set.  We clear the
 * bit when we have finished setting up the attribute.  We do this because
...
@@ -72,42 +72,48 @@ struct xfs_trans;
 */
 #define XFS_ATTR_LEAF_MAPSIZE	3	/* how many freespace slots */
 
+typedef struct xfs_attr_leaf_map {	/* RLE map of free bytes */
+	__uint16_t	base;		/* base of free region */
+	__uint16_t	size;		/* length of free region */
+} xfs_attr_leaf_map_t;
+
+typedef struct xfs_attr_leaf_hdr {	/* constant-structure header block */
+	xfs_da_blkinfo_t info;		/* block type, links, etc. */
+	__uint16_t	count;		/* count of active leaf_entry's */
+	__uint16_t	usedbytes;	/* num bytes of names/values stored */
+	__uint16_t	firstused;	/* first used byte in name area */
+	__uint8_t	holes;		/* != 0 if blk needs compaction */
+	__uint8_t	pad1;
+	xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
+					/* N largest free regions */
+} xfs_attr_leaf_hdr_t;
+
+typedef struct xfs_attr_leaf_entry {	/* sorted on key, not name */
+	xfs_dahash_t	hashval;	/* hash value of name */
+	__uint16_t	nameidx;	/* index into buffer of name/value */
+	__uint8_t	flags;		/* LOCAL/ROOT/SECURE/INCOMPLETE flag */
+	__uint8_t	pad2;		/* unused pad byte */
+} xfs_attr_leaf_entry_t;
+
+typedef struct xfs_attr_leaf_name_local {
+	__uint16_t	valuelen;	/* number of bytes in value */
+	__uint8_t	namelen;	/* length of name bytes */
+	__uint8_t	nameval[1];	/* name/value bytes */
+} xfs_attr_leaf_name_local_t;
+
+typedef struct xfs_attr_leaf_name_remote {
+	xfs_dablk_t	valueblk;	/* block number of value bytes */
+	__uint32_t	valuelen;	/* number of bytes in value */
+	__uint8_t	namelen;	/* length of name bytes */
+	__uint8_t	name[1];	/* name bytes */
+} xfs_attr_leaf_name_remote_t;
+
 typedef struct xfs_attr_leafblock {
-	struct xfs_attr_leaf_hdr {	/* constant-structure header block */
-		xfs_da_blkinfo_t info;	/* block type, links, etc. */
-		__uint16_t count;	/* count of active leaf_entry's */
-		__uint16_t usedbytes;	/* num bytes of names/values stored */
-		__uint16_t firstused;	/* first used byte in name area */
-		__uint8_t  holes;	/* != 0 if blk needs compaction */
-		__uint8_t  pad1;
-		struct xfs_attr_leaf_map {	/* RLE map of free bytes */
-			__uint16_t base;	/* base of free region */
-			__uint16_t size;	/* length of free region */
-		} freemap[XFS_ATTR_LEAF_MAPSIZE]; /* N largest free regions */
-	} hdr;
-	struct xfs_attr_leaf_entry {	/* sorted on key, not name */
-		xfs_dahash_t hashval;	/* hash value of name */
-		__uint16_t nameidx;	/* index into buffer of name/value */
-		__uint8_t flags;	/* LOCAL/ROOT/SECURE/INCOMPLETE flag */
-		__uint8_t pad2;		/* unused pad byte */
-	} entries[1];			/* variable sized array */
-	struct xfs_attr_leaf_name_local {
-		__uint16_t valuelen;	/* number of bytes in value */
-		__uint8_t namelen;	/* length of name bytes */
-		__uint8_t nameval[1];	/* name/value bytes */
-	} namelist;			/* grows from bottom of buf */
-	struct xfs_attr_leaf_name_remote {
-		xfs_dablk_t valueblk;	/* block number of value bytes */
-		__uint32_t valuelen;	/* number of bytes in value */
-		__uint8_t namelen;	/* length of name bytes */
-		__uint8_t name[1];	/* name bytes */
-	} valuelist;			/* grows from bottom of buf */
+	xfs_attr_leaf_hdr_t	hdr;	/* constant-structure header block */
+	xfs_attr_leaf_entry_t	entries[1];	/* sorted on key, not name */
+	xfs_attr_leaf_name_local_t namelist;	/* grows from bottom of buf */
+	xfs_attr_leaf_name_remote_t valuelist;	/* grows from bottom of buf */
 } xfs_attr_leafblock_t;
-typedef struct xfs_attr_leaf_hdr xfs_attr_leaf_hdr_t;
-typedef struct xfs_attr_leaf_map xfs_attr_leaf_map_t;
-typedef struct xfs_attr_leaf_entry xfs_attr_leaf_entry_t;
-typedef struct xfs_attr_leaf_name_local xfs_attr_leaf_name_local_t;
-typedef struct xfs_attr_leaf_name_remote xfs_attr_leaf_name_remote_t;
 
 /*
  * Flags used in the leaf_entry[i].flags field.
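The flattening changes no on-disk bytes; it only un-nests the type definitions. The entries[1] member is the pre-C99 idiom for a variable-length trailing array: the block declares one element and the real count lives in the header. A hypothetical user-space sketch of reading such a block (names and fields are illustrative, not the XFS layout):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct leaf_hdr  { uint16_t count; };
struct leaf_ent  { uint32_t hashval; uint16_t nameidx; };
struct leafblock {
	struct leaf_hdr hdr;
	struct leaf_ent entries[1];	/* really hdr.count entries */
};

int main(void)
{
	uint16_t n = 4;
	/* allocate room for the header plus n entries total */
	struct leafblock *leaf = calloc(1, sizeof(*leaf) +
					(n - 1) * sizeof(struct leaf_ent));

	leaf->hdr.count = n;
	for (int i = 0; i < leaf->hdr.count; i++)
		leaf->entries[i].hashval = i * 0x9e370001u;	/* fake hash */
	printf("entry 3 hash = %#x\n", leaf->entries[3].hashval);
	free(leaf);
	return 0;
}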
...
@@ -150,7 +156,8 @@ xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
 		(leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)];
 }
 
-#define XFS_ATTR_LEAF_NAME(leafp,idx)	xfs_attr_leaf_name(leafp,idx)
+#define XFS_ATTR_LEAF_NAME(leafp,idx) \
+	xfs_attr_leaf_name(leafp,idx)
 static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
 {
 	return (&((char *)
...
fs/xfs/xfs_bmap.c
...
@@ -2146,13 +2146,176 @@ xfs_bmap_add_extent_hole_real(
 	return 0; /* keep gcc quite */
 }
 
+/*
+ * Adjust the size of the new extent based on di_extsize and rt extsize.
+ */
+STATIC int
+xfs_bmap_extsize_align(
+	xfs_mount_t	*mp,
+	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
+	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
+	xfs_extlen_t	extsz,		/* align to this extent size */
+	int		rt,		/* is this a realtime inode? */
+	int		eof,		/* is extent at end-of-file? */
+	int		delay,		/* creating delalloc extent? */
+	int		convert,	/* overwriting unwritten extent? */
+	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
+	xfs_extlen_t	*lenp)		/* in/out: aligned length */
+{
+	xfs_fileoff_t	orig_off;	/* original offset */
+	xfs_extlen_t	orig_alen;	/* original length */
+	xfs_fileoff_t	orig_end;	/* original off+len */
+	xfs_fileoff_t	nexto;		/* next file offset */
+	xfs_fileoff_t	prevo;		/* previous file offset */
+	xfs_fileoff_t	align_off;	/* temp for offset */
+	xfs_extlen_t	align_alen;	/* temp for length */
+	xfs_extlen_t	temp;		/* temp for calculations */
+
+	if (convert)
+		return 0;
+
+	orig_off = align_off = *offp;
+	orig_alen = align_alen = *lenp;
+	orig_end = orig_off + orig_alen;
+
+	/*
+	 * If this request overlaps an existing extent, then don't
+	 * attempt to perform any additional alignment.
+	 */
+	if (!delay && !eof &&
+	    (orig_off >= gotp->br_startoff) &&
+	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
+		return 0;
+	}
+
+	/*
+	 * If the file offset is unaligned vs. the extent size
+	 * we need to align it.  This will be possible unless
+	 * the file was previously written with a kernel that didn't
+	 * perform this alignment, or if a truncate shot us in the
+	 * foot.
+	 */
+	temp = do_mod(orig_off, extsz);
+	if (temp) {
+		align_alen += temp;
+		align_off -= temp;
+	}
+	/*
+	 * Same adjustment for the end of the requested area.
+	 */
+	if ((temp = (align_alen % extsz))) {
+		align_alen += extsz - temp;
+	}
+	/*
+	 * If the previous block overlaps with this proposed allocation
+	 * then move the start forward without adjusting the length.
+	 */
+	if (prevp->br_startoff != NULLFILEOFF) {
+		if (prevp->br_startblock == HOLESTARTBLOCK)
+			prevo = prevp->br_startoff;
+		else
+			prevo = prevp->br_startoff + prevp->br_blockcount;
+	} else
+		prevo = 0;
+	if (align_off != orig_off && align_off < prevo)
+		align_off = prevo;
+	/*
+	 * If the next block overlaps with this proposed allocation
+	 * then move the start back without adjusting the length,
+	 * but not before offset 0.
+	 * This may of course make the start overlap previous block,
+	 * and if we hit the offset 0 limit then the next block
+	 * can still overlap too.
+	 */
+	if (!eof && gotp->br_startoff != NULLFILEOFF) {
+		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
+		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
+			nexto = gotp->br_startoff + gotp->br_blockcount;
+		else
+			nexto = gotp->br_startoff;
+	} else
+		nexto = NULLFILEOFF;
+	if (!eof &&
+	    align_off + align_alen != orig_end &&
+	    align_off + align_alen > nexto)
+		align_off = nexto > align_alen ? nexto - align_alen : 0;
+	/*
+	 * If we're now overlapping the next or previous extent that
+	 * means we can't fit an extsz piece in this hole.  Just move
+	 * the start forward to the first valid spot and set
+	 * the length so we hit the end.
+	 */
+	if (align_off != orig_off && align_off < prevo)
+		align_off = prevo;
+	if (align_off + align_alen != orig_end &&
+	    align_off + align_alen > nexto &&
+	    nexto != NULLFILEOFF) {
+		ASSERT(nexto > prevo);
+		align_alen = nexto - align_off;
+	}
+
+	/*
+	 * If realtime, and the result isn't a multiple of the realtime
+	 * extent size we need to remove blocks until it is.
+	 */
+	if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
+		/*
+		 * We're not covering the original request, or
+		 * we won't be able to once we fix the length.
+		 */
+		if (orig_off < align_off ||
+		    orig_end > align_off + align_alen ||
+		    align_alen - temp < orig_alen)
+			return XFS_ERROR(EINVAL);
+		/*
+		 * Try to fix it by moving the start up.
+		 */
+		if (align_off + temp <= orig_off) {
+			align_alen -= temp;
+			align_off += temp;
+		}
+		/*
+		 * Try to fix it by moving the end in.
+		 */
+		else if (align_off + align_alen - temp >= orig_end)
+			align_alen -= temp;
+		/*
+		 * Set the start to the minimum then trim the length.
+		 */
+		else {
+			align_alen -= orig_off - align_off;
+			align_off = orig_off;
+			align_alen -= align_alen % mp->m_sb.sb_rextsize;
+		}
+		/*
+		 * Result doesn't cover the request, fail it.
+		 */
+		if (orig_off < align_off || orig_end > align_off + align_alen)
+			return XFS_ERROR(EINVAL);
+	} else {
+		ASSERT(orig_off >= align_off);
+		ASSERT(orig_end <= align_off + align_alen);
+	}
+
+#ifdef DEBUG
+	if (!eof && gotp->br_startoff != NULLFILEOFF)
+		ASSERT(align_off + align_alen <= gotp->br_startoff);
+	if (prevp->br_startoff != NULLFILEOFF)
+		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
+#endif
+
+	*lenp = align_alen;
+	*offp = align_off;
+	return 0;
+}
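In essence the new helper widens [off, off+len) outward to extent-size granularity and then nudges the result so it does not collide with the neighbouring extents. A stripped-down sketch of just the rounding step (the clipping against neighbours is omitted; plain % stands in for the kernel's do_mod):

#include <stdint.h>
#include <stdio.h>

/* Grow the request [off, off+len) so both ends land on extsz
 * boundaries; the aligned range always covers the original one. */
static void extsize_align(uint64_t extsz, uint64_t *off, uint64_t *len)
{
	uint64_t head = *off % extsz;		/* misalignment at start */
	uint64_t tail;

	*off -= head;				/* round start down */
	*len += head;
	tail = *len % extsz;
	if (tail)
		*len += extsz - tail;		/* round length up */
}

int main(void)
{
	uint64_t off = 10, len = 5;

	extsize_align(8, &off, &len);
	printf("aligned: off=%llu len=%llu\n",	/* off=8 len=8 */
	       (unsigned long long)off, (unsigned long long)len);
	return 0;
}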
 #define XFS_ALLOC_GAP_UNITS	4
 
 /*
  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
  * It figures out where to ask the underlying allocator to put the new extent.
  */
-STATIC int				/* error */
+STATIC int
 xfs_bmap_alloc(
 	xfs_bmalloca_t	*ap)		/* bmap alloc argument struct */
 {
...
@@ -2163,10 +2326,10 @@ xfs_bmap_alloc(
 	xfs_mount_t	*mp;		/* mount point structure */
 	int		nullfb;		/* true if ap->firstblock isn't set */
 	int		rt;		/* true if inode is realtime */
-#ifdef __KERNEL__
-	xfs_extlen_t	prod=0;		/* product factor for allocators */
-	xfs_extlen_t	ralen=0;	/* realtime allocation length */
-#endif
+	xfs_extlen_t	prod = 0;	/* product factor for allocators */
+	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
+	xfs_extlen_t	align;		/* minimum allocation alignment */
+	xfs_rtblock_t	rtx;		/* realtime extent number */
 
 #define	ISVALID(x,y)	\
 	(rt ? \
...
@@ -2182,125 +2345,25 @@ xfs_bmap_alloc(
 	nullfb = ap->firstblock == NULLFSBLOCK;
 	rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
 	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
-#ifdef __KERNEL__
 	if (rt) {
-		xfs_extlen_t	extsz;		/* file extent size for rt */
-		xfs_fileoff_t	nexto;		/* next file offset */
-		xfs_extlen_t	orig_alen;	/* original ap->alen */
-		xfs_fileoff_t	orig_end;	/* original off+len */
-		xfs_fileoff_t	orig_off;	/* original ap->off */
-		xfs_extlen_t	mod_off;	/* modulus calculations */
-		xfs_fileoff_t	prevo;		/* previous file offset */
-		xfs_rtblock_t	rtx;		/* realtime extent number */
-		xfs_extlen_t	temp;		/* temp for rt calculations */
-
-		/*
-		 * Set prod to match the realtime extent size.
-		 */
-		if (!(extsz = ap->ip->i_d.di_extsize))
-			extsz = mp->m_sb.sb_rextsize;
-		prod = extsz / mp->m_sb.sb_rextsize;
-		orig_off = ap->off;
-		orig_alen = ap->alen;
-		orig_end = orig_off + orig_alen;
-		/*
-		 * If the file offset is unaligned vs. the extent size
-		 * we need to align it.  This will be possible unless
-		 * the file was previously written with a kernel that didn't
-		 * perform this alignment.
-		 */
-		mod_off = do_mod(orig_off, extsz);
-		if (mod_off) {
-			ap->alen += mod_off;
-			ap->off -= mod_off;
-		}
-		/*
-		 * Same adjustment for the end of the requested area.
-		 */
-		if ((temp = (ap->alen % extsz)))
-			ap->alen += extsz - temp;
-		/*
-		 * If the previous block overlaps with this proposed allocation
-		 * then move the start forward without adjusting the length.
-		 */
-		prevo = ap->prevp->br_startoff == NULLFILEOFF ? 0 :
-			(ap->prevp->br_startoff + ap->prevp->br_blockcount);
-		if (ap->off != orig_off && ap->off < prevo)
-			ap->off = prevo;
-		/*
-		 * If the next block overlaps with this proposed allocation
-		 * then move the start back without adjusting the length,
-		 * but not before offset 0.
-		 * This may of course make the start overlap previous block,
-		 * and if we hit the offset 0 limit then the next block
-		 * can still overlap too.
-		 */
-		nexto = (ap->eof || ap->gotp->br_startoff == NULLFILEOFF) ?
-			NULLFILEOFF : ap->gotp->br_startoff;
-		if (!ap->eof &&
-		    ap->off + ap->alen != orig_end &&
-		    ap->off + ap->alen > nexto)
-			ap->off = nexto > ap->alen ? nexto - ap->alen : 0;
-		/*
-		 * If we're now overlapping the next or previous extent that
-		 * means we can't fit an extsz piece in this hole.  Just move
-		 * the start forward to the first valid spot and set
-		 * the length so we hit the end.
-		 */
-		if ((ap->off != orig_off && ap->off < prevo) ||
-		    (ap->off + ap->alen != orig_end &&
-		     ap->off + ap->alen > nexto)) {
-			ap->off = prevo;
-			ap->alen = nexto - prevo;
-		}
-		/*
-		 * If the result isn't a multiple of rtextents we need to
-		 * remove blocks until it is.
-		 */
-		if ((temp = (ap->alen % mp->m_sb.sb_rextsize))) {
-			/*
-			 * We're not covering the original request, or
-			 * we won't be able to once we fix the length.
-			 */
-			if (orig_off < ap->off ||
-			    orig_end > ap->off + ap->alen ||
-			    ap->alen - temp < orig_alen)
-				return XFS_ERROR(EINVAL);
-			/*
-			 * Try to fix it by moving the start up.
-			 */
-			if (ap->off + temp <= orig_off) {
-				ap->alen -= temp;
-				ap->off += temp;
-			}
-			/*
-			 * Try to fix it by moving the end in.
-			 */
-			else if (ap->off + ap->alen - temp >= orig_end)
-				ap->alen -= temp;
-			/*
-			 * Set the start to the minimum then trim the length.
-			 */
-			else {
-				ap->alen -= orig_off - ap->off;
-				ap->off = orig_off;
-				ap->alen -= ap->alen % mp->m_sb.sb_rextsize;
-			}
-			/*
-			 * Result doesn't cover the request, fail it.
-			 */
-			if (orig_off < ap->off || orig_end > ap->off + ap->alen)
-				return XFS_ERROR(EINVAL);
-		}
+		align = ap->ip->i_d.di_extsize ?
+			ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize;
+		/* Set prod to match the extent size */
+		prod = align / mp->m_sb.sb_rextsize;
+		error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
+						align, rt, ap->eof, 0,
+						ap->conv, &ap->off, &ap->alen);
+		if (error)
+			return error;
+		ASSERT(ap->alen);
 		ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
+
 		/*
 		 * If the offset & length are not perfectly aligned
 		 * then kill prod, it will just get us in trouble.
 		 */
-		if (do_mod(ap->off, extsz) || ap->alen % extsz)
+		if (do_mod(ap->off, align) || ap->alen % align)
 			prod = 1;
 		/*
 		 * Set ralen to be the actual requested length in rtextents.
...
@@ -2326,15 +2389,24 @@ xfs_bmap_alloc(
 			ap->rval = rtx * mp->m_sb.sb_rextsize;
 		} else
 			ap->rval = 0;
-	}
-#else
-	if (rt)
-		ap->rval = 0;
-#endif	/* __KERNEL__ */
-	else if (nullfb)
-		ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
-	else
-		ap->rval = ap->firstblock;
+	} else {
+		align = (ap->userdata && ap->ip->i_d.di_extsize &&
+			(ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ?
+			ap->ip->i_d.di_extsize : 0;
+		if (unlikely(align)) {
+			error = xfs_bmap_extsize_align(mp, ap->gotp,
+						       ap->prevp, align, rt,
+						       ap->eof, 0, ap->conv,
+						       &ap->off, &ap->alen);
+			ASSERT(!error);
+			ASSERT(ap->alen);
+		}
+		if (nullfb)
+			ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
+		else
+			ap->rval = ap->firstblock;
+	}
 	/*
 	 * If allocating at eof, and there's a previous real block,
 	 * try to use it's last block as our starting point.
...
@@ -2598,11 +2670,12 @@ xfs_bmap_alloc(
 		args.total = ap->total;
 		args.minlen = ap->minlen;
 	}
-	if (ap->ip->i_d.di_extsize) {
+	if (unlikely(ap->userdata && ap->ip->i_d.di_extsize &&
+		     (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) {
 		args.prod = ap->ip->i_d.di_extsize;
 		if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
 			args.mod = (xfs_extlen_t)(args.prod - args.mod);
-	} else if (mp->m_sb.sb_blocksize >= NBPP) {
+	} else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) {
 		args.prod = 1;
 		args.mod = 0;
 	} else {
...
@@ -3580,14 +3653,16 @@ xfs_bmap_search_extents(
 	ep = xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp,
 					  lastxp, gotp, prevp);
-	rt = ip->i_d.di_flags & XFS_DIFLAG_REALTIME;
-	if (!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM)) {
+	rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
+	if (unlikely(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM))) {
 		cmn_err(CE_PANIC,"Access to block zero: fs: <%s> inode: %lld "
 			"start_block : %llx start_off : %llx blkcnt : %llx "
 			"extent-state : %x \n",
-			(ip->i_mount)->m_fsname,(long long)ip->i_ino,
-			gotp->br_startblock, gotp->br_startoff,
-			gotp->br_blockcount,gotp->br_state);
+			(ip->i_mount)->m_fsname, (long long)ip->i_ino,
+			(unsigned long long)gotp->br_startblock,
+			(unsigned long long)gotp->br_startoff,
+			(unsigned long long)gotp->br_blockcount,
+			gotp->br_state);
 	}
 	return ep;
 }
...
@@ -3875,7 +3950,7 @@ xfs_bmap_add_attrfork(
 		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
 		if (!ip->i_d.di_forkoff)
 			ip->i_d.di_forkoff = mp->m_attroffset >> 3;
-		else if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR))
+		else if (mp->m_flags & XFS_MOUNT_ATTR2)
 			version = 2;
 		break;
 	default:
...
@@ -4023,13 +4098,13 @@ xfs_bmap_compute_maxlevels(
 	 */
 	if (whichfork == XFS_DATA_FORK) {
 		maxleafents = MAXEXTNUM;
-		sz = (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) ?
-			mp->m_attroffset : XFS_BMDR_SPACE_CALC(MINDBTPTRS);
+		sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
+			XFS_BMDR_SPACE_CALC(MINDBTPTRS) : mp->m_attroffset;
 	} else {
 		maxleafents = MAXAEXTNUM;
-		sz = (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) ?
-			mp->m_sb.sb_inodesize - mp->m_attroffset :
-			XFS_BMDR_SPACE_CALC(MINABTPTRS);
+		sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
+			XFS_BMDR_SPACE_CALC(MINABTPTRS) :
+			mp->m_sb.sb_inodesize - mp->m_attroffset;
 	}
 	maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0);
 	minleafrecs = mp->m_bmap_dmnr[0];
...
@@ -4418,8 +4493,8 @@ xfs_bmap_read_extents(
 		num_recs = be16_to_cpu(block->bb_numrecs);
 		if (unlikely(i + num_recs > room)) {
 			ASSERT(i + num_recs <= room);
-			xfs_fs_cmn_err(CE_WARN, ip->i_mount,
-				"corrupt dinode %Lu, (btree extents).  Unmount and run xfs_repair.",
+			xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+				"corrupt dinode %Lu, (btree extents).",
 				(unsigned long long) ip->i_ino);
 			XFS_ERROR_REPORT("xfs_bmap_read_extents(1)",
 					 XFS_ERRLEVEL_LOW,
...
@@ -4590,6 +4665,7 @@ xfs_bmapi(
 	char		contig;		/* allocation must be one extent */
 	char		delay;		/* this request is for delayed alloc */
 	char		exact;		/* don't do all of wasdelayed extent */
+	char		convert;	/* unwritten extent I/O completion */
 	xfs_bmbt_rec_t	*ep;		/* extent list entry pointer */
 	int		error;		/* error return */
 	xfs_bmbt_irec_t	got;		/* current extent list record */
...
@@ -4643,7 +4719,7 @@ xfs_bmapi(
 	}
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
-	rt = XFS_IS_REALTIME_INODE(ip);
+	rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	ASSERT(ifp->if_ext_max ==
 	       XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
...
@@ -4654,6 +4730,7 @@ xfs_bmapi(
 	delay = (flags & XFS_BMAPI_DELAY) != 0;
 	trim = (flags & XFS_BMAPI_ENTIRE) == 0;
 	userdata = (flags & XFS_BMAPI_METADATA) == 0;
+	convert = (flags & XFS_BMAPI_CONVERT) != 0;
 	exact = (flags & XFS_BMAPI_EXACT) != 0;
 	rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
 	contig = (flags & XFS_BMAPI_CONTIG) != 0;
...
@@ -4748,15 +4825,25 @@ xfs_bmapi(
 		}
 		minlen = contig ? alen : 1;
 		if (delay) {
-			xfs_extlen_t	extsz = 0;
+			xfs_extlen_t	extsz;
 
 			/* Figure out the extent size, adjust alen */
 			if (rt) {
 				if (!(extsz = ip->i_d.di_extsize))
 					extsz = mp->m_sb.sb_rextsize;
-				alen = roundup(alen, extsz);
-				extsz = alen / mp->m_sb.sb_rextsize;
+			} else {
+				extsz = ip->i_d.di_extsize;
 			}
+			if (extsz) {
+				error = xfs_bmap_extsize_align(mp, &got, &prev,
+							extsz, rt, eof, delay,
+							convert, &aoff, &alen);
+				ASSERT(!error);
+			}
+			if (rt)
+				extsz = alen / mp->m_sb.sb_rextsize;
 
 			/*
 			 * Make a transaction-less quota reservation for
...
@@ -4785,32 +4872,33 @@ xfs_bmapi(
 				xfs_bmap_worst_indlen(ip, alen);
 			ASSERT(indlen > 0);
 
-			if (rt)
+			if (rt) {
 				error = xfs_mod_incore_sb(mp,
 						XFS_SBS_FREXTENTS,
 						-(extsz), rsvd);
-			else
+			} else {
 				error = xfs_mod_incore_sb(mp,
 						XFS_SBS_FDBLOCKS,
 						-(alen), rsvd);
+			}
 			if (!error) {
 				error = xfs_mod_incore_sb(mp,
 						XFS_SBS_FDBLOCKS,
 						-(indlen), rsvd);
-				if (error && rt) {
-					xfs_mod_incore_sb(ip->i_mount,
+				if (error && rt)
+					xfs_mod_incore_sb(mp,
 						XFS_SBS_FREXTENTS,
 						extsz, rsvd);
-				} else if (error) {
-					xfs_mod_incore_sb(ip->i_mount,
+				else if (error)
+					xfs_mod_incore_sb(mp,
 						XFS_SBS_FDBLOCKS,
 						alen, rsvd);
-				}
 			}
 
 			if (error) {
-				if (XFS_IS_QUOTA_ON(ip->i_mount))
+				if (XFS_IS_QUOTA_ON(mp))
 					/* unreserve the blocks now */
+					(void)
 					XFS_TRANS_UNRESERVE_QUOTA_NBLKS(
 						mp, NULL, ip,
 						(long)alen, 0, rt ?
...
@@ -4849,6 +4937,7 @@ xfs_bmapi(
 			bma.firstblock = *firstblock;
 			bma.alen = alen;
 			bma.off = aoff;
+			bma.conv = convert;
 			bma.wasdel = wasdelay;
 			bma.minlen = minlen;
 			bma.low = flist->xbf_low;
...
@@ -5270,8 +5359,7 @@ xfs_bunmapi(
 		return 0;
 	}
 	XFS_STATS_INC(xs_blk_unmap);
-	isrt = (whichfork == XFS_DATA_FORK) &&
-	       (ip->i_d.di_flags & XFS_DIFLAG_REALTIME);
+	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
 	start = bno;
 	bno = start + len - 1;
 	ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
...
@@ -5443,7 +5531,7 @@ xfs_bunmapi(
 		}
 		if (wasdel) {
 			ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
-			/* Update realtim/data freespace, unreserve quota */
+			/* Update realtime/data freespace, unreserve quota */
 			if (isrt) {
 				xfs_filblks_t rtexts;
...
@@ -5451,14 +5539,14 @@ xfs_bunmapi(
 				do_div(rtexts, mp->m_sb.sb_rextsize);
 				xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
 						(int)rtexts, rsvd);
-				XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip,
-					-((long)del.br_blockcount), 0,
+				(void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
+					NULL, ip, -((long)del.br_blockcount), 0,
 					XFS_QMOPT_RES_RTBLKS);
 			} else {
 				xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
 						(int)del.br_blockcount, rsvd);
-				XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip,
-					-((long)del.br_blockcount), 0,
+				(void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
+					NULL, ip, -((long)del.br_blockcount), 0,
 					XFS_QMOPT_RES_REGBLKS);
 			}
 			ip->i_delayed_blks -= del.br_blockcount;
...
@@ -5652,7 +5740,9 @@ xfs_getbmap(
 	    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
 		return XFS_ERROR(EINVAL);
 	if (whichfork == XFS_DATA_FORK) {
-		if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) {
+		if ((ip->i_d.di_extsize && (ip->i_d.di_flags &
+				(XFS_DIFLAG_REALTIME|XFS_DIFLAG_EXTSIZE))) ||
+		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
 			prealloced = 1;
 			fixlen = XFS_MAXIOFFSET(mp);
 		} else {
...
fs/xfs/xfs_bmap.h
...
@@ -62,6 +62,10 @@ typedef struct xfs_bmap_free
 #define XFS_BMAPI_IGSTATE	0x200	/* Ignore state - */
 					/* combine contig. space */
 #define XFS_BMAPI_CONTIG	0x400	/* must allocate only one extent */
+/*	XFS_BMAPI_DIRECT_IO	0x800	*/
+#define XFS_BMAPI_CONVERT	0x1000	/* unwritten extent conversion - */
+					/* need write cache flushing and no */
+					/* additional allocation alignments */
 
 #define	XFS_BMAPI_AFLAG(w)	xfs_bmapi_aflag(w)
 static inline int xfs_bmapi_aflag(int w)
...
@@ -101,7 +105,8 @@ typedef struct xfs_bmalloca {
 	char			wasdel;	/* replacing a delayed allocation */
 	char			userdata;/* set if is user data */
 	char			low;	/* low on space, using seq'l ags */
 	char			aeof;	/* allocated space at eof */
+	char			conv;	/* overwriting unwritten extents */
 } xfs_bmalloca_t;
 
 #ifdef __KERNEL__
...
fs/xfs/xfs_clnt.h
浏览文件 @
9f5974c8
...
@@ -57,7 +57,7 @@ struct xfs_mount_args {
...
@@ -57,7 +57,7 @@ struct xfs_mount_args {
/*
/*
* XFS mount option flags -- args->flags1
* XFS mount option flags -- args->flags1
*/
*/
#define XFSMNT_
COMPAT_ATTR 0x00000001
/* do not use ATTR2
format */
#define XFSMNT_
ATTR2 0x00000001
/* allow ATTR2 EA
format */
#define XFSMNT_WSYNC 0x00000002
/* safe mode nfs mount
#define XFSMNT_WSYNC 0x00000002
/* safe mode nfs mount
* compatible */
* compatible */
#define XFSMNT_INO64 0x00000004
/* move inode numbers up
#define XFSMNT_INO64 0x00000004
/* move inode numbers up
...
...
fs/xfs/xfs_dfrag.c
浏览文件 @
9f5974c8
...
@@ -60,8 +60,6 @@ xfs_swapext(
...
@@ -60,8 +60,6 @@ xfs_swapext(
xfs_bstat_t
*
sbp
;
xfs_bstat_t
*
sbp
;
struct
file
*
fp
=
NULL
,
*
tfp
=
NULL
;
struct
file
*
fp
=
NULL
,
*
tfp
=
NULL
;
vnode_t
*
vp
,
*
tvp
;
vnode_t
*
vp
,
*
tvp
;
bhv_desc_t
*
bdp
,
*
tbdp
;
vn_bhv_head_t
*
bhp
,
*
tbhp
;
static
uint
lock_flags
=
XFS_ILOCK_EXCL
|
XFS_IOLOCK_EXCL
;
static
uint
lock_flags
=
XFS_ILOCK_EXCL
|
XFS_IOLOCK_EXCL
;
int
ilf_fields
,
tilf_fields
;
int
ilf_fields
,
tilf_fields
;
int
error
=
0
;
int
error
=
0
;
...
@@ -90,13 +88,10 @@ xfs_swapext(
...
@@ -90,13 +88,10 @@ xfs_swapext(
goto
error0
;
goto
error0
;
}
}
bhp
=
VN_BHV_HEAD
(
vp
);
ip
=
xfs_vtoi
(
vp
);
bdp
=
vn_bhv_lookup
(
bhp
,
&
xfs_vnodeops
);
if
(
ip
==
NULL
)
{
if
(
bdp
==
NULL
)
{
error
=
XFS_ERROR
(
EBADF
);
error
=
XFS_ERROR
(
EBADF
);
goto
error0
;
goto
error0
;
}
else
{
ip
=
XFS_BHVTOI
(
bdp
);
}
}
if
(((
tfp
=
fget
((
int
)
sxp
->
sx_fdtmp
))
==
NULL
)
||
if
(((
tfp
=
fget
((
int
)
sxp
->
sx_fdtmp
))
==
NULL
)
||
...
@@ -105,13 +100,10 @@ xfs_swapext(
...
@@ -105,13 +100,10 @@ xfs_swapext(
goto
error0
;
goto
error0
;
}
}
tbhp
=
VN_BHV_HEAD
(
tvp
);
tip
=
xfs_vtoi
(
tvp
);
tbdp
=
vn_bhv_lookup
(
tbhp
,
&
xfs_vnodeops
);
if
(
tip
==
NULL
)
{
if
(
tbdp
==
NULL
)
{
error
=
XFS_ERROR
(
EBADF
);
error
=
XFS_ERROR
(
EBADF
);
goto
error0
;
goto
error0
;
}
else
{
tip
=
XFS_BHVTOI
(
tbdp
);
}
}
if
(
ip
->
i_mount
!=
tip
->
i_mount
)
{
if
(
ip
->
i_mount
!=
tip
->
i_mount
)
{
...
...
fs/xfs/xfs_dinode.h
浏览文件 @
9f5974c8
...
@@ -199,10 +199,16 @@ typedef enum xfs_dinode_fmt
...
@@ -199,10 +199,16 @@ typedef enum xfs_dinode_fmt
#define XFS_DFORK_DSIZE(dip,mp) \
#define XFS_DFORK_DSIZE(dip,mp) \
XFS_CFORK_DSIZE_DISK(&(dip)->di_core, mp)
XFS_CFORK_DSIZE_DISK(&(dip)->di_core, mp)
#define XFS_DFORK_DSIZE_HOST(dip,mp) \
XFS_CFORK_DSIZE(&(dip)->di_core, mp)
#define XFS_DFORK_ASIZE(dip,mp) \
#define XFS_DFORK_ASIZE(dip,mp) \
XFS_CFORK_ASIZE_DISK(&(dip)->di_core, mp)
XFS_CFORK_ASIZE_DISK(&(dip)->di_core, mp)
#define XFS_DFORK_ASIZE_HOST(dip,mp) \
XFS_CFORK_ASIZE(&(dip)->di_core, mp)
#define XFS_DFORK_SIZE(dip,mp,w) \
#define XFS_DFORK_SIZE(dip,mp,w) \
XFS_CFORK_SIZE_DISK(&(dip)->di_core, mp, w)
XFS_CFORK_SIZE_DISK(&(dip)->di_core, mp, w)
#define XFS_DFORK_SIZE_HOST(dip,mp,w) \
XFS_CFORK_SIZE(&(dip)->di_core, mp, w)
#define XFS_DFORK_Q(dip) XFS_CFORK_Q_DISK(&(dip)->di_core)
#define XFS_DFORK_Q(dip) XFS_CFORK_Q_DISK(&(dip)->di_core)
#define XFS_DFORK_BOFF(dip) XFS_CFORK_BOFF_DISK(&(dip)->di_core)
#define XFS_DFORK_BOFF(dip) XFS_CFORK_BOFF_DISK(&(dip)->di_core)
...
@@ -216,6 +222,7 @@ typedef enum xfs_dinode_fmt
...
@@ -216,6 +222,7 @@ typedef enum xfs_dinode_fmt
#define XFS_CFORK_FMT_SET(dcp,w,n) \
#define XFS_CFORK_FMT_SET(dcp,w,n) \
((w) == XFS_DATA_FORK ? \
((w) == XFS_DATA_FORK ? \
((dcp)->di_format = (n)) : ((dcp)->di_aformat = (n)))
((dcp)->di_format = (n)) : ((dcp)->di_aformat = (n)))
#define XFS_DFORK_FORMAT(dip,w) XFS_CFORK_FORMAT(&(dip)->di_core, w)
#define XFS_CFORK_NEXTENTS_DISK(dcp,w) \
#define XFS_CFORK_NEXTENTS_DISK(dcp,w) \
((w) == XFS_DATA_FORK ? \
((w) == XFS_DATA_FORK ? \
...
@@ -223,13 +230,13 @@ typedef enum xfs_dinode_fmt
...
@@ -223,13 +230,13 @@ typedef enum xfs_dinode_fmt
INT_GET((dcp)->di_anextents, ARCH_CONVERT))
INT_GET((dcp)->di_anextents, ARCH_CONVERT))
#define XFS_CFORK_NEXTENTS(dcp,w) \
#define XFS_CFORK_NEXTENTS(dcp,w) \
((w) == XFS_DATA_FORK ? (dcp)->di_nextents : (dcp)->di_anextents)
((w) == XFS_DATA_FORK ? (dcp)->di_nextents : (dcp)->di_anextents)
#define XFS_DFORK_NEXTENTS(dip,w) XFS_CFORK_NEXTENTS_DISK(&(dip)->di_core, w)
#define XFS_DFORK_NEXTENTS_HOST(dip,w) XFS_CFORK_NEXTENTS(&(dip)->di_core, w)
#define XFS_CFORK_NEXT_SET(dcp,w,n) \
#define XFS_CFORK_NEXT_SET(dcp,w,n) \
((w) == XFS_DATA_FORK ? \
((w) == XFS_DATA_FORK ? \
((dcp)->di_nextents = (n)) : ((dcp)->di_anextents = (n)))
((dcp)->di_nextents = (n)) : ((dcp)->di_anextents = (n)))
#define XFS_DFORK_NEXTENTS(dip,w) XFS_CFORK_NEXTENTS_DISK(&(dip)->di_core, w)
#define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)XFS_BUF_PTR(bp))
#define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)XFS_BUF_PTR(bp))
/*
/*
...
@@ -246,8 +253,10 @@ typedef enum xfs_dinode_fmt
...
@@ -246,8 +253,10 @@ typedef enum xfs_dinode_fmt
#define XFS_DIFLAG_NOATIME_BIT 6
/* do not update atime */
#define XFS_DIFLAG_NOATIME_BIT 6
/* do not update atime */
#define XFS_DIFLAG_NODUMP_BIT 7
/* do not dump */
#define XFS_DIFLAG_NODUMP_BIT 7
/* do not dump */
#define XFS_DIFLAG_RTINHERIT_BIT 8
/* create with realtime bit set */
#define XFS_DIFLAG_RTINHERIT_BIT 8
/* create with realtime bit set */
#define XFS_DIFLAG_PROJINHERIT_BIT 9
/* create with parents projid */
#define XFS_DIFLAG_PROJINHERIT_BIT 9
/* create with parents projid */
#define XFS_DIFLAG_NOSYMLINKS_BIT 10
/* disallow symlink creation */
#define XFS_DIFLAG_NOSYMLINKS_BIT 10
/* disallow symlink creation */
#define XFS_DIFLAG_EXTSIZE_BIT 11
/* inode extent size allocator hint */
#define XFS_DIFLAG_EXTSZINHERIT_BIT 12
/* inherit inode extent size */
#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
...
@@ -259,11 +268,14 @@ typedef enum xfs_dinode_fmt
...
@@ -259,11 +268,14 @@ typedef enum xfs_dinode_fmt
#define XFS_DIFLAG_RTINHERIT (1 << XFS_DIFLAG_RTINHERIT_BIT)
#define XFS_DIFLAG_RTINHERIT (1 << XFS_DIFLAG_RTINHERIT_BIT)
#define XFS_DIFLAG_PROJINHERIT (1 << XFS_DIFLAG_PROJINHERIT_BIT)
#define XFS_DIFLAG_PROJINHERIT (1 << XFS_DIFLAG_PROJINHERIT_BIT)
#define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
#define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
#define XFS_DIFLAG_EXTSIZE (1 << XFS_DIFLAG_EXTSIZE_BIT)
#define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
#define XFS_DIFLAG_ANY \
#define XFS_DIFLAG_ANY \
(XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
(XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS)
XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
XFS_DIFLAG_EXTSZINHERIT)
#endif
/* __XFS_DINODE_H__ */
#endif
/* __XFS_DINODE_H__ */
fs/xfs/xfs_dir.c
浏览文件 @
9f5974c8
...
@@ -176,7 +176,7 @@ xfs_dir_mount(xfs_mount_t *mp)
...
@@ -176,7 +176,7 @@ xfs_dir_mount(xfs_mount_t *mp)
uint
shortcount
,
leafcount
,
count
;
uint
shortcount
,
leafcount
,
count
;
mp
->
m_dirversion
=
1
;
mp
->
m_dirversion
=
1
;
if
(
mp
->
m_flags
&
XFS_MOUNT_COMPAT_ATTR
)
{
if
(
!
(
mp
->
m_flags
&
XFS_MOUNT_ATTR2
)
)
{
shortcount
=
(
mp
->
m_attroffset
-
shortcount
=
(
mp
->
m_attroffset
-
(
uint
)
sizeof
(
xfs_dir_sf_hdr_t
))
/
(
uint
)
sizeof
(
xfs_dir_sf_hdr_t
))
/
(
uint
)
sizeof
(
xfs_dir_sf_entry_t
);
(
uint
)
sizeof
(
xfs_dir_sf_entry_t
);
...
...
fs/xfs/xfs_dir.h
浏览文件 @
9f5974c8
...
@@ -135,6 +135,8 @@ void xfs_dir_startup(void); /* called exactly once */
...
@@ -135,6 +135,8 @@ void xfs_dir_startup(void); /* called exactly once */
((mp)->m_dirops.xd_shortform_to_single(args))
((mp)->m_dirops.xd_shortform_to_single(args))
#define XFS_DIR_IS_V1(mp) ((mp)->m_dirversion == 1)
#define XFS_DIR_IS_V1(mp) ((mp)->m_dirversion == 1)
#define XFS_DIR_IS_V2(mp) ((mp)->m_dirversion == 2)
extern
xfs_dirops_t
xfsv1_dirops
;
extern
xfs_dirops_t
xfsv1_dirops
;
extern
xfs_dirops_t
xfsv2_dirops
;
#endif
/* __XFS_DIR_H__ */
#endif
/* __XFS_DIR_H__ */
fs/xfs/xfs_dir2.h
浏览文件 @
9f5974c8
...
@@ -72,9 +72,6 @@ typedef struct xfs_dir2_put_args {
...
@@ -72,9 +72,6 @@ typedef struct xfs_dir2_put_args {
struct
uio
*
uio
;
/* uio control structure */
struct
uio
*
uio
;
/* uio control structure */
}
xfs_dir2_put_args_t
;
}
xfs_dir2_put_args_t
;
#define XFS_DIR_IS_V2(mp) ((mp)->m_dirversion == 2)
extern
xfs_dirops_t
xfsv2_dirops
;
/*
/*
* Other interfaces used by the rest of the dir v2 code.
* Other interfaces used by the rest of the dir v2 code.
*/
*/
...
...
fs/xfs/xfs_dir_leaf.h
浏览文件 @
9f5974c8
...
@@ -67,34 +67,38 @@ struct xfs_trans;
...
@@ -67,34 +67,38 @@ struct xfs_trans;
*/
*/
#define XFS_DIR_LEAF_MAPSIZE 3
/* how many freespace slots */
#define XFS_DIR_LEAF_MAPSIZE 3
/* how many freespace slots */
typedef
struct
xfs_dir_leaf_map
{
/* RLE map of free bytes */
__uint16_t
base
;
/* base of free region */
__uint16_t
size
;
/* run length of free region */
}
xfs_dir_leaf_map_t
;
typedef
struct
xfs_dir_leaf_hdr
{
/* constant-structure header block */
xfs_da_blkinfo_t
info
;
/* block type, links, etc. */
__uint16_t
count
;
/* count of active leaf_entry's */
__uint16_t
namebytes
;
/* num bytes of name strings stored */
__uint16_t
firstused
;
/* first used byte in name area */
__uint8_t
holes
;
/* != 0 if blk needs compaction */
__uint8_t
pad1
;
xfs_dir_leaf_map_t
freemap
[
XFS_DIR_LEAF_MAPSIZE
];
}
xfs_dir_leaf_hdr_t
;
typedef
struct
xfs_dir_leaf_entry
{
/* sorted on key, not name */
xfs_dahash_t
hashval
;
/* hash value of name */
__uint16_t
nameidx
;
/* index into buffer of name */
__uint8_t
namelen
;
/* length of name string */
__uint8_t
pad2
;
}
xfs_dir_leaf_entry_t
;
typedef
struct
xfs_dir_leaf_name
{
xfs_dir_ino_t
inumber
;
/* inode number for this key */
__uint8_t
name
[
1
];
/* name string itself */
}
xfs_dir_leaf_name_t
;
typedef
struct
xfs_dir_leafblock
{
typedef
struct
xfs_dir_leafblock
{
struct
xfs_dir_leaf_hdr
{
/* constant-structure header block */
xfs_dir_leaf_hdr_t
hdr
;
/* constant-structure header block */
xfs_da_blkinfo_t
info
;
/* block type, links, etc. */
xfs_dir_leaf_entry_t
entries
[
1
];
/* var sized array */
__uint16_t
count
;
/* count of active leaf_entry's */
xfs_dir_leaf_name_t
namelist
[
1
];
/* grows from bottom of buf */
__uint16_t
namebytes
;
/* num bytes of name strings stored */
__uint16_t
firstused
;
/* first used byte in name area */
__uint8_t
holes
;
/* != 0 if blk needs compaction */
__uint8_t
pad1
;
struct
xfs_dir_leaf_map
{
/* RLE map of free bytes */
__uint16_t
base
;
/* base of free region */
__uint16_t
size
;
/* run length of free region */
}
freemap
[
XFS_DIR_LEAF_MAPSIZE
];
/* N largest free regions */
}
hdr
;
struct
xfs_dir_leaf_entry
{
/* sorted on key, not name */
xfs_dahash_t
hashval
;
/* hash value of name */
__uint16_t
nameidx
;
/* index into buffer of name */
__uint8_t
namelen
;
/* length of name string */
__uint8_t
pad2
;
}
entries
[
1
];
/* var sized array */
struct
xfs_dir_leaf_name
{
xfs_dir_ino_t
inumber
;
/* inode number for this key */
__uint8_t
name
[
1
];
/* name string itself */
}
namelist
[
1
];
/* grows from bottom of buf */
}
xfs_dir_leafblock_t
;
}
xfs_dir_leafblock_t
;
typedef
struct
xfs_dir_leaf_hdr
xfs_dir_leaf_hdr_t
;
typedef
struct
xfs_dir_leaf_map
xfs_dir_leaf_map_t
;
typedef
struct
xfs_dir_leaf_entry
xfs_dir_leaf_entry_t
;
typedef
struct
xfs_dir_leaf_name
xfs_dir_leaf_name_t
;
/*
/*
* Length of name for which a 512-byte block filesystem
* Length of name for which a 512-byte block filesystem
...
@@ -126,11 +130,10 @@ typedef union {
...
@@ -126,11 +130,10 @@ typedef union {
#define XFS_PUT_COOKIE(c,mp,bno,entry,hash) \
#define XFS_PUT_COOKIE(c,mp,bno,entry,hash) \
((c).s.be = XFS_DA_MAKE_BNOENTRY(mp, bno, entry), (c).s.h = (hash))
((c).s.be = XFS_DA_MAKE_BNOENTRY(mp, bno, entry), (c).s.h = (hash))
typedef
struct
xfs_dir_put_args
typedef
struct
xfs_dir_put_args
{
{
xfs_dircook_t
cook
;
/* cookie of (next) entry */
xfs_dircook_t
cook
;
/* cookie of (next) entry */
xfs_intino_t
ino
;
/* inode number */
xfs_intino_t
ino
;
/* inode number */
struct
xfs_dirent
*
dbp
;
/* buffer pointer */
struct
xfs_dirent
*
dbp
;
/* buffer pointer */
char
*
name
;
/* directory entry name */
char
*
name
;
/* directory entry name */
int
namelen
;
/* length of name */
int
namelen
;
/* length of name */
int
done
;
/* output: set if value was stored */
int
done
;
/* output: set if value was stored */
...
@@ -138,7 +141,8 @@ typedef struct xfs_dir_put_args
...
@@ -138,7 +141,8 @@ typedef struct xfs_dir_put_args
struct
uio
*
uio
;
/* uio control structure */
struct
uio
*
uio
;
/* uio control structure */
}
xfs_dir_put_args_t
;
}
xfs_dir_put_args_t
;
#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len) xfs_dir_leaf_entsize_byname(len)
#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len) \
xfs_dir_leaf_entsize_byname(len)
static
inline
int
xfs_dir_leaf_entsize_byname
(
int
len
)
static
inline
int
xfs_dir_leaf_entsize_byname
(
int
len
)
{
{
return
(
uint
)
sizeof
(
xfs_dir_leaf_name_t
)
-
1
+
len
;
return
(
uint
)
sizeof
(
xfs_dir_leaf_name_t
)
-
1
+
len
;
...
...
fs/xfs/xfs_error.c
浏览文件 @
9f5974c8
...
@@ -54,7 +54,6 @@ xfs_error_trap(int e)
...
@@ -54,7 +54,6 @@ xfs_error_trap(int e)
if
(
e
!=
xfs_etrap
[
i
])
if
(
e
!=
xfs_etrap
[
i
])
continue
;
continue
;
cmn_err
(
CE_NOTE
,
"xfs_error_trap: error %d"
,
e
);
cmn_err
(
CE_NOTE
,
"xfs_error_trap: error %d"
,
e
);
debug_stop_all_cpus
((
void
*
)
-
1LL
);
BUG
();
BUG
();
break
;
break
;
}
}
...
...
fs/xfs/xfs_error.h
浏览文件 @
9f5974c8
...
@@ -18,9 +18,6 @@
...
@@ -18,9 +18,6 @@
#ifndef __XFS_ERROR_H__
#ifndef __XFS_ERROR_H__
#define __XFS_ERROR_H__
#define __XFS_ERROR_H__
#define prdev(fmt,targ,args...) \
printk("XFS: device %s - " fmt "\n", XFS_BUFTARG_NAME(targ), ## args)
#define XFS_ERECOVER 1
/* Failure to recover log */
#define XFS_ERECOVER 1
/* Failure to recover log */
#define XFS_ELOGSTAT 2
/* Failure to stat log in user space */
#define XFS_ELOGSTAT 2
/* Failure to stat log in user space */
#define XFS_ENOLOGSPACE 3
/* Reservation too large */
#define XFS_ENOLOGSPACE 3
/* Reservation too large */
...
@@ -182,8 +179,11 @@ extern int xfs_errortag_clearall_umount(int64_t fsid, char *fsname, int loud);
...
@@ -182,8 +179,11 @@ extern int xfs_errortag_clearall_umount(int64_t fsid, char *fsname, int loud);
struct
xfs_mount
;
struct
xfs_mount
;
/* PRINTFLIKE4 */
/* PRINTFLIKE4 */
extern
void
xfs_cmn_err
(
int
panic_tag
,
int
level
,
struct
xfs_mount
*
mp
,
extern
void
xfs_cmn_err
(
int
panic_tag
,
int
level
,
struct
xfs_mount
*
mp
,
char
*
fmt
,
...);
char
*
fmt
,
...);
/* PRINTFLIKE3 */
/* PRINTFLIKE3 */
extern
void
xfs_fs_cmn_err
(
int
level
,
struct
xfs_mount
*
mp
,
char
*
fmt
,
...);
extern
void
xfs_fs_cmn_err
(
int
level
,
struct
xfs_mount
*
mp
,
char
*
fmt
,
...);
#define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \
xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args)
#endif
/* __XFS_ERROR_H__ */
#endif
/* __XFS_ERROR_H__ */
fs/xfs/xfs_fs.h
浏览文件 @
9f5974c8
...
@@ -3,15 +3,15 @@
...
@@ -3,15 +3,15 @@
* All Rights Reserved.
* All Rights Reserved.
*
*
* This program is free software; you can redistribute it and/or
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU
General Public License as
* modify it under the terms of the GNU
Lesser General Public License
* published by the Free Software Foundation.
*
as
published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it would be useful,
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* GNU
Lesser
General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* You should have received a copy of the GNU
Lesser
General Public License
* along with this program; if not, write the Free Software Foundation,
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
*/
...
@@ -65,6 +65,8 @@ struct fsxattr {
...
@@ -65,6 +65,8 @@ struct fsxattr {
#define XFS_XFLAG_RTINHERIT 0x00000100
/* create with rt bit set */
#define XFS_XFLAG_RTINHERIT 0x00000100
/* create with rt bit set */
#define XFS_XFLAG_PROJINHERIT 0x00000200
/* create with parents projid */
#define XFS_XFLAG_PROJINHERIT 0x00000200
/* create with parents projid */
#define XFS_XFLAG_NOSYMLINKS 0x00000400
/* disallow symlink creation */
#define XFS_XFLAG_NOSYMLINKS 0x00000400
/* disallow symlink creation */
#define XFS_XFLAG_EXTSIZE 0x00000800
/* extent size allocator hint */
#define XFS_XFLAG_EXTSZINHERIT 0x00001000
/* inherit inode extent size */
#define XFS_XFLAG_HASATTR 0x80000000
/* no DIFLAG for this */
#define XFS_XFLAG_HASATTR 0x80000000
/* no DIFLAG for this */
/*
/*
...
...
fs/xfs/xfs_fsops.c
浏览文件 @
9f5974c8
...
@@ -540,6 +540,32 @@ xfs_reserve_blocks(
...
@@ -540,6 +540,32 @@ xfs_reserve_blocks(
return
(
0
);
return
(
0
);
}
}
void
xfs_fs_log_dummy
(
xfs_mount_t
*
mp
)
{
xfs_trans_t
*
tp
;
xfs_inode_t
*
ip
;
tp
=
_xfs_trans_alloc
(
mp
,
XFS_TRANS_DUMMY1
);
atomic_inc
(
&
mp
->
m_active_trans
);
if
(
xfs_trans_reserve
(
tp
,
0
,
XFS_ICHANGE_LOG_RES
(
mp
),
0
,
0
,
0
))
{
xfs_trans_cancel
(
tp
,
0
);
return
;
}
ip
=
mp
->
m_rootip
;
xfs_ilock
(
ip
,
XFS_ILOCK_EXCL
);
xfs_trans_ijoin
(
tp
,
ip
,
XFS_ILOCK_EXCL
);
xfs_trans_ihold
(
tp
,
ip
);
xfs_trans_log_inode
(
tp
,
ip
,
XFS_ILOG_CORE
);
xfs_trans_set_sync
(
tp
);
xfs_trans_commit
(
tp
,
0
,
NULL
);
xfs_iunlock
(
ip
,
XFS_ILOCK_EXCL
);
}
int
int
xfs_fs_goingdown
(
xfs_fs_goingdown
(
xfs_mount_t
*
mp
,
xfs_mount_t
*
mp
,
...
...
fs/xfs/xfs_fsops.h
浏览文件 @
9f5974c8
...
@@ -25,5 +25,6 @@ extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
...
@@ -25,5 +25,6 @@ extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
extern
int
xfs_reserve_blocks
(
xfs_mount_t
*
mp
,
__uint64_t
*
inval
,
extern
int
xfs_reserve_blocks
(
xfs_mount_t
*
mp
,
__uint64_t
*
inval
,
xfs_fsop_resblks_t
*
outval
);
xfs_fsop_resblks_t
*
outval
);
extern
int
xfs_fs_goingdown
(
xfs_mount_t
*
mp
,
__uint32_t
inflags
);
extern
int
xfs_fs_goingdown
(
xfs_mount_t
*
mp
,
__uint32_t
inflags
);
extern
void
xfs_fs_log_dummy
(
xfs_mount_t
*
mp
);
#endif
/* __XFS_FSOPS_H__ */
#endif
/* __XFS_FSOPS_H__ */
fs/xfs/xfs_iget.c
浏览文件 @
9f5974c8
...
@@ -493,7 +493,6 @@ xfs_iget(
...
@@ -493,7 +493,6 @@ xfs_iget(
retry:
retry:
if
((
inode
=
iget_locked
(
XFS_MTOVFS
(
mp
)
->
vfs_super
,
ino
)))
{
if
((
inode
=
iget_locked
(
XFS_MTOVFS
(
mp
)
->
vfs_super
,
ino
)))
{
bhv_desc_t
*
bdp
;
xfs_inode_t
*
ip
;
xfs_inode_t
*
ip
;
vp
=
LINVFS_GET_VP
(
inode
);
vp
=
LINVFS_GET_VP
(
inode
);
...
@@ -517,14 +516,12 @@ xfs_iget(
...
@@ -517,14 +516,12 @@ xfs_iget(
* to wait for the inode to go away.
* to wait for the inode to go away.
*/
*/
if
(
is_bad_inode
(
inode
)
||
if
(
is_bad_inode
(
inode
)
||
((
bdp
=
vn_bhv_lookup
(
VN_BHV_HEAD
(
vp
),
((
ip
=
xfs_vtoi
(
vp
))
==
NULL
))
{
&
xfs_vnodeops
))
==
NULL
))
{
iput
(
inode
);
iput
(
inode
);
delay
(
1
);
delay
(
1
);
goto
retry
;
goto
retry
;
}
}
ip
=
XFS_BHVTOI
(
bdp
);
if
(
lock_flags
!=
0
)
if
(
lock_flags
!=
0
)
xfs_ilock
(
ip
,
lock_flags
);
xfs_ilock
(
ip
,
lock_flags
);
XFS_STATS_INC
(
xs_ig_found
);
XFS_STATS_INC
(
xs_ig_found
);
...
...
fs/xfs/xfs_inode.c
浏览文件 @
9f5974c8
...
@@ -404,9 +404,8 @@ xfs_iformat(
...
@@ -404,9 +404,8 @@ xfs_iformat(
INT_GET
(
dip
->
di_core
.
di_nextents
,
ARCH_CONVERT
)
+
INT_GET
(
dip
->
di_core
.
di_nextents
,
ARCH_CONVERT
)
+
INT_GET
(
dip
->
di_core
.
di_anextents
,
ARCH_CONVERT
)
>
INT_GET
(
dip
->
di_core
.
di_anextents
,
ARCH_CONVERT
)
>
INT_GET
(
dip
->
di_core
.
di_nblocks
,
ARCH_CONVERT
)))
{
INT_GET
(
dip
->
di_core
.
di_nblocks
,
ARCH_CONVERT
)))
{
xfs_fs_cmn_err
(
CE_WARN
,
ip
->
i_mount
,
xfs_fs_repair_cmn_err
(
CE_WARN
,
ip
->
i_mount
,
"corrupt dinode %Lu, extent total = %d, nblocks = %Lu."
"corrupt dinode %Lu, extent total = %d, nblocks = %Lu."
,
" Unmount and run xfs_repair."
,
(
unsigned
long
long
)
ip
->
i_ino
,
(
unsigned
long
long
)
ip
->
i_ino
,
(
int
)(
INT_GET
(
dip
->
di_core
.
di_nextents
,
ARCH_CONVERT
)
(
int
)(
INT_GET
(
dip
->
di_core
.
di_nextents
,
ARCH_CONVERT
)
+
INT_GET
(
dip
->
di_core
.
di_anextents
,
ARCH_CONVERT
)),
+
INT_GET
(
dip
->
di_core
.
di_anextents
,
ARCH_CONVERT
)),
...
@@ -418,9 +417,8 @@ xfs_iformat(
...
@@ -418,9 +417,8 @@ xfs_iformat(
}
}
if
(
unlikely
(
INT_GET
(
dip
->
di_core
.
di_forkoff
,
ARCH_CONVERT
)
>
ip
->
i_mount
->
m_sb
.
sb_inodesize
))
{
if
(
unlikely
(
INT_GET
(
dip
->
di_core
.
di_forkoff
,
ARCH_CONVERT
)
>
ip
->
i_mount
->
m_sb
.
sb_inodesize
))
{
xfs_fs_cmn_err
(
CE_WARN
,
ip
->
i_mount
,
xfs_fs_repair_cmn_err
(
CE_WARN
,
ip
->
i_mount
,
"corrupt dinode %Lu, forkoff = 0x%x."
"corrupt dinode %Lu, forkoff = 0x%x."
,
" Unmount and run xfs_repair."
,
(
unsigned
long
long
)
ip
->
i_ino
,
(
unsigned
long
long
)
ip
->
i_ino
,
(
int
)(
INT_GET
(
dip
->
di_core
.
di_forkoff
,
ARCH_CONVERT
)));
(
int
)(
INT_GET
(
dip
->
di_core
.
di_forkoff
,
ARCH_CONVERT
)));
XFS_CORRUPTION_ERROR
(
"xfs_iformat(2)"
,
XFS_ERRLEVEL_LOW
,
XFS_CORRUPTION_ERROR
(
"xfs_iformat(2)"
,
XFS_ERRLEVEL_LOW
,
...
@@ -451,8 +449,9 @@ xfs_iformat(
...
@@ -451,8 +449,9 @@ xfs_iformat(
* no local regular files yet
* no local regular files yet
*/
*/
if
(
unlikely
((
INT_GET
(
dip
->
di_core
.
di_mode
,
ARCH_CONVERT
)
&
S_IFMT
)
==
S_IFREG
))
{
if
(
unlikely
((
INT_GET
(
dip
->
di_core
.
di_mode
,
ARCH_CONVERT
)
&
S_IFMT
)
==
S_IFREG
))
{
xfs_fs_cmn_err
(
CE_WARN
,
ip
->
i_mount
,
xfs_fs_repair_cmn_err
(
CE_WARN
,
ip
->
i_mount
,
"corrupt inode (local format for regular file) %Lu. Unmount and run xfs_repair."
,
"corrupt inode %Lu "
"(local format for regular file)."
,
(
unsigned
long
long
)
ip
->
i_ino
);
(
unsigned
long
long
)
ip
->
i_ino
);
XFS_CORRUPTION_ERROR
(
"xfs_iformat(4)"
,
XFS_CORRUPTION_ERROR
(
"xfs_iformat(4)"
,
XFS_ERRLEVEL_LOW
,
XFS_ERRLEVEL_LOW
,
...
@@ -462,8 +461,9 @@ xfs_iformat(
...
@@ -462,8 +461,9 @@ xfs_iformat(
di_size
=
INT_GET
(
dip
->
di_core
.
di_size
,
ARCH_CONVERT
);
di_size
=
INT_GET
(
dip
->
di_core
.
di_size
,
ARCH_CONVERT
);
if
(
unlikely
(
di_size
>
XFS_DFORK_DSIZE
(
dip
,
ip
->
i_mount
)))
{
if
(
unlikely
(
di_size
>
XFS_DFORK_DSIZE
(
dip
,
ip
->
i_mount
)))
{
xfs_fs_cmn_err
(
CE_WARN
,
ip
->
i_mount
,
xfs_fs_repair_cmn_err
(
CE_WARN
,
ip
->
i_mount
,
"corrupt inode %Lu (bad size %Ld for local inode). Unmount and run xfs_repair."
,
"corrupt inode %Lu "
"(bad size %Ld for local inode)."
,
(
unsigned
long
long
)
ip
->
i_ino
,
(
unsigned
long
long
)
ip
->
i_ino
,
(
long
long
)
di_size
);
(
long
long
)
di_size
);
XFS_CORRUPTION_ERROR
(
"xfs_iformat(5)"
,
XFS_CORRUPTION_ERROR
(
"xfs_iformat(5)"
,
...
@@ -551,8 +551,9 @@ xfs_iformat_local(
...
@@ -551,8 +551,9 @@ xfs_iformat_local(
* kmem_alloc() or memcpy() below.
* kmem_alloc() or memcpy() below.
*/
*/
if
(
unlikely
(
size
>
XFS_DFORK_SIZE
(
dip
,
ip
->
i_mount
,
whichfork
)))
{
if
(
unlikely
(
size
>
XFS_DFORK_SIZE
(
dip
,
ip
->
i_mount
,
whichfork
)))
{
xfs_fs_cmn_err
(
CE_WARN
,
ip
->
i_mount
,
xfs_fs_repair_cmn_err
(
CE_WARN
,
ip
->
i_mount
,
"corrupt inode %Lu (bad size %d for local fork, size = %d). Unmount and run xfs_repair."
,
"corrupt inode %Lu "
"(bad size %d for local fork, size = %d)."
,
(
unsigned
long
long
)
ip
->
i_ino
,
size
,
(
unsigned
long
long
)
ip
->
i_ino
,
size
,
XFS_DFORK_SIZE
(
dip
,
ip
->
i_mount
,
whichfork
));
XFS_DFORK_SIZE
(
dip
,
ip
->
i_mount
,
whichfork
));
XFS_CORRUPTION_ERROR
(
"xfs_iformat_local"
,
XFS_ERRLEVEL_LOW
,
XFS_CORRUPTION_ERROR
(
"xfs_iformat_local"
,
XFS_ERRLEVEL_LOW
,
...
@@ -610,8 +611,8 @@ xfs_iformat_extents(
...
@@ -610,8 +611,8 @@ xfs_iformat_extents(
* kmem_alloc() or memcpy() below.
* kmem_alloc() or memcpy() below.
*/
*/
if
(
unlikely
(
size
<
0
||
size
>
XFS_DFORK_SIZE
(
dip
,
ip
->
i_mount
,
whichfork
)))
{
if
(
unlikely
(
size
<
0
||
size
>
XFS_DFORK_SIZE
(
dip
,
ip
->
i_mount
,
whichfork
)))
{
xfs_fs_cmn_err
(
CE_WARN
,
ip
->
i_mount
,
xfs_fs_
repair_
cmn_err
(
CE_WARN
,
ip
->
i_mount
,
"corrupt inode %Lu ((a)extents = %d).
Unmount and run xfs_repair.
"
,
"corrupt inode %Lu ((a)extents = %d)."
,
(
unsigned
long
long
)
ip
->
i_ino
,
nex
);
(
unsigned
long
long
)
ip
->
i_ino
,
nex
);
XFS_CORRUPTION_ERROR
(
"xfs_iformat_extents(1)"
,
XFS_ERRLEVEL_LOW
,
XFS_CORRUPTION_ERROR
(
"xfs_iformat_extents(1)"
,
XFS_ERRLEVEL_LOW
,
ip
->
i_mount
,
dip
);
ip
->
i_mount
,
dip
);
...
@@ -692,8 +693,8 @@ xfs_iformat_btree(
...
@@ -692,8 +693,8 @@ xfs_iformat_btree(
||
XFS_BMDR_SPACE_CALC
(
nrecs
)
>
||
XFS_BMDR_SPACE_CALC
(
nrecs
)
>
XFS_DFORK_SIZE
(
dip
,
ip
->
i_mount
,
whichfork
)
XFS_DFORK_SIZE
(
dip
,
ip
->
i_mount
,
whichfork
)
||
XFS_IFORK_NEXTENTS
(
ip
,
whichfork
)
>
ip
->
i_d
.
di_nblocks
))
{
||
XFS_IFORK_NEXTENTS
(
ip
,
whichfork
)
>
ip
->
i_d
.
di_nblocks
))
{
xfs_fs_cmn_err
(
CE_WARN
,
ip
->
i_mount
,
xfs_fs_
repair_
cmn_err
(
CE_WARN
,
ip
->
i_mount
,
"corrupt inode %Lu (btree).
Unmount and run xfs_repair.
"
,
"corrupt inode %Lu (btree)."
,
(
unsigned
long
long
)
ip
->
i_ino
);
(
unsigned
long
long
)
ip
->
i_ino
);
XFS_ERROR_REPORT
(
"xfs_iformat_btree"
,
XFS_ERRLEVEL_LOW
,
XFS_ERROR_REPORT
(
"xfs_iformat_btree"
,
XFS_ERRLEVEL_LOW
,
ip
->
i_mount
);
ip
->
i_mount
);
...
@@ -809,6 +810,10 @@ _xfs_dic2xflags(
...
@@ -809,6 +810,10 @@ _xfs_dic2xflags(
flags
|=
XFS_XFLAG_PROJINHERIT
;
flags
|=
XFS_XFLAG_PROJINHERIT
;
if
(
di_flags
&
XFS_DIFLAG_NOSYMLINKS
)
if
(
di_flags
&
XFS_DIFLAG_NOSYMLINKS
)
flags
|=
XFS_XFLAG_NOSYMLINKS
;
flags
|=
XFS_XFLAG_NOSYMLINKS
;
if
(
di_flags
&
XFS_DIFLAG_EXTSIZE
)
flags
|=
XFS_XFLAG_EXTSIZE
;
if
(
di_flags
&
XFS_DIFLAG_EXTSZINHERIT
)
flags
|=
XFS_XFLAG_EXTSZINHERIT
;
}
}
return
flags
;
return
flags
;
...
@@ -1192,11 +1197,19 @@ xfs_ialloc(
...
@@ -1192,11 +1197,19 @@ xfs_ialloc(
if
((
mode
&
S_IFMT
)
==
S_IFDIR
)
{
if
((
mode
&
S_IFMT
)
==
S_IFDIR
)
{
if
(
pip
->
i_d
.
di_flags
&
XFS_DIFLAG_RTINHERIT
)
if
(
pip
->
i_d
.
di_flags
&
XFS_DIFLAG_RTINHERIT
)
di_flags
|=
XFS_DIFLAG_RTINHERIT
;
di_flags
|=
XFS_DIFLAG_RTINHERIT
;
}
else
{
if
(
pip
->
i_d
.
di_flags
&
XFS_DIFLAG_EXTSZINHERIT
)
{
di_flags
|=
XFS_DIFLAG_EXTSZINHERIT
;
ip
->
i_d
.
di_extsize
=
pip
->
i_d
.
di_extsize
;
}
}
else
if
((
mode
&
S_IFMT
)
==
S_IFREG
)
{
if
(
pip
->
i_d
.
di_flags
&
XFS_DIFLAG_RTINHERIT
)
{
if
(
pip
->
i_d
.
di_flags
&
XFS_DIFLAG_RTINHERIT
)
{
di_flags
|=
XFS_DIFLAG_REALTIME
;
di_flags
|=
XFS_DIFLAG_REALTIME
;
ip
->
i_iocore
.
io_flags
|=
XFS_IOCORE_RT
;
ip
->
i_iocore
.
io_flags
|=
XFS_IOCORE_RT
;
}
}
if
(
pip
->
i_d
.
di_flags
&
XFS_DIFLAG_EXTSZINHERIT
)
{
di_flags
|=
XFS_DIFLAG_EXTSIZE
;
ip
->
i_d
.
di_extsize
=
pip
->
i_d
.
di_extsize
;
}
}
}
if
((
pip
->
i_d
.
di_flags
&
XFS_DIFLAG_NOATIME
)
&&
if
((
pip
->
i_d
.
di_flags
&
XFS_DIFLAG_NOATIME
)
&&
xfs_inherit_noatime
)
xfs_inherit_noatime
)
...
@@ -1262,7 +1275,7 @@ xfs_isize_check(
...
@@ -1262,7 +1275,7 @@ xfs_isize_check(
if
((
ip
->
i_d
.
di_mode
&
S_IFMT
)
!=
S_IFREG
)
if
((
ip
->
i_d
.
di_mode
&
S_IFMT
)
!=
S_IFREG
)
return
;
return
;
if
(
ip
->
i_d
.
di_flags
&
XFS_DIFLAG_REALTIME
)
if
(
ip
->
i_d
.
di_flags
&
(
XFS_DIFLAG_REALTIME
|
XFS_DIFLAG_EXTSIZE
)
)
return
;
return
;
nimaps
=
2
;
nimaps
=
2
;
...
@@ -1765,22 +1778,19 @@ xfs_igrow_start(
...
@@ -1765,22 +1778,19 @@ xfs_igrow_start(
xfs_fsize_t
new_size
,
xfs_fsize_t
new_size
,
cred_t
*
credp
)
cred_t
*
credp
)
{
{
xfs_fsize_t
isize
;
int
error
;
int
error
;
ASSERT
(
ismrlocked
(
&
(
ip
->
i_lock
),
MR_UPDATE
)
!=
0
);
ASSERT
(
ismrlocked
(
&
(
ip
->
i_lock
),
MR_UPDATE
)
!=
0
);
ASSERT
(
ismrlocked
(
&
(
ip
->
i_iolock
),
MR_UPDATE
)
!=
0
);
ASSERT
(
ismrlocked
(
&
(
ip
->
i_iolock
),
MR_UPDATE
)
!=
0
);
ASSERT
(
new_size
>
ip
->
i_d
.
di_size
);
ASSERT
(
new_size
>
ip
->
i_d
.
di_size
);
error
=
0
;
isize
=
ip
->
i_d
.
di_size
;
/*
/*
* Zero any pages that may have been created by
* Zero any pages that may have been created by
* xfs_write_file() beyond the end of the file
* xfs_write_file() beyond the end of the file
* and any blocks between the old and new file sizes.
* and any blocks between the old and new file sizes.
*/
*/
error
=
xfs_zero_eof
(
XFS_ITOV
(
ip
),
&
ip
->
i_iocore
,
new_size
,
isize
,
error
=
xfs_zero_eof
(
XFS_ITOV
(
ip
),
&
ip
->
i_iocore
,
new_size
,
new_size
);
ip
->
i_d
.
di_size
,
new_size
);
return
error
;
return
error
;
}
}
...
@@ -3355,6 +3365,11 @@ xfs_iflush_int(
...
@@ -3355,6 +3365,11 @@ xfs_iflush_int(
ip
->
i_update_core
=
0
;
ip
->
i_update_core
=
0
;
SYNCHRONIZE
();
SYNCHRONIZE
();
/*
* Make sure to get the latest atime from the Linux inode.
*/
xfs_synchronize_atime
(
ip
);
if
(
XFS_TEST_ERROR
(
INT_GET
(
dip
->
di_core
.
di_magic
,
ARCH_CONVERT
)
!=
XFS_DINODE_MAGIC
,
if
(
XFS_TEST_ERROR
(
INT_GET
(
dip
->
di_core
.
di_magic
,
ARCH_CONVERT
)
!=
XFS_DINODE_MAGIC
,
mp
,
XFS_ERRTAG_IFLUSH_1
,
XFS_RANDOM_IFLUSH_1
))
{
mp
,
XFS_ERRTAG_IFLUSH_1
,
XFS_RANDOM_IFLUSH_1
))
{
xfs_cmn_err
(
XFS_PTAG_IFLUSH
,
CE_ALERT
,
mp
,
xfs_cmn_err
(
XFS_PTAG_IFLUSH
,
CE_ALERT
,
mp
,
...
...
fs/xfs/xfs_inode.h
浏览文件 @
9f5974c8
...
@@ -436,6 +436,10 @@ void xfs_ichgtime(xfs_inode_t *, int);
...
@@ -436,6 +436,10 @@ void xfs_ichgtime(xfs_inode_t *, int);
xfs_fsize_t
xfs_file_last_byte
(
xfs_inode_t
*
);
xfs_fsize_t
xfs_file_last_byte
(
xfs_inode_t
*
);
void
xfs_lock_inodes
(
xfs_inode_t
**
,
int
,
int
,
uint
);
void
xfs_lock_inodes
(
xfs_inode_t
**
,
int
,
int
,
uint
);
xfs_inode_t
*
xfs_vtoi
(
struct
vnode
*
vp
);
void
xfs_synchronize_atime
(
xfs_inode_t
*
);
#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
#ifdef DEBUG
#ifdef DEBUG
...
...
fs/xfs/xfs_inode_item.c
浏览文件 @
9f5974c8
...
@@ -271,6 +271,11 @@ xfs_inode_item_format(
...
@@ -271,6 +271,11 @@ xfs_inode_item_format(
if
(
ip
->
i_update_size
)
if
(
ip
->
i_update_size
)
ip
->
i_update_size
=
0
;
ip
->
i_update_size
=
0
;
/*
* Make sure to get the latest atime from the Linux inode.
*/
xfs_synchronize_atime
(
ip
);
vecp
->
i_addr
=
(
xfs_caddr_t
)
&
ip
->
i_d
;
vecp
->
i_addr
=
(
xfs_caddr_t
)
&
ip
->
i_d
;
vecp
->
i_len
=
sizeof
(
xfs_dinode_core_t
);
vecp
->
i_len
=
sizeof
(
xfs_dinode_core_t
);
XLOG_VEC_SET_TYPE
(
vecp
,
XLOG_REG_TYPE_ICORE
);
XLOG_VEC_SET_TYPE
(
vecp
,
XLOG_REG_TYPE_ICORE
);
...
@@ -603,7 +608,7 @@ xfs_inode_item_trylock(
...
@@ -603,7 +608,7 @@ xfs_inode_item_trylock(
if
(
iip
->
ili_pushbuf_flag
==
0
)
{
if
(
iip
->
ili_pushbuf_flag
==
0
)
{
iip
->
ili_pushbuf_flag
=
1
;
iip
->
ili_pushbuf_flag
=
1
;
#ifdef DEBUG
#ifdef DEBUG
iip
->
ili_push_owner
=
get_thread_
id
();
iip
->
ili_push_owner
=
current_p
id
();
#endif
#endif
/*
/*
* Inode is left locked in shared mode.
* Inode is left locked in shared mode.
...
@@ -782,7 +787,7 @@ xfs_inode_item_pushbuf(
...
@@ -782,7 +787,7 @@ xfs_inode_item_pushbuf(
* trying to duplicate our effort.
* trying to duplicate our effort.
*/
*/
ASSERT
(
iip
->
ili_pushbuf_flag
!=
0
);
ASSERT
(
iip
->
ili_pushbuf_flag
!=
0
);
ASSERT
(
iip
->
ili_push_owner
==
get_thread_
id
());
ASSERT
(
iip
->
ili_push_owner
==
current_p
id
());
/*
/*
* If flushlock isn't locked anymore, chances are that the
* If flushlock isn't locked anymore, chances are that the
...
...
fs/xfs/xfs_iomap.c
浏览文件 @
9f5974c8
...
@@ -262,7 +262,7 @@ xfs_iomap(
...
@@ -262,7 +262,7 @@ xfs_iomap(
case
BMAPI_WRITE
:
case
BMAPI_WRITE
:
/* If we found an extent, return it */
/* If we found an extent, return it */
if
(
nimaps
&&
if
(
nimaps
&&
(
imap
.
br_startblock
!=
HOLESTARTBLOCK
)
&&
(
imap
.
br_startblock
!=
HOLESTARTBLOCK
)
&&
(
imap
.
br_startblock
!=
DELAYSTARTBLOCK
))
{
(
imap
.
br_startblock
!=
DELAYSTARTBLOCK
))
{
xfs_iomap_map_trace
(
XFS_IOMAP_WRITE_MAP
,
io
,
xfs_iomap_map_trace
(
XFS_IOMAP_WRITE_MAP
,
io
,
offset
,
count
,
iomapp
,
&
imap
,
flags
);
offset
,
count
,
iomapp
,
&
imap
,
flags
);
...
@@ -316,6 +316,58 @@ xfs_iomap(
...
@@ -316,6 +316,58 @@ xfs_iomap(
return
XFS_ERROR
(
error
);
return
XFS_ERROR
(
error
);
}
}
STATIC
int
xfs_iomap_eof_align_last_fsb
(
xfs_mount_t
*
mp
,
xfs_iocore_t
*
io
,
xfs_fsize_t
isize
,
xfs_extlen_t
extsize
,
xfs_fileoff_t
*
last_fsb
)
{
xfs_fileoff_t
new_last_fsb
=
0
;
xfs_extlen_t
align
;
int
eof
,
error
;
if
(
io
->
io_flags
&
XFS_IOCORE_RT
)
;
/*
* If mounted with the "-o swalloc" option, roundup the allocation
* request to a stripe width boundary if the file size is >=
* stripe width and we are allocating past the allocation eof.
*/
else
if
(
mp
->
m_swidth
&&
(
mp
->
m_flags
&
XFS_MOUNT_SWALLOC
)
&&
(
isize
>=
XFS_FSB_TO_B
(
mp
,
mp
->
m_swidth
)))
new_last_fsb
=
roundup_64
(
*
last_fsb
,
mp
->
m_swidth
);
/*
* Roundup the allocation request to a stripe unit (m_dalign) boundary
* if the file size is >= stripe unit size, and we are allocating past
* the allocation eof.
*/
else
if
(
mp
->
m_dalign
&&
(
isize
>=
XFS_FSB_TO_B
(
mp
,
mp
->
m_dalign
)))
new_last_fsb
=
roundup_64
(
*
last_fsb
,
mp
->
m_dalign
);
/*
* Always round up the allocation request to an extent boundary
* (when file on a real-time subvolume or has di_extsize hint).
*/
if
(
extsize
)
{
if
(
new_last_fsb
)
align
=
roundup_64
(
new_last_fsb
,
extsize
);
else
align
=
extsize
;
new_last_fsb
=
roundup_64
(
*
last_fsb
,
align
);
}
if
(
new_last_fsb
)
{
error
=
XFS_BMAP_EOF
(
mp
,
io
,
new_last_fsb
,
XFS_DATA_FORK
,
&
eof
);
if
(
error
)
return
error
;
if
(
eof
)
*
last_fsb
=
new_last_fsb
;
}
return
0
;
}
STATIC
int
STATIC
int
xfs_flush_space
(
xfs_flush_space
(
xfs_inode_t
*
ip
,
xfs_inode_t
*
ip
,
...
@@ -362,19 +414,20 @@ xfs_iomap_write_direct(
...
@@ -362,19 +414,20 @@ xfs_iomap_write_direct(
xfs_iocore_t
*
io
=
&
ip
->
i_iocore
;
xfs_iocore_t
*
io
=
&
ip
->
i_iocore
;
xfs_fileoff_t
offset_fsb
;
xfs_fileoff_t
offset_fsb
;
xfs_fileoff_t
last_fsb
;
xfs_fileoff_t
last_fsb
;
xfs_filblks_t
count_fsb
;
xfs_filblks_t
count_fsb
,
resaligned
;
xfs_fsblock_t
firstfsb
;
xfs_fsblock_t
firstfsb
;
xfs_extlen_t
extsz
,
temp
;
xfs_fsize_t
isize
;
int
nimaps
;
int
nimaps
;
int
error
;
int
bmapi_flag
;
int
bmapi_flag
;
int
quota_flag
;
int
quota_flag
;
int
rt
;
int
rt
;
xfs_trans_t
*
tp
;
xfs_trans_t
*
tp
;
xfs_bmbt_irec_t
imap
;
xfs_bmbt_irec_t
imap
;
xfs_bmap_free_t
free_list
;
xfs_bmap_free_t
free_list
;
xfs_filblks_t
qblocks
,
resblk
s
;
uint
qblocks
,
resblks
,
resrtextent
s
;
int
committed
;
int
committed
;
int
resrtextents
;
int
error
;
/*
/*
* Make sure that the dquots are there. This doesn't hold
* Make sure that the dquots are there. This doesn't hold
...
@@ -384,37 +437,52 @@ xfs_iomap_write_direct(
...
@@ -384,37 +437,52 @@ xfs_iomap_write_direct(
if
(
error
)
if
(
error
)
return
XFS_ERROR
(
error
);
return
XFS_ERROR
(
error
);
offset_fsb
=
XFS_B_TO_FSBT
(
mp
,
offset
);
rt
=
XFS_IS_REALTIME_INODE
(
ip
);
last_fsb
=
XFS_B_TO_FSB
(
mp
,
((
xfs_ufsize_t
)(
offset
+
count
)));
if
(
unlikely
(
rt
))
{
count_fsb
=
last_fsb
-
offset_fsb
;
if
(
!
(
extsz
=
ip
->
i_d
.
di_extsize
))
if
(
found
&&
(
ret_imap
->
br_startblock
==
HOLESTARTBLOCK
))
{
extsz
=
mp
->
m_sb
.
sb_rextsize
;
xfs_fileoff_t
map_last_fsb
;
}
else
{
extsz
=
ip
->
i_d
.
di_extsize
;
map_last_fsb
=
ret_imap
->
br_blockcount
+
ret_imap
->
br_startoff
;
if
(
map_last_fsb
<
last_fsb
)
{
last_fsb
=
map_last_fsb
;
count_fsb
=
last_fsb
-
offset_fsb
;
}
ASSERT
(
count_fsb
>
0
);
}
}
/*
isize
=
ip
->
i_d
.
di_size
;
* Determine if reserving space on the data or realtime partition.
if
(
io
->
io_new_size
>
isize
)
*/
isize
=
io
->
io_new_size
;
if
((
rt
=
XFS_IS_REALTIME_INODE
(
ip
)))
{
xfs_extlen_t
extsz
;
if
(
!
(
extsz
=
ip
->
i_d
.
di_extsize
))
offset_fsb
=
XFS_B_TO_FSBT
(
mp
,
offset
);
extsz
=
mp
->
m_sb
.
sb_rextsize
;
last_fsb
=
XFS_B_TO_FSB
(
mp
,
((
xfs_ufsize_t
)(
offset
+
count
)));
resrtextents
=
qblocks
=
(
count_fsb
+
extsz
-
1
);
if
((
offset
+
count
)
>
isize
)
{
do_div
(
resrtextents
,
mp
->
m_sb
.
sb_rextsize
);
error
=
xfs_iomap_eof_align_last_fsb
(
mp
,
io
,
isize
,
extsz
,
resblks
=
XFS_DIOSTRAT_SPACE_RES
(
mp
,
0
);
&
last_fsb
);
quota_flag
=
XFS_QMOPT_RES_RTBLKS
;
if
(
error
)
goto
error_out
;
}
else
{
}
else
{
resrtextents
=
0
;
if
(
found
&&
(
ret_imap
->
br_startblock
==
HOLESTARTBLOCK
))
resblks
=
qblocks
=
XFS_DIOSTRAT_SPACE_RES
(
mp
,
count_fsb
);
last_fsb
=
MIN
(
last_fsb
,
(
xfs_fileoff_t
)
quota_flag
=
XFS_QMOPT_RES_REGBLKS
;
ret_imap
->
br_blockcount
+
ret_imap
->
br_startoff
);
}
}
count_fsb
=
last_fsb
-
offset_fsb
;
ASSERT
(
count_fsb
>
0
);
resaligned
=
count_fsb
;
if
(
unlikely
(
extsz
))
{
if
((
temp
=
do_mod
(
offset_fsb
,
extsz
)))
resaligned
+=
temp
;
if
((
temp
=
do_mod
(
resaligned
,
extsz
)))
resaligned
+=
extsz
-
temp
;
}
if
(
unlikely
(
rt
))
{
resrtextents
=
qblocks
=
resaligned
;
resrtextents
/=
mp
->
m_sb
.
sb_rextsize
;
resblks
=
XFS_DIOSTRAT_SPACE_RES
(
mp
,
0
);
quota_flag
=
XFS_QMOPT_RES_RTBLKS
;
}
else
{
resrtextents
=
0
;
resblks
=
qblocks
=
XFS_DIOSTRAT_SPACE_RES
(
mp
,
resaligned
);
quota_flag
=
XFS_QMOPT_RES_REGBLKS
;
}
/*
/*
* Allocate and setup the transaction
* Allocate and setup the transaction
...
@@ -425,7 +493,6 @@ xfs_iomap_write_direct(
...
@@ -425,7 +493,6 @@ xfs_iomap_write_direct(
XFS_WRITE_LOG_RES
(
mp
),
resrtextents
,
XFS_WRITE_LOG_RES
(
mp
),
resrtextents
,
XFS_TRANS_PERM_LOG_RES
,
XFS_TRANS_PERM_LOG_RES
,
XFS_WRITE_LOG_COUNT
);
XFS_WRITE_LOG_COUNT
);
/*
/*
* Check for running out of space, note: need lock to return
* Check for running out of space, note: need lock to return
*/
*/
...
@@ -435,20 +502,20 @@ xfs_iomap_write_direct(
...
@@ -435,20 +502,20 @@ xfs_iomap_write_direct(
if
(
error
)
if
(
error
)
goto
error_out
;
goto
error_out
;
if
(
XFS_TRANS_RESERVE_QUOTA_NBLKS
(
mp
,
tp
,
ip
,
qblocks
,
0
,
quota_flag
))
{
error
=
XFS_TRANS_RESERVE_QUOTA_NBLKS
(
mp
,
tp
,
ip
,
error
=
(
EDQUOT
);
qblocks
,
0
,
quota_flag
);
if
(
error
)
goto
error1
;
goto
error1
;
}
bmapi_flag
=
XFS_BMAPI_WRITE
;
xfs_trans_ijoin
(
tp
,
ip
,
XFS_ILOCK_EXCL
);
xfs_trans_ijoin
(
tp
,
ip
,
XFS_ILOCK_EXCL
);
xfs_trans_ihold
(
tp
,
ip
);
xfs_trans_ihold
(
tp
,
ip
);
if
(
!
(
flags
&
BMAPI_MMAP
)
&&
(
offset
<
ip
->
i_d
.
di_size
||
rt
))
bmapi_flag
=
XFS_BMAPI_WRITE
;
if
((
flags
&
BMAPI_DIRECT
)
&&
(
offset
<
ip
->
i_d
.
di_size
||
extsz
))
bmapi_flag
|=
XFS_BMAPI_PREALLOC
;
bmapi_flag
|=
XFS_BMAPI_PREALLOC
;
/*
/*
* Issue the bmapi() call to allocate the blocks
* Issue the
xfs_
bmapi() call to allocate the blocks
*/
*/
XFS_BMAP_INIT
(
&
free_list
,
&
firstfsb
);
XFS_BMAP_INIT
(
&
free_list
,
&
firstfsb
);
nimaps
=
1
;
nimaps
=
1
;
...
@@ -483,8 +550,10 @@ xfs_iomap_write_direct(
...
@@ -483,8 +550,10 @@ xfs_iomap_write_direct(
"extent-state : %x
\n
"
,
"extent-state : %x
\n
"
,
(
ip
->
i_mount
)
->
m_fsname
,
(
ip
->
i_mount
)
->
m_fsname
,
(
long
long
)
ip
->
i_ino
,
(
long
long
)
ip
->
i_ino
,
ret_imap
->
br_startblock
,
ret_imap
->
br_startoff
,
(
unsigned
long
long
)
ret_imap
->
br_startblock
,
ret_imap
->
br_blockcount
,
ret_imap
->
br_state
);
(
unsigned
long
long
)
ret_imap
->
br_startoff
,
(
unsigned
long
long
)
ret_imap
->
br_blockcount
,
ret_imap
->
br_state
);
}
}
return
0
;
return
0
;
...
@@ -500,6 +569,63 @@ xfs_iomap_write_direct(
...
@@ -500,6 +569,63 @@ xfs_iomap_write_direct(
return
XFS_ERROR
(
error
);
return
XFS_ERROR
(
error
);
}
}
/*
* If the caller is doing a write at the end of the file,
* then extend the allocation out to the file system's write
* iosize. We clean up any extra space left over when the
* file is closed in xfs_inactive().
*
* For sync writes, we are flushing delayed allocate space to
* try to make additional space available for allocation near
* the filesystem full boundary - preallocation hurts in that
* situation, of course.
*/
STATIC
int
xfs_iomap_eof_want_preallocate
(
xfs_mount_t
*
mp
,
xfs_iocore_t
*
io
,
xfs_fsize_t
isize
,
xfs_off_t
offset
,
size_t
count
,
int
ioflag
,
xfs_bmbt_irec_t
*
imap
,
int
nimaps
,
int
*
prealloc
)
{
xfs_fileoff_t
start_fsb
;
xfs_filblks_t
count_fsb
;
xfs_fsblock_t
firstblock
;
int
n
,
error
,
imaps
;
*
prealloc
=
0
;
if
((
ioflag
&
BMAPI_SYNC
)
||
(
offset
+
count
)
<=
isize
)
return
0
;
/*
* If there are any real blocks past eof, then don't
* do any speculative allocation.
*/
start_fsb
=
XFS_B_TO_FSBT
(
mp
,
((
xfs_ufsize_t
)(
offset
+
count
-
1
)));
count_fsb
=
XFS_B_TO_FSB
(
mp
,
(
xfs_ufsize_t
)
XFS_MAXIOFFSET
(
mp
));
while
(
count_fsb
>
0
)
{
imaps
=
nimaps
;
firstblock
=
NULLFSBLOCK
;
error
=
XFS_BMAPI
(
mp
,
NULL
,
io
,
start_fsb
,
count_fsb
,
0
,
&
firstblock
,
0
,
imap
,
&
imaps
,
NULL
);
if
(
error
)
return
error
;
for
(
n
=
0
;
n
<
imaps
;
n
++
)
{
if
((
imap
[
n
].
br_startblock
!=
HOLESTARTBLOCK
)
&&
(
imap
[
n
].
br_startblock
!=
DELAYSTARTBLOCK
))
return
0
;
start_fsb
+=
imap
[
n
].
br_blockcount
;
count_fsb
-=
imap
[
n
].
br_blockcount
;
}
}
*
prealloc
=
1
;
return
0
;
}
int
int
xfs_iomap_write_delay
(
xfs_iomap_write_delay
(
xfs_inode_t
*
ip
,
xfs_inode_t
*
ip
,
...
@@ -513,13 +639,15 @@ xfs_iomap_write_delay(
...
@@ -513,13 +639,15 @@ xfs_iomap_write_delay(
xfs_iocore_t
*
io
=
&
ip
->
i_iocore
;
xfs_iocore_t
*
io
=
&
ip
->
i_iocore
;
xfs_fileoff_t
offset_fsb
;
xfs_fileoff_t
offset_fsb
;
xfs_fileoff_t
last_fsb
;
xfs_fileoff_t
last_fsb
;
xfs_fsize_t
isize
;
xfs_off_t
aligned_offset
;
xfs_fileoff_t
ioalign
;
xfs_fsblock_t
firstblock
;
xfs_fsblock_t
firstblock
;
xfs_extlen_t
extsz
;
xfs_fsize_t
isize
;
int
nimaps
;
int
nimaps
;
int
error
;
xfs_bmbt_irec_t
imap
[
XFS_WRITE_IMAPS
];
xfs_bmbt_irec_t
imap
[
XFS_WRITE_IMAPS
];
int
aeof
;
int
prealloc
,
fsynced
=
0
;
int
fsynced
=
0
;
int
error
;
ASSERT
(
ismrlocked
(
&
ip
->
i_lock
,
MR_UPDATE
)
!=
0
);
ASSERT
(
ismrlocked
(
&
ip
->
i_lock
,
MR_UPDATE
)
!=
0
);
...
@@ -527,152 +655,57 @@ xfs_iomap_write_delay(
...
@@ -527,152 +655,57 @@ xfs_iomap_write_delay(
* Make sure that the dquots are there. This doesn't hold
* Make sure that the dquots are there. This doesn't hold
* the ilock across a disk read.
* the ilock across a disk read.
*/
*/
error
=
XFS_QM_DQATTACH
(
mp
,
ip
,
XFS_QMOPT_ILOCKED
);
error
=
XFS_QM_DQATTACH
(
mp
,
ip
,
XFS_QMOPT_ILOCKED
);
if
(
error
)
if
(
error
)
return
XFS_ERROR
(
error
);
return
XFS_ERROR
(
error
);
if
(
XFS_IS_REALTIME_INODE
(
ip
))
{
if
(
!
(
extsz
=
ip
->
i_d
.
di_extsize
))
extsz
=
mp
->
m_sb
.
sb_rextsize
;
}
else
{
extsz
=
ip
->
i_d
.
di_extsize
;
}
offset_fsb
=
XFS_B_TO_FSBT
(
mp
,
offset
);
retry:
retry:
isize
=
ip
->
i_d
.
di_size
;
isize
=
ip
->
i_d
.
di_size
;
if
(
io
->
io_new_size
>
isize
)
{
if
(
io
->
io_new_size
>
isize
)
isize
=
io
->
io_new_size
;
isize
=
io
->
io_new_size
;
}
aeof
=
0
;
error
=
xfs_iomap_eof_want_preallocate
(
mp
,
io
,
isize
,
offset
,
count
,
offset_fsb
=
XFS_B_TO_FSBT
(
mp
,
offset
);
ioflag
,
imap
,
XFS_WRITE_IMAPS
,
&
prealloc
);
last_fsb
=
XFS_B_TO_FSB
(
mp
,
((
xfs_ufsize_t
)(
offset
+
count
)));
if
(
error
)
/*
return
error
;
* If the caller is doing a write at the end of the file,
* then extend the allocation (and the buffer used for the write)
* out to the file system's write iosize. We clean up any extra
* space left over when the file is closed in xfs_inactive().
*
* For sync writes, we are flushing delayed allocate space to
* try to make additional space available for allocation near
* the filesystem full boundary - preallocation hurts in that
* situation, of course.
*/
if
(
!
(
ioflag
&
BMAPI_SYNC
)
&&
((
offset
+
count
)
>
ip
->
i_d
.
di_size
))
{
xfs_off_t
aligned_offset
;
xfs_filblks_t
count_fsb
;
unsigned
int
iosize
;
xfs_fileoff_t
ioalign
;
int
n
;
xfs_fileoff_t
start_fsb
;
/*
if
(
prealloc
)
{
* If there are any real blocks past eof, then don't
* do any speculative allocation.
*/
start_fsb
=
XFS_B_TO_FSBT
(
mp
,
((
xfs_ufsize_t
)(
offset
+
count
-
1
)));
count_fsb
=
XFS_B_TO_FSB
(
mp
,
(
xfs_ufsize_t
)
XFS_MAXIOFFSET
(
mp
));
while
(
count_fsb
>
0
)
{
nimaps
=
XFS_WRITE_IMAPS
;
error
=
XFS_BMAPI
(
mp
,
NULL
,
io
,
start_fsb
,
count_fsb
,
0
,
&
firstblock
,
0
,
imap
,
&
nimaps
,
NULL
);
if
(
error
)
{
return
error
;
}
for
(
n
=
0
;
n
<
nimaps
;
n
++
)
{
if
(
!
(
io
->
io_flags
&
XFS_IOCORE_RT
)
&&
!
imap
[
n
].
br_startblock
)
{
cmn_err
(
CE_PANIC
,
"Access to block "
"zero: fs <%s> inode: %lld "
"start_block : %llx start_off "
": %llx blkcnt : %llx "
"extent-state : %x
\n
"
,
(
ip
->
i_mount
)
->
m_fsname
,
(
long
long
)
ip
->
i_ino
,
imap
[
n
].
br_startblock
,
imap
[
n
].
br_startoff
,
imap
[
n
].
br_blockcount
,
imap
[
n
].
br_state
);
}
if
((
imap
[
n
].
br_startblock
!=
HOLESTARTBLOCK
)
&&
(
imap
[
n
].
br_startblock
!=
DELAYSTARTBLOCK
))
{
goto
write_map
;
}
start_fsb
+=
imap
[
n
].
br_blockcount
;
count_fsb
-=
imap
[
n
].
br_blockcount
;
}
}
iosize
=
mp
->
m_writeio_blocks
;
aligned_offset
=
XFS_WRITEIO_ALIGN
(
mp
,
(
offset
+
count
-
1
));
aligned_offset
=
XFS_WRITEIO_ALIGN
(
mp
,
(
offset
+
count
-
1
));
ioalign
=
XFS_B_TO_FSBT
(
mp
,
aligned_offset
);
ioalign
=
XFS_B_TO_FSBT
(
mp
,
aligned_offset
);
last_fsb
=
ioalign
+
iosize
;
last_fsb
=
ioalign
+
mp
->
m_writeio_blocks
;
aeof
=
1
;
}
else
{
last_fsb
=
XFS_B_TO_FSB
(
mp
,
((
xfs_ufsize_t
)(
offset
+
count
)));
}
}
write_map:
nimaps
=
XFS_WRITE_IMAPS
;
firstblock
=
NULLFSBLOCK
;
/*
if
(
prealloc
||
extsz
)
{
* If mounted with the "-o swalloc" option, roundup the allocation
error
=
xfs_iomap_eof_align_last_fsb
(
mp
,
io
,
isize
,
extsz
,
* request to a stripe width boundary if the file size is >=
&
last_fsb
);
* stripe width and we are allocating past the allocation eof.
if
(
error
)
*/
if
(
!
(
io
->
io_flags
&
XFS_IOCORE_RT
)
&&
mp
->
m_swidth
&&
(
mp
->
m_flags
&
XFS_MOUNT_SWALLOC
)
&&
(
isize
>=
XFS_FSB_TO_B
(
mp
,
mp
->
m_swidth
))
&&
aeof
)
{
int
eof
;
xfs_fileoff_t
new_last_fsb
;
new_last_fsb
=
roundup_64
(
last_fsb
,
mp
->
m_swidth
);
error
=
xfs_bmap_eof
(
ip
,
new_last_fsb
,
XFS_DATA_FORK
,
&
eof
);
if
(
error
)
{
return
error
;
}
if
(
eof
)
{
last_fsb
=
new_last_fsb
;
}
/*
* Roundup the allocation request to a stripe unit (m_dalign) boundary
* if the file size is >= stripe unit size, and we are allocating past
* the allocation eof.
*/
}
else
if
(
!
(
io
->
io_flags
&
XFS_IOCORE_RT
)
&&
mp
->
m_dalign
&&
(
isize
>=
XFS_FSB_TO_B
(
mp
,
mp
->
m_dalign
))
&&
aeof
)
{
int
eof
;
xfs_fileoff_t
new_last_fsb
;
new_last_fsb
=
roundup_64
(
last_fsb
,
mp
->
m_dalign
);
error
=
xfs_bmap_eof
(
ip
,
new_last_fsb
,
XFS_DATA_FORK
,
&
eof
);
if
(
error
)
{
return
error
;
}
if
(
eof
)
{
last_fsb
=
new_last_fsb
;
}
/*
* Round up the allocation request to a real-time extent boundary
* if the file is on the real-time subvolume.
*/
}
else
if
(
io
->
io_flags
&
XFS_IOCORE_RT
&&
aeof
)
{
int
eof
;
xfs_fileoff_t
new_last_fsb
;
new_last_fsb
=
roundup_64
(
last_fsb
,
mp
->
m_sb
.
sb_rextsize
);
error
=
XFS_BMAP_EOF
(
mp
,
io
,
new_last_fsb
,
XFS_DATA_FORK
,
&
eof
);
if
(
error
)
{
return
error
;
return
error
;
}
if
(
eof
)
last_fsb
=
new_last_fsb
;
}
}
nimaps
=
XFS_WRITE_IMAPS
;
firstblock
=
NULLFSBLOCK
;
error
=
xfs_bmapi
(
NULL
,
ip
,
offset_fsb
,
error
=
xfs_bmapi
(
NULL
,
ip
,
offset_fsb
,
(
xfs_filblks_t
)(
last_fsb
-
offset_fsb
),
(
xfs_filblks_t
)(
last_fsb
-
offset_fsb
),
XFS_BMAPI_DELAY
|
XFS_BMAPI_WRITE
|
XFS_BMAPI_DELAY
|
XFS_BMAPI_WRITE
|
XFS_BMAPI_ENTIRE
,
&
firstblock
,
1
,
imap
,
XFS_BMAPI_ENTIRE
,
&
firstblock
,
1
,
imap
,
&
nimaps
,
NULL
);
&
nimaps
,
NULL
);
/*
if
(
error
&&
(
error
!=
ENOSPC
))
* This can be EDQUOT, if nimaps == 0
*/
if
(
error
&&
(
error
!=
ENOSPC
))
{
return
XFS_ERROR
(
error
);
return
XFS_ERROR
(
error
);
}
/*
/*
* If bmapi returned us nothing, and if we didn't get back EDQUOT,
* If bmapi returned us nothing, and if we didn't get back EDQUOT,
* then we must have run out of space.
* then we must have run out of space
- flush delalloc, and retry.
.
*/
*/
if
(
nimaps
==
0
)
{
if
(
nimaps
==
0
)
{
xfs_iomap_enter_trace
(
XFS_IOMAP_WRITE_NOSPACE
,
xfs_iomap_enter_trace
(
XFS_IOMAP_WRITE_NOSPACE
,
...
@@ -684,17 +717,21 @@ xfs_iomap_write_delay(
...
@@ -684,17 +717,21 @@ xfs_iomap_write_delay(
goto
retry
;
goto
retry
;
}
}
*
ret_imap
=
imap
[
0
];
if
(
!
(
io
->
io_flags
&
XFS_IOCORE_RT
)
&&
!
ret_imap
->
br_startblock
)
{
*
nmaps
=
1
;
if
(
!
(
io
->
io_flags
&
XFS_IOCORE_RT
)
&&
!
ret_imap
->
br_startblock
)
{
cmn_err
(
CE_PANIC
,
"Access to block zero: fs <%s> inode: %lld "
cmn_err
(
CE_PANIC
,
"Access to block zero: fs <%s> inode: %lld "
"start_block : %llx start_off : %llx blkcnt : %llx "
"start_block : %llx start_off : %llx blkcnt : %llx "
"extent-state : %x
\n
"
,
"extent-state : %x
\n
"
,
(
ip
->
i_mount
)
->
m_fsname
,
(
ip
->
i_mount
)
->
m_fsname
,
(
long
long
)
ip
->
i_ino
,
(
long
long
)
ip
->
i_ino
,
ret_imap
->
br_startblock
,
ret_imap
->
br_startoff
,
(
unsigned
long
long
)
ret_imap
->
br_startblock
,
ret_imap
->
br_blockcount
,
ret_imap
->
br_state
);
(
unsigned
long
long
)
ret_imap
->
br_startoff
,
(
unsigned
long
long
)
ret_imap
->
br_blockcount
,
ret_imap
->
br_state
);
}
}
*
ret_imap
=
imap
[
0
];
*
nmaps
=
1
;
return
0
;
return
0
;
}
}
...
@@ -820,17 +857,21 @@ xfs_iomap_write_allocate(
...
@@ -820,17 +857,21 @@ xfs_iomap_write_allocate(
*/
*/
for
(
i
=
0
;
i
<
nimaps
;
i
++
)
{
for
(
i
=
0
;
i
<
nimaps
;
i
++
)
{
if
(
!
(
io
->
io_flags
&
XFS_IOCORE_RT
)
&&
if
(
!
(
io
->
io_flags
&
XFS_IOCORE_RT
)
&&
!
imap
[
i
].
br_startblock
)
{
!
imap
[
i
].
br_startblock
)
{
cmn_err
(
CE_PANIC
,
"Access to block zero: "
cmn_err
(
CE_PANIC
,
"Access to block zero: "
"fs <%s> inode: %lld "
"fs <%s> inode: %lld "
"start_block : %llx start_off : %llx "
"start_block : %llx start_off : %llx "
"blkcnt : %llx extent-state : %x
\n
"
,
"blkcnt : %llx extent-state : %x
\n
"
,
(
ip
->
i_mount
)
->
m_fsname
,
(
ip
->
i_mount
)
->
m_fsname
,
(
long
long
)
ip
->
i_ino
,
(
long
long
)
ip
->
i_ino
,
imap
[
i
].
br_startblock
,
(
unsigned
long
long
)
imap
[
i
].
br_startoff
,
imap
[
i
].
br_startblock
,
imap
[
i
].
br_blockcount
,
imap
[
i
].
br_state
);
(
unsigned
long
long
)
imap
[
i
].
br_startoff
,
(
unsigned
long
long
)
imap
[
i
].
br_blockcount
,
imap
[
i
].
br_state
);
}
}
if
((
offset_fsb
>=
imap
[
i
].
br_startoff
)
&&
if
((
offset_fsb
>=
imap
[
i
].
br_startoff
)
&&
(
offset_fsb
<
(
imap
[
i
].
br_startoff
+
(
offset_fsb
<
(
imap
[
i
].
br_startoff
+
...
@@ -867,17 +908,17 @@ xfs_iomap_write_unwritten(
...
@@ -867,17 +908,17 @@ xfs_iomap_write_unwritten(
{
{
xfs_mount_t
*
mp
=
ip
->
i_mount
;
xfs_mount_t
*
mp
=
ip
->
i_mount
;
xfs_iocore_t
*
io
=
&
ip
->
i_iocore
;
xfs_iocore_t
*
io
=
&
ip
->
i_iocore
;
xfs_trans_t
*
tp
;
xfs_fileoff_t
offset_fsb
;
xfs_fileoff_t
offset_fsb
;
xfs_filblks_t
count_fsb
;
xfs_filblks_t
count_fsb
;
xfs_filblks_t
numblks_fsb
;
xfs_filblks_t
numblks_fsb
;
xfs_bmbt_irec_t
imap
;
xfs_fsblock_t
firstfsb
;
int
nimaps
;
xfs_trans_t
*
tp
;
xfs_bmbt_irec_t
imap
;
xfs_bmap_free_t
free_list
;
uint
resblks
;
int
committed
;
int
committed
;
int
error
;
int
error
;
int
nres
;
int
nimaps
;
xfs_fsblock_t
firstfsb
;
xfs_bmap_free_t
free_list
;
xfs_iomap_enter_trace
(
XFS_IOMAP_UNWRITTEN
,
xfs_iomap_enter_trace
(
XFS_IOMAP_UNWRITTEN
,
&
ip
->
i_iocore
,
offset
,
count
);
&
ip
->
i_iocore
,
offset
,
count
);
...
@@ -886,9 +927,9 @@ xfs_iomap_write_unwritten(
...
@@ -886,9 +927,9 @@ xfs_iomap_write_unwritten(
count_fsb
=
XFS_B_TO_FSB
(
mp
,
(
xfs_ufsize_t
)
offset
+
count
);
count_fsb
=
XFS_B_TO_FSB
(
mp
,
(
xfs_ufsize_t
)
offset
+
count
);
count_fsb
=
(
xfs_filblks_t
)(
count_fsb
-
offset_fsb
);
count_fsb
=
(
xfs_filblks_t
)(
count_fsb
-
offset_fsb
);
do
{
resblks
=
XFS_DIOSTRAT_SPACE_RES
(
mp
,
0
)
<<
1
;
nres
=
XFS_DIOSTRAT_SPACE_RES
(
mp
,
0
);
do
{
/*
/*
* set up a transaction to convert the range of extents
* set up a transaction to convert the range of extents
* from unwritten to real. Do allocations in a loop until
* from unwritten to real. Do allocations in a loop until
...
@@ -896,7 +937,7 @@ xfs_iomap_write_unwritten(
...
@@ -896,7 +937,7 @@ xfs_iomap_write_unwritten(
*/
*/
tp
=
xfs_trans_alloc
(
mp
,
XFS_TRANS_STRAT_WRITE
);
tp
=
xfs_trans_alloc
(
mp
,
XFS_TRANS_STRAT_WRITE
);
error
=
xfs_trans_reserve
(
tp
,
nre
s
,
error
=
xfs_trans_reserve
(
tp
,
resblk
s
,
XFS_WRITE_LOG_RES
(
mp
),
0
,
XFS_WRITE_LOG_RES
(
mp
),
0
,
XFS_TRANS_PERM_LOG_RES
,
XFS_TRANS_PERM_LOG_RES
,
XFS_WRITE_LOG_COUNT
);
XFS_WRITE_LOG_COUNT
);
...
@@ -915,7 +956,7 @@ xfs_iomap_write_unwritten(
...
@@ -915,7 +956,7 @@ xfs_iomap_write_unwritten(
XFS_BMAP_INIT
(
&
free_list
,
&
firstfsb
);
XFS_BMAP_INIT
(
&
free_list
,
&
firstfsb
);
nimaps
=
1
;
nimaps
=
1
;
error
=
xfs_bmapi
(
tp
,
ip
,
offset_fsb
,
count_fsb
,
error
=
xfs_bmapi
(
tp
,
ip
,
offset_fsb
,
count_fsb
,
XFS_BMAPI_WRITE
,
&
firstfsb
,
XFS_BMAPI_WRITE
|
XFS_BMAPI_CONVERT
,
&
firstfsb
,
1
,
&
imap
,
&
nimaps
,
&
free_list
);
1
,
&
imap
,
&
nimaps
,
&
free_list
);
if
(
error
)
if
(
error
)
goto
error_on_bmapi_transaction
;
goto
error_on_bmapi_transaction
;
...
@@ -929,15 +970,17 @@ xfs_iomap_write_unwritten(
...
@@ -929,15 +970,17 @@ xfs_iomap_write_unwritten(
xfs_iunlock
(
ip
,
XFS_ILOCK_EXCL
);
xfs_iunlock
(
ip
,
XFS_ILOCK_EXCL
);
if
(
error
)
if
(
error
)
goto
error0
;
goto
error0
;
if
(
!
(
io
->
io_flags
&
XFS_IOCORE_RT
)
&&
!
imap
.
br_startblock
)
{
if
(
!
(
io
->
io_flags
&
XFS_IOCORE_RT
)
&&
!
imap
.
br_startblock
)
{
cmn_err
(
CE_PANIC
,
"Access to block zero: fs <%s> "
cmn_err
(
CE_PANIC
,
"Access to block zero: fs <%s> "
"inode: %lld start_block : %llx start_off : "
"inode: %lld start_block : %llx start_off : "
"%llx blkcnt : %llx extent-state : %x
\n
"
,
"%llx blkcnt : %llx extent-state : %x
\n
"
,
(
ip
->
i_mount
)
->
m_fsname
,
(
ip
->
i_mount
)
->
m_fsname
,
(
long
long
)
ip
->
i_ino
,
(
long
long
)
ip
->
i_ino
,
imap
.
br_startblock
,
imap
.
br_startoff
,
(
unsigned
long
long
)
imap
.
br_startblock
,
imap
.
br_blockcount
,
imap
.
br_state
);
(
unsigned
long
long
)
imap
.
br_startoff
,
(
unsigned
long
long
)
imap
.
br_blockcount
,
imap
.
br_state
);
}
}
if
((
numblks_fsb
=
imap
.
br_blockcount
)
==
0
)
{
if
((
numblks_fsb
=
imap
.
br_blockcount
)
==
0
)
{
...
...
fs/xfs/xfs_itable.c
浏览文件 @
9f5974c8
...
@@ -56,6 +56,7 @@ xfs_bulkstat_one_iget(
...
@@ -56,6 +56,7 @@ xfs_bulkstat_one_iget(
{
{
xfs_dinode_core_t
*
dic
;
/* dinode core info pointer */
xfs_dinode_core_t
*
dic
;
/* dinode core info pointer */
xfs_inode_t
*
ip
;
/* incore inode pointer */
xfs_inode_t
*
ip
;
/* incore inode pointer */
vnode_t
*
vp
;
int
error
;
int
error
;
error
=
xfs_iget
(
mp
,
NULL
,
ino
,
0
,
XFS_ILOCK_SHARED
,
&
ip
,
bno
);
error
=
xfs_iget
(
mp
,
NULL
,
ino
,
0
,
XFS_ILOCK_SHARED
,
&
ip
,
bno
);
...
@@ -72,6 +73,7 @@ xfs_bulkstat_one_iget(
...
@@ -72,6 +73,7 @@ xfs_bulkstat_one_iget(
goto
out_iput
;
goto
out_iput
;
}
}
vp
=
XFS_ITOV
(
ip
);
dic
=
&
ip
->
i_d
;
dic
=
&
ip
->
i_d
;
/* xfs_iget returns the following without needing
/* xfs_iget returns the following without needing
...
@@ -84,8 +86,7 @@ xfs_bulkstat_one_iget(
...
@@ -84,8 +86,7 @@ xfs_bulkstat_one_iget(
buf
->
bs_uid
=
dic
->
di_uid
;
buf
->
bs_uid
=
dic
->
di_uid
;
buf
->
bs_gid
=
dic
->
di_gid
;
buf
->
bs_gid
=
dic
->
di_gid
;
buf
->
bs_size
=
dic
->
di_size
;
buf
->
bs_size
=
dic
->
di_size
;
buf
->
bs_atime
.
tv_sec
=
dic
->
di_atime
.
t_sec
;
vn_atime_to_bstime
(
vp
,
&
buf
->
bs_atime
);
buf
->
bs_atime
.
tv_nsec
=
dic
->
di_atime
.
t_nsec
;
buf
->
bs_mtime
.
tv_sec
=
dic
->
di_mtime
.
t_sec
;
buf
->
bs_mtime
.
tv_sec
=
dic
->
di_mtime
.
t_sec
;
buf
->
bs_mtime
.
tv_nsec
=
dic
->
di_mtime
.
t_nsec
;
buf
->
bs_mtime
.
tv_nsec
=
dic
->
di_mtime
.
t_nsec
;
buf
->
bs_ctime
.
tv_sec
=
dic
->
di_ctime
.
t_sec
;
buf
->
bs_ctime
.
tv_sec
=
dic
->
di_ctime
.
t_sec
;
...
...
fs/xfs/xfs_log.c
...
@@ -178,6 +178,83 @@ xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
 #define xlog_trace_iclog(iclog,state)
 #endif /* XFS_LOG_TRACE */
 
+static void
+xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+{
+	if (*qp) {
+		tic->t_next	    = (*qp);
+		tic->t_prev	    = (*qp)->t_prev;
+		(*qp)->t_prev->t_next = tic;
+		(*qp)->t_prev	    = tic;
+	} else {
+		tic->t_prev = tic->t_next = tic;
+		*qp = tic;
+	}
+
+	tic->t_flags |= XLOG_TIC_IN_Q;
+}
+
+static void
+xlog_del_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+{
+	if (tic == tic->t_next) {
+		*qp = NULL;
+	} else {
+		*qp = tic->t_next;
+		tic->t_next->t_prev = tic->t_prev;
+		tic->t_prev->t_next = tic->t_next;
+	}
+
+	tic->t_next = tic->t_prev = NULL;
+	tic->t_flags &= ~XLOG_TIC_IN_Q;
+}
+
+static void
+xlog_grant_sub_space(struct log *log, int bytes)
+{
+	log->l_grant_write_bytes -= bytes;
+	if (log->l_grant_write_bytes < 0) {
+		log->l_grant_write_bytes += log->l_logsize;
+		log->l_grant_write_cycle--;
+	}
+
+	log->l_grant_reserve_bytes -= bytes;
+	if ((log)->l_grant_reserve_bytes < 0) {
+		log->l_grant_reserve_bytes += log->l_logsize;
+		log->l_grant_reserve_cycle--;
+	}
+}
+
+static void
+xlog_grant_add_space_write(struct log *log, int bytes)
+{
+	log->l_grant_write_bytes += bytes;
+	if (log->l_grant_write_bytes > log->l_logsize) {
+		log->l_grant_write_bytes -= log->l_logsize;
+		log->l_grant_write_cycle++;
+	}
+}
+
+static void
+xlog_grant_add_space_reserve(struct log *log, int bytes)
+{
+	log->l_grant_reserve_bytes += bytes;
+	if (log->l_grant_reserve_bytes > log->l_logsize) {
+		log->l_grant_reserve_bytes -= log->l_logsize;
+		log->l_grant_reserve_cycle++;
+	}
+}
+
+static inline void
+xlog_grant_add_space(struct log *log, int bytes)
+{
+	xlog_grant_add_space_write(log, bytes);
+	xlog_grant_add_space_reserve(log, bytes);
+}
+
 /*
  * NOTES:
  *
...
@@ -428,7 +505,7 @@ xfs_log_mount(xfs_mount_t *mp,
 		if (readonly)
 			vfsp->vfs_flag &= ~VFS_RDONLY;
 
-		error = xlog_recover(mp->m_log, readonly);
+		error = xlog_recover(mp->m_log);
 
 		if (readonly)
 			vfsp->vfs_flag |= VFS_RDONLY;
...
@@ -1320,8 +1397,7 @@ xlog_sync(xlog_t *log,
 	/* move grant heads by roundoff in sync */
 	s = GRANT_LOCK(log);
-	XLOG_GRANT_ADD_SPACE(log, roundoff, 'w');
-	XLOG_GRANT_ADD_SPACE(log, roundoff, 'r');
+	xlog_grant_add_space(log, roundoff);
 	GRANT_UNLOCK(log, s);
 
 	/* put cycle number in every block */
...
@@ -1515,7 +1591,6 @@ xlog_state_finish_copy(xlog_t *log,
  * print out info relating to regions written which consume
  * the reservation
  */
-#if defined(XFS_LOG_RES_DEBUG)
 STATIC void
 xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
 {
...
@@ -1605,11 +1680,11 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
 		ticket->t_res_arr_sum, ticket->t_res_o_flow,
 		ticket->t_res_num_ophdrs, ophdr_spc,
 		ticket->t_res_arr_sum +
 			ticket->t_res_o_flow + ophdr_spc,
 		ticket->t_res_num);
 
 	for (i = 0; i < ticket->t_res_num; i++) {
 		uint r_type = ticket->t_res_arr[i].r_type;
 		cmn_err(CE_WARN,
 			"region[%u]: %s - %u bytes\n",
 			i,
...
@@ -1618,9 +1693,6 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
 			   ticket->t_res_arr[i].r_len);
 	}
 }
-#else
-#define xlog_print_tic_res(mp, ticket)
-#endif
 
 /*
  * Write some region out to in-core log
...
@@ -2389,7 +2461,7 @@ xlog_grant_log_space(xlog_t *log,
 	/* something is already sleeping; insert new transaction at end */
 	if (log->l_reserve_headq) {
-		XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
+		xlog_ins_ticketq(&log->l_reserve_headq, tic);
 		xlog_trace_loggrant(log, tic,
 				    "xlog_grant_log_space: sleep 1");
 		/*
...
@@ -2422,7 +2494,7 @@ xlog_grant_log_space(xlog_t *log,
 				     log->l_grant_reserve_bytes);
 	if (free_bytes < need_bytes) {
 		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-			XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
+			xlog_ins_ticketq(&log->l_reserve_headq, tic);
 		xlog_trace_loggrant(log, tic,
 				    "xlog_grant_log_space: sleep 2");
 		XFS_STATS_INC(xs_sleep_logspace);
...
@@ -2439,11 +2511,10 @@ xlog_grant_log_space(xlog_t *log,
 		s = GRANT_LOCK(log);
 		goto redo;
 	} else if (tic->t_flags & XLOG_TIC_IN_Q)
-		XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+		xlog_del_ticketq(&log->l_reserve_headq, tic);
 
 	/* we've got enough space */
-	XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w');
-	XLOG_GRANT_ADD_SPACE(log, need_bytes, 'r');
+	xlog_grant_add_space(log, need_bytes);
 #ifdef DEBUG
 	tail_lsn = log->l_tail_lsn;
 	/*
...
@@ -2464,7 +2535,7 @@ xlog_grant_log_space(xlog_t *log,
  error_return:
 	if (tic->t_flags & XLOG_TIC_IN_Q)
-		XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+		xlog_del_ticketq(&log->l_reserve_headq, tic);
 	xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret");
 	/*
 	 * If we are failing, make sure the ticket doesn't have any
...
@@ -2533,7 +2604,7 @@ xlog_regrant_write_log_space(xlog_t *log,
 		if (ntic != log->l_write_headq) {
 			if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-				XLOG_INS_TICKETQ(log->l_write_headq, tic);
+				xlog_ins_ticketq(&log->l_write_headq, tic);
 
 			xlog_trace_loggrant(log, tic,
 				    "xlog_regrant_write_log_space: sleep 1");
...
@@ -2565,7 +2636,7 @@ xlog_regrant_write_log_space(xlog_t *log,
 				     log->l_grant_write_bytes);
 	if (free_bytes < need_bytes) {
 		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-			XLOG_INS_TICKETQ(log->l_write_headq, tic);
+			xlog_ins_ticketq(&log->l_write_headq, tic);
 		XFS_STATS_INC(xs_sleep_logspace);
 		sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
...
@@ -2581,9 +2652,10 @@ xlog_regrant_write_log_space(xlog_t *log,
 		s = GRANT_LOCK(log);
 		goto redo;
 	} else if (tic->t_flags & XLOG_TIC_IN_Q)
-		XLOG_DEL_TICKETQ(log->l_write_headq, tic);
+		xlog_del_ticketq(&log->l_write_headq, tic);
 
 	/* we've got enough space */
-	XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w');
+	xlog_grant_add_space_write(log, need_bytes);
 #ifdef DEBUG
 	tail_lsn = log->l_tail_lsn;
 	if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
...
@@ -2600,7 +2672,7 @@ xlog_regrant_write_log_space(xlog_t *log,
  error_return:
 	if (tic->t_flags & XLOG_TIC_IN_Q)
-		XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+		xlog_del_ticketq(&log->l_reserve_headq, tic);
 	xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret");
 	/*
 	 * If we are failing, make sure the ticket doesn't have any
...
@@ -2633,8 +2705,7 @@ xlog_regrant_reserve_log_space(xlog_t *log,
 		ticket->t_cnt--;
 
 	s = GRANT_LOCK(log);
-	XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
-	XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
+	xlog_grant_sub_space(log, ticket->t_curr_res);
 	ticket->t_curr_res = ticket->t_unit_res;
 	XLOG_TIC_RESET_RES(ticket);
 	xlog_trace_loggrant(log, ticket,
...
@@ -2647,7 +2718,7 @@ xlog_regrant_reserve_log_space(xlog_t *log,
 		return;
 	}
 
-	XLOG_GRANT_ADD_SPACE(log, ticket->t_unit_res, 'r');
+	xlog_grant_add_space_reserve(log, ticket->t_unit_res);
 	xlog_trace_loggrant(log, ticket,
 			    "xlog_regrant_reserve_log_space: exit");
 	xlog_verify_grant_head(log, 0);
...
@@ -2683,8 +2754,7 @@ xlog_ungrant_log_space(xlog_t *log,
 	s = GRANT_LOCK(log);
 	xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter");
 
-	XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
-	XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
+	xlog_grant_sub_space(log, ticket->t_curr_res);
 	xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current");
...
@@ -2693,8 +2763,7 @@ xlog_ungrant_log_space(xlog_t *log,
 	 */
 	if (ticket->t_cnt > 0) {
 		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
-		XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt, 'w');
-		XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt, 'r');
+		xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
 	}
 
 	xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit");
...
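Note: the new xlog_ins_ticketq()/xlog_del_ticketq() helpers above replace the old XLOG_INS_TICKETQ/XLOG_DEL_TICKETQ macros (removed from xfs_log_priv.h below) and maintain a circular doubly-linked list whose head pointer is just an entry point into the ring. A self-contained userspace sketch of the same invariants (the struct and the test are stand-ins, not kernel code):

#include <assert.h>
#include <stddef.h>

struct ticket {
	struct ticket *t_next;
	struct ticket *t_prev;
};

static void ins_ticketq(struct ticket **qp, struct ticket *tic)
{
	if (*qp) {				/* append at the tail, i.e. before the head */
		tic->t_next = *qp;
		tic->t_prev = (*qp)->t_prev;
		(*qp)->t_prev->t_next = tic;
		(*qp)->t_prev = tic;
	} else {				/* empty queue: node links to itself */
		tic->t_prev = tic->t_next = tic;
		*qp = tic;
	}
}

static void del_ticketq(struct ticket **qp, struct ticket *tic)
{
	if (tic == tic->t_next) {		/* last element in the ring */
		*qp = NULL;
	} else {
		*qp = tic->t_next;		/* keep the entry point valid */
		tic->t_next->t_prev = tic->t_prev;
		tic->t_prev->t_next = tic->t_next;
	}
	tic->t_next = tic->t_prev = NULL;
}

int main(void)
{
	struct ticket a = { 0 }, b = { 0 }, *q = NULL;

	ins_ticketq(&q, &a);
	ins_ticketq(&q, &b);			/* b queues behind a */
	assert(q == &a && a.t_next == &b && b.t_next == &a);
	del_ticketq(&q, &a);
	assert(q == &b && b.t_next == &b && b.t_prev == &b);
	del_ticketq(&q, &b);
	assert(q == NULL);
	return 0;
}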
fs/xfs/xfs_log.h
...
@@ -96,7 +96,6 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
 
 /* Region types for iovec's i_type */
-#if defined(XFS_LOG_RES_DEBUG)
 #define XLOG_REG_TYPE_BFORMAT		1
 #define XLOG_REG_TYPE_BCHUNK		2
 #define XLOG_REG_TYPE_EFI_FORMAT	3
...
@@ -117,21 +116,13 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
 #define XLOG_REG_TYPE_COMMIT		18
 #define XLOG_REG_TYPE_TRANSHDR		19
 #define XLOG_REG_TYPE_MAX		19
-#endif
 
-#if defined(XFS_LOG_RES_DEBUG)
 #define XLOG_VEC_SET_TYPE(vecp, t) ((vecp)->i_type = (t))
-#else
-#define XLOG_VEC_SET_TYPE(vecp, t)
-#endif
 
 typedef struct xfs_log_iovec {
 	xfs_caddr_t	i_addr;		/* beginning address of region */
 	int		i_len;		/* length in bytes of region */
-#if defined(XFS_LOG_RES_DEBUG)
-	uint		i_type;		/* type of region */
-#endif
+	uint		i_type;		/* type of region */
 } xfs_log_iovec_t;
 
 typedef void* xfs_log_ticket_t;
...
fs/xfs/xfs_log_priv.h
...
@@ -253,7 +253,6 @@ typedef __uint32_t xlog_tid_t;
 
 /* Ticket reservation region accounting */
-#if defined(XFS_LOG_RES_DEBUG)
 #define XLOG_TIC_LEN_MAX	15
 #define XLOG_TIC_RESET_RES(t) ((t)->t_res_num = \
 				(t)->t_res_arr_sum = (t)->t_res_num_ophdrs = 0)
...
@@ -278,15 +277,9 @@ typedef __uint32_t xlog_tid_t;
  * we don't care about.
  */
 typedef struct xlog_res {
-	uint	r_len;
-	uint	r_type;
+	uint	r_len;	/* region length		:4 */
+	uint	r_type;	/* region's transaction type	:4 */
 } xlog_res_t;
-#else
-#define XLOG_TIC_RESET_RES(t)
-#define XLOG_TIC_ADD_OPHDR(t)
-#define XLOG_TIC_ADD_REGION(t, len, type)
-#endif
 
 typedef struct xlog_ticket {
 	sv_t		   t_sema;	 /* sleep on this semaphore	 : 20 */
...
@@ -301,14 +294,12 @@ typedef struct xlog_ticket {
 	char		   t_flags;	 /* properties of reservation	 : 1  */
 	uint		   t_trans_type; /* transaction type		 : 4  */
 
-#if defined (XFS_LOG_RES_DEBUG)
 	/* reservation array fields */
 	uint		   t_res_num;			 /* num in array : 4 */
-	xlog_res_t	   t_res_arr[XLOG_TIC_LEN_MAX];  /* array of res : X */
 	uint		   t_res_num_ophdrs;		 /* num op hdrs  : 4 */
 	uint		   t_res_arr_sum;		 /* array sum    : 4 */
 	uint		   t_res_o_flow;		 /* sum overflow : 4 */
-#endif
+	xlog_res_t	   t_res_arr[XLOG_TIC_LEN_MAX];  /* array of res : 8 * 15 */
 } xlog_ticket_t;
 #endif
...
@@ -494,71 +485,13 @@ typedef struct log {
 #define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)
 
-#define XLOG_GRANT_SUB_SPACE(log,bytes,type)				\
-    {									\
-	if (type == 'w') {						\
-		(log)->l_grant_write_bytes -= (bytes);			\
-		if ((log)->l_grant_write_bytes < 0) {			\
-			(log)->l_grant_write_bytes += (log)->l_logsize;	\
-			(log)->l_grant_write_cycle--;			\
-		}							\
-	} else {							\
-		(log)->l_grant_reserve_bytes -= (bytes);		\
-		if ((log)->l_grant_reserve_bytes < 0) {			\
-			(log)->l_grant_reserve_bytes += (log)->l_logsize;\
-			(log)->l_grant_reserve_cycle--;			\
-		}							\
-	}								\
-    }
-#define XLOG_GRANT_ADD_SPACE(log,bytes,type)				\
-    {									\
-	if (type == 'w') {						\
-		(log)->l_grant_write_bytes += (bytes);			\
-		if ((log)->l_grant_write_bytes > (log)->l_logsize) {	\
-			(log)->l_grant_write_bytes -= (log)->l_logsize;	\
-			(log)->l_grant_write_cycle++;			\
-		}							\
-	} else {							\
-		(log)->l_grant_reserve_bytes += (bytes);		\
-		if ((log)->l_grant_reserve_bytes > (log)->l_logsize) {	\
-			(log)->l_grant_reserve_bytes -= (log)->l_logsize;\
-			(log)->l_grant_reserve_cycle++;			\
-		}							\
-	}								\
-    }
-#define XLOG_INS_TICKETQ(q, tic)			\
-    {							\
-	if (q) {					\
-		(tic)->t_next = (q);			\
-		(tic)->t_prev = (q)->t_prev;		\
-		(q)->t_prev->t_next = (tic);		\
-		(q)->t_prev = (tic);			\
-	} else {					\
-		(tic)->t_prev = (tic)->t_next = (tic);	\
-		(q) = (tic);				\
-	}						\
-	(tic)->t_flags |= XLOG_TIC_IN_Q;		\
-    }
-#define XLOG_DEL_TICKETQ(q, tic)			\
-    {							\
-	if ((tic) == (tic)->t_next) {			\
-		(q) = NULL;				\
-	} else {					\
-		(q) = (tic)->t_next;			\
-		(tic)->t_next->t_prev = (tic)->t_prev;	\
-		(tic)->t_prev->t_next = (tic)->t_next;	\
-	}						\
-	(tic)->t_next = (tic)->t_prev = NULL;		\
-	(tic)->t_flags &= ~XLOG_TIC_IN_Q;		\
-    }
 
 /* common routines */
 extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
 extern int	 xlog_find_tail(xlog_t	*log,
 				xfs_daddr_t *head_blk,
-				xfs_daddr_t *tail_blk,
-				int readonly);
-extern int	 xlog_recover(xlog_t *log, int readonly);
+				xfs_daddr_t *tail_blk);
+extern int	 xlog_recover(xlog_t *log);
 extern int	 xlog_recover_finish(xlog_t *log, int mfsi_flags);
 extern void	 xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
 extern void	 xlog_recover_process_iunlinks(xlog_t *log);
...
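Note: the XLOG_GRANT_{ADD,SUB}_SPACE macros deleted above encode wrap-around arithmetic on the grant heads: the byte offset wraps modulo the log size while a cycle counter records each lap. A minimal userspace model of that accounting (field names are local stand-ins for the kernel's struct log; like the kernel code, it assumes a single adjustment never exceeds one log size):

#include <assert.h>

struct grant_head {
	int bytes;	/* head offset within the log */
	int cycle;	/* number of times the head wrapped */
};

static void grant_add(struct grant_head *gh, int bytes, int logsize)
{
	gh->bytes += bytes;
	if (gh->bytes > logsize) {		/* lapped the physical log */
		gh->bytes -= logsize;
		gh->cycle++;
	}
}

static void grant_sub(struct grant_head *gh, int bytes, int logsize)
{
	gh->bytes -= bytes;
	if (gh->bytes < 0) {			/* un-lapped */
		gh->bytes += logsize;
		gh->cycle--;
	}
}

int main(void)
{
	struct grant_head gh = { .bytes = 900, .cycle = 1 };

	grant_add(&gh, 200, 1000);	/* 1100 wraps to 100, cycle 2 */
	assert(gh.bytes == 100 && gh.cycle == 2);
	grant_sub(&gh, 200, 1000);	/* -100 wraps back to 900, cycle 1 */
	assert(gh.bytes == 900 && gh.cycle == 1);
	return 0;
}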
fs/xfs/xfs_log_recover.c
...
@@ -783,8 +783,7 @@ int
 xlog_find_tail(
 	xlog_t			*log,
 	xfs_daddr_t		*head_blk,
-	xfs_daddr_t		*tail_blk,
-	int			readonly)
+	xfs_daddr_t		*tail_blk)
 {
 	xlog_rec_header_t	*rhead;
 	xlog_op_header_t	*op_head;
...
@@ -2563,10 +2562,12 @@ xlog_recover_do_quotaoff_trans(
 	/*
 	 * The logitem format's flag tells us if this was user quotaoff,
-	 * group quotaoff or both.
+	 * group/project quotaoff or both.
 	 */
 	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
 		log->l_quotaoffs_flag |= XFS_DQ_USER;
+	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
+		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
 	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
 		log->l_quotaoffs_flag |= XFS_DQ_GROUP;
...
@@ -3890,14 +3891,13 @@ xlog_do_recover(
  */
 int
 xlog_recover(
-	xlog_t		*log,
-	int		readonly)
+	xlog_t		*log)
 {
 	xfs_daddr_t	head_blk, tail_blk;
 	int		error;
 
 	/* find the tail of the log */
-	if ((error = xlog_find_tail(log, &head_blk, &tail_blk, readonly)))
+	if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
 		return error;
 
 	if (tail_blk != head_blk) {
...
fs/xfs/xfs_mount.c
...
@@ -51,7 +51,7 @@ STATIC int xfs_uuid_mount(xfs_mount_t *);
 STATIC void	xfs_uuid_unmount(xfs_mount_t *mp);
 STATIC void	xfs_unmountfs_wait(xfs_mount_t *);
 
-static struct {
+static const struct {
     short offset;
     short type;	/* 0 = integer
		 * 1 = binary / string (no translation)
...
@@ -1077,8 +1077,7 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
 	xfs_iflush_all(mp);
 
 	XFS_QM_DQPURGEALL(mp,
-		XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_UMOUNTING);
+		XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
 
 	/*
 	 * Flush out the log synchronously so that we know for sure
...
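Note: the unmount path above now purges all quota types with one umbrella flag instead of naming user and group quotas explicitly, which is what lets the project quota support added elsewhere in this merge ride along. For reference, the umbrella flag is composed roughly like this (the actual definition lives in fs/xfs/xfs_quota.h; treat this as a sketch):

/* sketch of the umbrella flag; not copied from the tree */
#define XFS_QMOPT_QUOTALL \
	(XFS_QMOPT_UQUOTA | XFS_QMOPT_PQUOTA | XFS_QMOPT_GQUOTA)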
fs/xfs/xfs_mount.h
...
@@ -308,7 +308,6 @@ typedef struct xfs_mount {
 	xfs_buftarg_t		*m_ddev_targp;	/* saves taking the address */
 	xfs_buftarg_t		*m_logdev_targp;/* ptr to log device */
 	xfs_buftarg_t		*m_rtdev_targp;	/* ptr to rt device */
-#define m_dev		m_ddev_targp->pbr_dev
 	__uint8_t		m_dircook_elog;	/* log d-cookie entry bits */
 	__uint8_t		m_blkbit_log;	/* blocklog + NBBY */
 	__uint8_t		m_blkbb_log;	/* blocklog - BBSHIFT */
...
@@ -393,7 +392,7 @@ typedef struct xfs_mount {
 						   user */
 #define XFS_MOUNT_NOALIGN	(1ULL << 7)	/* turn off stripe alignment
 						   allocations */
-#define XFS_MOUNT_COMPAT_ATTR	(1ULL << 8)	/* do not use attr2 format */
+#define XFS_MOUNT_ATTR2		(1ULL << 8)	/* allow use of attr2 format */
 			     /*	(1ULL << 9)	-- currently unused */
 #define XFS_MOUNT_NORECOVERY	(1ULL << 10)	/* no recovery - dirty fs */
 #define XFS_MOUNT_SHARED	(1ULL << 11)	/* shared mount */
...
fs/xfs/xfs_rename.c
...
@@ -243,7 +243,6 @@ xfs_rename(
 	xfs_inode_t	*inodes[4];
 	int		target_ip_dropped = 0;	/* dropped target_ip link? */
 	vnode_t		*src_dir_vp;
-	bhv_desc_t	*target_dir_bdp;
 	int		spaceres;
 	int		target_link_zero = 0;
 	int		num_inodes;
...
@@ -260,14 +259,12 @@ xfs_rename(
 	 * Find the XFS behavior descriptor for the target directory
 	 * vnode since it was not handed to us.
 	 */
-	target_dir_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(target_dir_vp),
-						&xfs_vnodeops);
-	if (target_dir_bdp == NULL) {
+	target_dp = xfs_vtoi(target_dir_vp);
+	if (target_dp == NULL) {
 		return XFS_ERROR(EXDEV);
 	}
 
 	src_dp = XFS_BHVTOI(src_dir_bdp);
-	target_dp = XFS_BHVTOI(target_dir_bdp);
 	mp = src_dp->i_mount;
 
 	if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_RENAME) ||
...
fs/xfs/xfs_rw.c
...
@@ -238,6 +238,7 @@ xfs_bioerror_relse(
 	}
 	return (EIO);
 }
+
 /*
  * Prints out an ALERT message about I/O error.
  */
...
@@ -252,11 +253,9 @@ xfs_ioerror_alert(
 		"I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx"
 		"       (\"%s\") error %d buf count %zd",
 		(!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname,
-		XFS_BUFTARG_NAME(bp->pb_target),
+		XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
 		(__uint64_t)blkno,
 		func,
 		XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp));
 }
 
 /*
...
fs/xfs/xfs_sb.h
...
@@ -68,18 +68,6 @@ struct xfs_mount;
 	(XFS_SB_VERSION_NUMBITS | \
 	 XFS_SB_VERSION_OKREALFBITS | \
 	 XFS_SB_VERSION_OKSASHFBITS)
-#define	XFS_SB_VERSION_MKFS(ia,dia,extflag,dirv2,na,sflag,morebits)	\
-	(((ia) || (dia) || (extflag) || (dirv2) || (na) || (sflag) || \
-	  (morebits)) ? \
-		(XFS_SB_VERSION_4 | \
-		 ((ia) ? XFS_SB_VERSION_ALIGNBIT : 0) | \
-		 ((dia) ? XFS_SB_VERSION_DALIGNBIT : 0) | \
-		 ((extflag) ? XFS_SB_VERSION_EXTFLGBIT : 0) | \
-		 ((dirv2) ? XFS_SB_VERSION_DIRV2BIT : 0) | \
-		 ((na) ? XFS_SB_VERSION_LOGV2BIT : 0) | \
-		 ((sflag) ? XFS_SB_VERSION_SECTORBIT : 0) | \
-		 ((morebits) ? XFS_SB_VERSION_MOREBITSBIT : 0)) : \
-		XFS_SB_VERSION_1)
 
 /*
  * There are two words to hold XFS "feature" bits: the original
...
@@ -105,11 +93,6 @@ struct xfs_mount;
 	(XFS_SB_VERSION2_OKREALFBITS |	\
 	 XFS_SB_VERSION2_OKSASHFBITS )
 
-/*
- * mkfs macro to set up sb_features2 word
- */
-#define	XFS_SB_VERSION2_MKFS(resvd1, sbcntr)	0
-
 typedef struct xfs_sb
 {
 	__uint32_t	sb_magicnum;	/* magic number == XFS_SB_MAGIC */
...
fs/xfs/xfs_trans.c
...
@@ -1014,6 +1014,7 @@ xfs_trans_cancel(
 	xfs_log_item_t		*lip;
 	int			i;
 #endif
+	xfs_mount_t		*mp = tp->t_mountp;
 
 	/*
 	 * See if the caller is being too lazy to figure out if
...
@@ -1026,9 +1027,10 @@ xfs_trans_cancel(
 	 * filesystem.  This happens in paths where we detect
 	 * corruption and decide to give up.
 	 */
-	if ((tp->t_flags & XFS_TRANS_DIRTY) &&
-	    !XFS_FORCED_SHUTDOWN(tp->t_mountp))
-		xfs_force_shutdown(tp->t_mountp, XFS_CORRUPT_INCORE);
+	if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
+		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
+		xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
+	}
 #ifdef DEBUG
 	if (!(flags & XFS_TRANS_ABORT)) {
 		licp = &(tp->t_items);
...
@@ -1040,7 +1042,7 @@ xfs_trans_cancel(
 			}
 
 			lip = lidp->lid_item;
-			if (!XFS_FORCED_SHUTDOWN(tp->t_mountp))
+			if (!XFS_FORCED_SHUTDOWN(mp))
 				ASSERT(!(lip->li_type == XFS_LI_EFD));
 		}
 		licp = licp->lic_next;
...
@@ -1048,7 +1050,7 @@ xfs_trans_cancel(
 	}
 #endif
 	xfs_trans_unreserve_and_mod_sb(tp);
-	XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);
+	XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
 
 	if (tp->t_ticket) {
 		if (flags & XFS_TRANS_RELEASE_LOG_RES) {
...
@@ -1057,7 +1059,7 @@ xfs_trans_cancel(
 		} else {
 			log_flags = 0;
 		}
-		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
+		xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
 	}
 
 	/* mark this thread as no longer being in a transaction */
...
fs/xfs/xfs_trans.h
...
@@ -973,7 +973,6 @@ void xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *);
 void		xfs_trans_bhold_release(xfs_trans_t *, struct xfs_buf *);
 void		xfs_trans_binval(xfs_trans_t *, struct xfs_buf *);
 void		xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
-void		xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
 void		xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *);
 void		xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
 void		xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
...
fs/xfs/xfs_utils.c
...
@@ -55,16 +55,13 @@ xfs_get_dir_entry(
 	xfs_inode_t	**ipp)
 {
 	vnode_t		*vp;
-	bhv_desc_t	*bdp;
 
 	vp = VNAME_TO_VNODE(dentry);
-	bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
-	if (!bdp) {
-		*ipp = NULL;
+
+	*ipp = xfs_vtoi(vp);
+	if (!*ipp)
 		return XFS_ERROR(ENOENT);
-	}
 	VN_HOLD(vp);
-	*ipp = XFS_BHVTOI(bdp);
 	return 0;
 }
...
fs/xfs/xfs_vfsops.c
...
@@ -53,6 +53,7 @@
 #include "xfs_acl.h"
 #include "xfs_attr.h"
 #include "xfs_clnt.h"
+#include "xfs_fsops.h"
 
 STATIC int xfs_sync(bhv_desc_t *, int, cred_t *);
...
@@ -290,8 +291,8 @@ xfs_start_flags(
 		mp->m_flags |= XFS_MOUNT_IDELETE;
 	if (ap->flags & XFSMNT_DIRSYNC)
 		mp->m_flags |= XFS_MOUNT_DIRSYNC;
-	if (ap->flags & XFSMNT_COMPAT_ATTR)
-		mp->m_flags |= XFS_MOUNT_COMPAT_ATTR;
+	if (ap->flags & XFSMNT_ATTR2)
+		mp->m_flags |= XFS_MOUNT_ATTR2;
 
 	if (ap->flags2 & XFSMNT2_COMPAT_IOSIZE)
 		mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
...
@@ -312,6 +313,8 @@ xfs_start_flags(
 		mp->m_flags |= XFS_MOUNT_NOUUID;
 	if (ap->flags & XFSMNT_BARRIER)
 		mp->m_flags |= XFS_MOUNT_BARRIER;
+	else
+		mp->m_flags &= ~XFS_MOUNT_BARRIER;
 
 	return 0;
 }
...
@@ -330,10 +333,11 @@ xfs_finish_flags(
 	/* Fail a mount where the logbuf is smaller then the log stripe */
 	if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) {
-		if ((ap->logbufsize == -1) &&
+		if ((ap->logbufsize <= 0) &&
 		    (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) {
 			mp->m_logbsize = mp->m_sb.sb_logsunit;
-		} else if (ap->logbufsize < mp->m_sb.sb_logsunit) {
+		} else if (ap->logbufsize > 0 &&
+			   ap->logbufsize < mp->m_sb.sb_logsunit) {
 			cmn_err(CE_WARN,
 	"XFS: logbuf size must be greater than or equal to log stripe size");
 			return XFS_ERROR(EINVAL);
...
@@ -347,6 +351,10 @@ xfs_finish_flags(
 		}
 	}
 
+	if (XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
+		mp->m_flags |= XFS_MOUNT_ATTR2;
+	}
+
 	/*
 	 * prohibit r/w mounts of read-only filesystems
 	 */
...
@@ -382,10 +390,6 @@ xfs_finish_flags(
 		return XFS_ERROR(EINVAL);
 	}
 
-	if (XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
-		mp->m_flags &= ~XFS_MOUNT_COMPAT_ATTR;
-	}
-
 	return 0;
 }
...
@@ -504,13 +508,13 @@ xfs_mount(
 	if (error)
 		goto error2;
 
+	if ((mp->m_flags & XFS_MOUNT_BARRIER) && !(vfsp->vfs_flag & VFS_RDONLY))
+		xfs_mountfs_check_barriers(mp);
+
 	error = XFS_IOINIT(vfsp, args, flags);
 	if (error)
 		goto error2;
 
-	if ((args->flags & XFSMNT_BARRIER) &&
-	    !(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY))
-		xfs_mountfs_check_barriers(mp);
-
 	return 0;
 
 error2:
...
@@ -655,6 +659,11 @@ xfs_mntupdate(
 	else
 		mp->m_flags &= ~XFS_MOUNT_NOATIME;
 
+	if (args->flags & XFSMNT_BARRIER)
+		mp->m_flags |= XFS_MOUNT_BARRIER;
+	else
+		mp->m_flags &= ~XFS_MOUNT_BARRIER;
+
 	if ((vfsp->vfs_flag & VFS_RDONLY) &&
 	    !(*flags & MS_RDONLY)) {
 		vfsp->vfs_flag &= ~VFS_RDONLY;
...
@@ -1634,6 +1643,7 @@ xfs_vget(
 #define MNTOPT_NORECOVERY   "norecovery"   /* don't run XFS recovery */
 #define MNTOPT_BARRIER	"barrier"	/* use writer barriers for log write and
 					 * unwritten extent conversion */
+#define MNTOPT_NOBARRIER "nobarrier"	/* .. disable */
 #define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
 #define MNTOPT_64BITINODE   "inode64"	/* inodes can be allocated anywhere */
 #define MNTOPT_IKEEP	"ikeep"		/* do not free empty inode clusters */
...
@@ -1680,7 +1690,6 @@ xfs_parseargs(
 	int			iosize;
 
 	args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
-	args->flags |= XFSMNT_COMPAT_ATTR;
 
 #if 0	/* XXX: off by default, until some remaining issues ironed out */
 	args->flags |= XFSMNT_IDELETE; /* default to on */
...
@@ -1806,6 +1815,8 @@ xfs_parseargs(
 			args->flags |= XFSMNT_NOUUID;
 		} else if (!strcmp(this_char, MNTOPT_BARRIER)) {
 			args->flags |= XFSMNT_BARRIER;
+		} else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
+			args->flags &= ~XFSMNT_BARRIER;
 		} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
 			args->flags &= ~XFSMNT_IDELETE;
 		} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
...
@@ -1815,9 +1826,9 @@ xfs_parseargs(
 		} else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
 			args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
 		} else if (!strcmp(this_char, MNTOPT_ATTR2)) {
-			args->flags &= ~XFSMNT_COMPAT_ATTR;
+			args->flags |= XFSMNT_ATTR2;
 		} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
-			args->flags |= XFSMNT_COMPAT_ATTR;
+			args->flags &= ~XFSMNT_ATTR2;
 		} else if (!strcmp(this_char, "osyncisdsync")) {
 			/* no-op, this is now the default */
 			printk("XFS: osyncisdsync is now the default, option is deprecated.\n");
...
@@ -1892,7 +1903,6 @@ xfs_showargs(
 		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
 		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
 		{ XFS_MOUNT_OSYNCISOSYNC,	"," MNTOPT_OSYNCISOSYNC },
-		{ XFS_MOUNT_BARRIER,		"," MNTOPT_BARRIER },
 		{ XFS_MOUNT_IDELETE,		"," MNTOPT_NOIKEEP },
 		{ 0, NULL }
 	};
...
@@ -1914,33 +1924,28 @@ xfs_showargs(
 	if (mp->m_logbufs > 0)
 		seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
 	if (mp->m_logbsize > 0)
 		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
 
 	if (mp->m_logname)
 		seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
 	if (mp->m_rtname)
 		seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
 
 	if (mp->m_dalign > 0)
 		seq_printf(m, "," MNTOPT_SUNIT "=%d",
 				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
 	if (mp->m_swidth > 0)
 		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
 				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
 
-	if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR))
-		seq_printf(m, "," MNTOPT_ATTR2);
-
 	if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE))
 		seq_printf(m, "," MNTOPT_LARGEIO);
+	if (mp->m_flags & XFS_MOUNT_BARRIER)
+		seq_printf(m, "," MNTOPT_BARRIER);
 
 	if (!(vfsp->vfs_flag & VFS_32BITINODES))
 		seq_printf(m, "," MNTOPT_64BITINODE);
 	if (vfsp->vfs_flag & VFS_GRPID)
 		seq_printf(m, "," MNTOPT_GRPID);
...
@@ -1959,6 +1964,7 @@ xfs_freeze(
 	/* Push the superblock and write an unmount record */
 	xfs_log_unmount_write(mp);
 	xfs_unmountfs_writesb(mp);
+	xfs_fs_log_dummy(mp);
 }
...
fs/xfs/xfs_vnodeops.c
...
@@ -185,8 +185,7 @@ xfs_getattr(
 		break;
 	}
 
-	vap->va_atime.tv_sec = ip->i_d.di_atime.t_sec;
-	vap->va_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
+	vn_atime_to_timespec(vp, &vap->va_atime);
 	vap->va_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
 	vap->va_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
 	vap->va_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
...
@@ -543,24 +542,6 @@ xfs_setattr(
 			goto error_return;
 		}
 
-	/*
-	 * Can't set extent size unless the file is marked, or
-	 * about to be marked as a realtime file.
-	 *
-	 * This check will be removed when fixed size extents
-	 * with buffered data writes is implemented.
-	 *
-	 */
-	if ((mask & XFS_AT_EXTSIZE)			&&
-	    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
-	     vap->va_extsize) &&
-	    (!((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ||
-	       ((mask & XFS_AT_XFLAGS) &&
-	       (vap->va_xflags & XFS_XFLAG_REALTIME))))) {
-		code = XFS_ERROR(EINVAL);
-		goto error_return;
-	}
-
 	/*
 	 * Can't change realtime flag if any extents are allocated.
 	 */
...
@@ -823,13 +804,17 @@ xfs_setattr(
 				di_flags |= XFS_DIFLAG_RTINHERIT;
 			if (vap->va_xflags & XFS_XFLAG_NOSYMLINKS)
 				di_flags |= XFS_DIFLAG_NOSYMLINKS;
-		} else {
+			if (vap->va_xflags & XFS_XFLAG_EXTSZINHERIT)
+				di_flags |= XFS_DIFLAG_EXTSZINHERIT;
+		} else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
 			if (vap->va_xflags & XFS_XFLAG_REALTIME) {
 				di_flags |= XFS_DIFLAG_REALTIME;
 				ip->i_iocore.io_flags |= XFS_IOCORE_RT;
 			} else {
 				ip->i_iocore.io_flags &= ~XFS_IOCORE_RT;
 			}
+			if (vap->va_xflags & XFS_XFLAG_EXTSIZE)
+				di_flags |= XFS_DIFLAG_EXTSIZE;
 		}
 		ip->i_d.di_flags = di_flags;
 	}
...
@@ -999,10 +984,6 @@ xfs_readlink(
 		goto error_return;
 	}
 
-	if (!(ioflags & IO_INVIS)) {
-		xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
-	}
-
 	/*
 	 * See if the symlink is stored inline.
 	 */
...
@@ -1234,7 +1215,8 @@ xfs_inactive_free_eofblocks(
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
 	if (!error && (nimaps != 0) &&
-	    (imap.br_startblock != HOLESTARTBLOCK)) {
+	    (imap.br_startblock != HOLESTARTBLOCK ||
+	     ip->i_delayed_blks)) {
 		/*
 		 * Attach the dquots to the inode up front.
 		 */
...
@@ -1569,9 +1551,11 @@ xfs_release(
 	if (ip->i_d.di_nlink != 0) {
 		if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
-		     ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) &&
+		     ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 ||
+		       ip->i_delayed_blks > 0)) &&
 		     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
 		    (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
 			if ((error = xfs_inactive_free_eofblocks(mp, ip)))
 				return (error);
 			/* Update linux inode block count after free above */
...
@@ -1628,7 +1612,8 @@ xfs_inactive(
 	 * only one with a reference to the inode.
 	 */
 	truncate = ((ip->i_d.di_nlink == 0) &&
-	    ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0)) &&
+	    ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0) ||
+	     (ip->i_delayed_blks > 0)) &&
 	    ((ip->i_d.di_mode & S_IFMT) == S_IFREG));
 
 	mp = ip->i_mount;
...
@@ -1646,10 +1631,12 @@ xfs_inactive(
 	if (ip->i_d.di_nlink != 0) {
 		if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
-		     ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) &&
-		     (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
-		    (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
-		     (ip->i_delayed_blks != 0))) {
+		     ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 ||
+		       ip->i_delayed_blks > 0)) &&
+		     (ip->i_df.if_flags & XFS_IFEXTENTS) &&
+		    (!(ip->i_d.di_flags &
+				(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
+		     (ip->i_delayed_blks != 0)))) {
 			if ((error = xfs_inactive_free_eofblocks(mp, ip)))
 				return (VN_INACTIVE_CACHE);
 			/* Update linux inode block count after free above */
...
@@ -2593,7 +2580,6 @@ xfs_link(
 	int			cancel_flags;
 	int			committed;
 	vnode_t			*target_dir_vp;
-	bhv_desc_t		*src_bdp;
 	int			resblks;
 	char			*target_name = VNAME(dentry);
 	int			target_namelen;
...
@@ -2606,8 +2592,7 @@ xfs_link(
 	if (VN_ISDIR(src_vp))
 		return XFS_ERROR(EPERM);
 
-	src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops);
-	sip = XFS_BHVTOI(src_bdp);
+	sip = xfs_vtoi(src_vp);
 	tdp = XFS_BHVTOI(target_dir_bdp);
 	mp = tdp->i_mount;
 	if (XFS_FORCED_SHUTDOWN(mp))
...
@@ -3240,7 +3225,6 @@ xfs_readdir(
 	xfs_trans_t	*tp = NULL;
 	int		error = 0;
 	uint		lock_mode;
-	xfs_off_t	start_offset;
 
 	vn_trace_entry(BHV_TO_VNODE(dir_bdp), __FUNCTION__,
 					       (inst_t *)__return_address);
...
@@ -3251,11 +3235,7 @@ xfs_readdir(
 	}
 
 	lock_mode = xfs_ilock_map_shared(dp);
-	start_offset = uiop->uio_offset;
 	error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp);
-	if (start_offset != uiop->uio_offset) {
-		xfs_ichgtime(dp, XFS_ICHGTIME_ACC);
-	}
 	xfs_iunlock_map_shared(dp, lock_mode);
 	return error;
 }
...
@@ -3832,7 +3812,12 @@ xfs_reclaim(
 	vn_iowait(vp);
 
 	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
-	ASSERT(VN_CACHED(vp) == 0);
+
+	/*
+	 * Make sure the atime in the XFS inode is correct before freeing the
+	 * Linux inode.
+	 */
+	xfs_synchronize_atime(ip);
 
 	/* If we have nothing to flush with this inode then complete the
 	 * teardown now, otherwise break the link between the xfs inode
...
@@ -4002,42 +3987,36 @@ xfs_alloc_file_space(
 	int			alloc_type,
 	int			attr_flags)
 {
+	xfs_mount_t		*mp = ip->i_mount;
+	xfs_off_t		count;
 	xfs_filblks_t		allocated_fsb;
 	xfs_filblks_t		allocatesize_fsb;
-	int			committed;
-	xfs_off_t		count;
-	xfs_filblks_t		datablocks;
-	int			error;
+	xfs_extlen_t		extsz, temp;
+	xfs_fileoff_t		startoffset_fsb;
 	xfs_fsblock_t		firstfsb;
-	xfs_bmap_free_t		free_list;
-	xfs_bmbt_irec_t		*imapp;
-	xfs_bmbt_irec_t		imaps[1];
-	xfs_mount_t		*mp;
-	int			numrtextents;
-	int			reccount;
-	uint			resblks;
+	int			nimaps;
+	int			bmapi_flag;
+	int			quota_flag;
 	int			rt;
-	int			rtextsize;
-	xfs_fileoff_t		startoffset_fsb;
 	xfs_trans_t		*tp;
-	int			xfs_bmapi_flags;
+	xfs_bmbt_irec_t		imaps[1], *imapp;
+	xfs_bmap_free_t		free_list;
+	uint			qblocks, resblks, resrtextents;
+	int			committed;
+	int			error;
 
 	vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
-	mp = ip->i_mount;
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
 
-	/*
-	 * determine if this is a realtime file
-	 */
-	if ((rt = XFS_IS_REALTIME_INODE(ip)) != 0) {
-		if (ip->i_d.di_extsize)
-			rtextsize = ip->i_d.di_extsize;
-		else
-			rtextsize = mp->m_sb.sb_rextsize;
-	} else
-		rtextsize = 0;
+	rt = XFS_IS_REALTIME_INODE(ip);
+	if (unlikely(rt)) {
+		if (!(extsz = ip->i_d.di_extsize))
+			extsz = mp->m_sb.sb_rextsize;
+	} else {
+		extsz = ip->i_d.di_extsize;
+	}
 
 	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
 		return error;
...
@@ -4048,8 +4027,8 @@ xfs_alloc_file_space(
 	count = len;
 	error = 0;
 	imapp = &imaps[0];
-	reccount = 1;
-	xfs_bmapi_flags = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
+	nimaps = 1;
+	bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
 	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
...
@@ -4070,43 +4049,51 @@ xfs_alloc_file_space(
 	}
 
 	/*
-	 * allocate file space until done or until there is an error
+	 * Allocate file space until done or until there is an error
 	 */
 retry:
 	while (allocatesize_fsb && !error) {
+		xfs_fileoff_t	s, e;
+
 		/*
-		 * determine if reserving space on
-		 * the data or realtime partition.
+		 * Determine space reservations for data/realtime.
 		 */
-		if (rt) {
-			xfs_fileoff_t s, e;
-
+		if (unlikely(extsz)) {
 			s = startoffset_fsb;
-			do_div(s, rtextsize);
-			s *= rtextsize;
-			e = roundup_64(startoffset_fsb + allocatesize_fsb,
-				rtextsize);
-			numrtextents = (int)(e - s) / mp->m_sb.sb_rextsize;
-			datablocks = 0;
+			do_div(s, extsz);
+			s *= extsz;
+			e = startoffset_fsb + allocatesize_fsb;
+			if ((temp = do_mod(startoffset_fsb, extsz)))
+				e += temp;
+			if ((temp = do_mod(e, extsz)))
+				e += extsz - temp;
+		} else {
+			s = 0;
+			e = allocatesize_fsb;
+		}
+
+		if (unlikely(rt)) {
+			resrtextents = qblocks = (uint)(e - s);
+			resrtextents /= mp->m_sb.sb_rextsize;
+			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+			quota_flag = XFS_QMOPT_RES_RTBLKS;
 		} else {
-			datablocks = allocatesize_fsb;
-			numrtextents = 0;
+			resrtextents = 0;
+			resblks = qblocks = \
+				XFS_DIOSTRAT_SPACE_RES(mp, (uint)(e - s));
+			quota_flag = XFS_QMOPT_RES_REGBLKS;
 		}
 
 		/*
-		 * allocate and setup the transaction
+		 * Allocate and setup the transaction.
 		 */
 		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
-		resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
-		error = xfs_trans_reserve(tp,
-					  resblks,
-					  XFS_WRITE_LOG_RES(mp),
-					  numrtextents,
+		error = xfs_trans_reserve(tp, resblks,
+					  XFS_WRITE_LOG_RES(mp), resrtextents,
 					  XFS_TRANS_PERM_LOG_RES,
 					  XFS_WRITE_LOG_COUNT);
 
 		/*
-		 * check for running out of space
+		 * Check for running out of space
 		 */
 		if (error) {
 			/*
...
@@ -4117,8 +4104,8 @@ xfs_alloc_file_space(
 			break;
 		}
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		error = XFS_TRANS_RESERVE_QUOTA(mp, tp,
-				ip->i_udquot, ip->i_gdquot, resblks, 0, 0);
+		error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
+						      qblocks, 0, quota_flag);
 		if (error)
 			goto error1;
...
@@ -4126,19 +4113,19 @@ xfs_alloc_file_space(
 		xfs_trans_ihold(tp, ip);
 
 		/*
-		 * issue the bmapi() call to allocate the blocks
+		 * Issue the xfs_bmapi() call to allocate the blocks
 		 */
 		XFS_BMAP_INIT(&free_list, &firstfsb);
 		error = xfs_bmapi(tp, ip, startoffset_fsb,
-				  allocatesize_fsb, xfs_bmapi_flags,
-				  &firstfsb, 0, imapp, &reccount,
+				  allocatesize_fsb, bmapi_flag,
+				  &firstfsb, 0, imapp, &nimaps,
 				  &free_list);
 		if (error) {
 			goto error0;
 		}
 
 		/*
-		 * complete the transaction
+		 * Complete the transaction
 		 */
 		error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
 		if (error) {
...
@@ -4153,7 +4140,7 @@ xfs_alloc_file_space(
 		allocated_fsb = imapp->br_blockcount;
 
-		if (reccount == 0) {
+		if (nimaps == 0) {
 			error = XFS_ERROR(ENOSPC);
 			break;
 		}
...
@@ -4176,9 +4163,11 @@ xfs_alloc_file_space(
 	return error;
 
-error0:
+error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
 	xfs_bmap_cancel(&free_list);
+	XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag);
+
-error1:
+error1:	/* Just cancel transaction */
 	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	goto dmapi_enospc_check;
...
@@ -4423,8 +4412,8 @@ xfs_free_file_space(
 		}
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
 		error = XFS_TRANS_RESERVE_QUOTA(mp, tp,
-				ip->i_udquot, ip->i_gdquot, resblks, 0, rt ?
-				XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+				ip->i_udquot, ip->i_gdquot, resblks, 0,
+				XFS_QMOPT_RES_REGBLKS);
 		if (error)
 			goto error1;
...
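Note: the reworked xfs_alloc_file_space() above aligns the requested range to the extent size hint before reserving space: the start is rounded down to an extsz multiple with do_div(), and the end is padded up with do_mod(). A standalone worked model of that step (plain C stand-ins for the kernel's 64-bit div/mod helpers; names are illustrative):

#include <assert.h>
#include <stdint.h>

static void align_range(uint64_t start, uint64_t len, uint64_t extsz,
			uint64_t *s, uint64_t *e)
{
	uint64_t temp;

	*s = start - (start % extsz);		/* round start down */
	*e = start + len;
	if ((temp = start % extsz))		/* mirrors: e += do_mod(startoffset_fsb, extsz) */
		*e += temp;
	if ((temp = *e % extsz))		/* round end up to an extsz multiple */
		*e += extsz - temp;
}

int main(void)
{
	uint64_t s, e;

	/* 13 blocks starting at block 10 with a 4-block extent size hint */
	align_range(10, 13, 4, &s, &e);
	assert(s == 8);		/* 10 rounded down to a multiple of 4 */
	assert(e == 28);	/* 23, plus 2 for the start remainder, then up to 28 */
	return 0;
}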
mm/swap.c
...
@@ -384,6 +384,8 @@ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
 	return pagevec_count(pvec);
 }
 
+EXPORT_SYMBOL(pagevec_lookup);
+
 unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
 		pgoff_t *index, int tag, unsigned nr_pages)
 {
...