OpenHarmony / kernel_linux

commit e9cee8e6
Author: Alex Elder
Date:   May 19, 2010
Parents: 537b60d1, b4ed4626

    Merge branch 'master' into for-linus

Showing 39 changed files with 1905 additions and 2118 deletions (+1905, -2118)
 fs/xfs/linux-2.6/xfs_aops.c     | +136  -95
 fs/xfs/linux-2.6/xfs_buf.c      |  +12  -15
 fs/xfs/linux-2.6/xfs_buf.h      |   +1   -1
 fs/xfs/linux-2.6/xfs_file.c     |   +2   -0
 fs/xfs/linux-2.6/xfs_ioctl.c    |   +4   -0
 fs/xfs/linux-2.6/xfs_ioctl32.c  |   +4   -0
 fs/xfs/linux-2.6/xfs_iops.c     |   +4   -1
 fs/xfs/linux-2.6/xfs_super.c    |   +6   -4
 fs/xfs/linux-2.6/xfs_sync.c     |  +27  -64
 fs/xfs/linux-2.6/xfs_trace.c    |   +3   -1
 fs/xfs/linux-2.6/xfs_trace.h    | +146   -4
 fs/xfs/quota/xfs_dquot.c        |  +89 -104
 fs/xfs/quota/xfs_dquot.h        |   +5  -30
 fs/xfs/quota/xfs_dquot_item.c   |  +11  -19
 fs/xfs/quota/xfs_qm.c           | +225 -384
 fs/xfs/quota/xfs_qm.h           |   +6  -17
 fs/xfs/quota/xfs_qm_stats.c     |   +1   -1
 fs/xfs/quota/xfs_qm_syscalls.c  |  +71  -81
 fs/xfs/quota/xfs_quota_priv.h   |   +0 -102
 fs/xfs/quota/xfs_trans_dquot.c  |  +14  -15
 fs/xfs/xfs_bmap.c               |   +1   -1
 fs/xfs/xfs_buf_item.c           |  +23  -32
 fs/xfs/xfs_buf_item.h           |   +1   -1
 fs/xfs/xfs_error.c              |  +15  -15
 fs/xfs/xfs_error.h              |   +5   -4
 fs/xfs/xfs_extfree_item.c       |   +6  -12
 fs/xfs/xfs_inode.c              |   +2   -0
 fs/xfs/xfs_inode_item.c         |   +7  -14
 fs/xfs/xfs_iomap.c              |  +39  -84
 fs/xfs/xfs_iomap.h              |   +1  -46
 fs/xfs/xfs_log.c                | +445 -257
 fs/xfs/xfs_log.h                |  +13   -0
 fs/xfs/xfs_log_priv.h           |   +9   -3
 fs/xfs/xfs_log_recover.c        | +154 -157
 fs/xfs/xfs_mount.c              |   +0   -7
 fs/xfs/xfs_quota.h              |   +0   -3
 fs/xfs/xfs_trans.c              | +359 -401
 fs/xfs/xfs_trans.h              |  +11   -3
 fs/xfs/xfs_trans_buf.c          |  +47 -140
fs/xfs/linux-2.6/xfs_aops.c

@@ -45,6 +45,15 @@
 #include <linux/pagevec.h>
 #include <linux/writeback.h>

+/*
+ * Types of I/O for bmap clustering and I/O completion tracking.
+ */
+enum {
+    IO_READ,        /* mapping for a read */
+    IO_DELAY,       /* mapping covers delalloc region */
+    IO_UNWRITTEN,   /* mapping covers allocated but uninitialized data */
+    IO_NEW          /* just allocated */
+};
+
 /*
  * Prime number of hash buckets since address is used as the key.

@@ -103,8 +112,9 @@ xfs_count_page_state(
 STATIC struct block_device *
 xfs_find_bdev_for_inode(
-    struct xfs_inode    *ip)
+    struct inode        *inode)
 {
+    struct xfs_inode    *ip = XFS_I(inode);
     struct xfs_mount    *mp = ip->i_mount;

     if (XFS_IS_REALTIME_INODE(ip))

@@ -183,7 +193,7 @@ xfs_setfilesize(
     xfs_fsize_t     isize;

     ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-    ASSERT(ioend->io_type != IOMAP_READ);
+    ASSERT(ioend->io_type != IO_READ);

     if (unlikely(ioend->io_error))
         return 0;

@@ -214,7 +224,7 @@ xfs_finish_ioend(
     if (atomic_dec_and_test(&ioend->io_remaining)) {
         struct workqueue_struct *wq;

-        wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+        wq = (ioend->io_type == IO_UNWRITTEN) ?
             xfsconvertd_workqueue : xfsdatad_workqueue;
         queue_work(wq, &ioend->io_work);
         if (wait)

@@ -237,7 +247,7 @@ xfs_end_io(
      * For unwritten extents we need to issue transactions to convert a
      * range to normal written extens after the data I/O has finished.
      */
-    if (ioend->io_type == IOMAP_UNWRITTEN &&
+    if (ioend->io_type == IO_UNWRITTEN &&
         likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
         error = xfs_iomap_write_unwritten(ip, ioend->io_offset,

@@ -250,7 +260,7 @@ xfs_end_io(
      * We might have to update the on-disk file size after extending
      * writes.
      */
-    if (ioend->io_type != IOMAP_READ) {
+    if (ioend->io_type != IO_READ) {
         error = xfs_setfilesize(ioend);
         ASSERT(!error || error == EAGAIN);
     }

@@ -309,21 +319,25 @@ xfs_map_blocks(
     struct inode        *inode,
     loff_t              offset,
     ssize_t             count,
-    xfs_iomap_t         *mapp,
+    struct xfs_bmbt_irec *imap,
     int                 flags)
 {
     int                 nmaps = 1;
+    int                 new = 0;

-    return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
+    return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
 }

 STATIC int
-xfs_iomap_valid(
-    xfs_iomap_t         *iomapp,
-    loff_t              offset)
+xfs_imap_valid(
+    struct inode        *inode,
+    struct xfs_bmbt_irec *imap,
+    xfs_off_t           offset)
 {
-    return offset >= iomapp->iomap_offset &&
-        offset < iomapp->iomap_offset + iomapp->iomap_bsize;
+    offset >>= inode->i_blkbits;
+
+    return offset >= imap->br_startoff &&
+        offset < imap->br_startoff + imap->br_blockcount;
 }

@@ -554,19 +568,23 @@ xfs_add_to_ioend(
 STATIC void
 xfs_map_buffer(
+    struct inode        *inode,
     struct buffer_head  *bh,
-    xfs_iomap_t         *mp,
-    xfs_off_t           offset,
-    uint                block_bits)
+    struct xfs_bmbt_irec *imap,
+    xfs_off_t           offset)
 {
     sector_t            bn;
+    struct xfs_mount    *m = XFS_I(inode)->i_mount;
+    xfs_off_t           iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
+    xfs_daddr_t         iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

-    ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);
+    ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+    ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

-    bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
-          ((offset - mp->iomap_offset) >> block_bits);
+    bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
+          ((offset - iomap_offset) >> inode->i_blkbits);

-    ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));
+    ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

     bh->b_blocknr = bn;
     set_buffer_mapped(bh);

@@ -574,17 +592,17 @@ xfs_map_buffer(
 STATIC void
 xfs_map_at_offset(
+    struct inode        *inode,
     struct buffer_head  *bh,
-    loff_t              offset,
-    int                 block_bits,
-    xfs_iomap_t         *iomapp)
+    struct xfs_bmbt_irec *imap,
+    xfs_off_t           offset)
 {
-    ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
-    ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
+    ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+    ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

     lock_buffer(bh);
-    xfs_map_buffer(bh, iomapp, offset, block_bits);
-    bh->b_bdev = iomapp->iomap_target->bt_bdev;
+    xfs_map_buffer(inode, bh, imap, offset);
+    bh->b_bdev = xfs_find_bdev_for_inode(inode);
     set_buffer_mapped(bh);
     clear_buffer_delay(bh);
     clear_buffer_unwritten(bh);

@@ -713,11 +731,11 @@ xfs_is_delayed_page(
         bh = head = page_buffers(page);
         do {
             if (buffer_unwritten(bh))
-                acceptable = (type == IOMAP_UNWRITTEN);
+                acceptable = (type == IO_UNWRITTEN);
             else if (buffer_delay(bh))
-                acceptable = (type == IOMAP_DELAY);
+                acceptable = (type == IO_DELAY);
             else if (buffer_dirty(bh) && buffer_mapped(bh))
-                acceptable = (type == IOMAP_NEW);
+                acceptable = (type == IO_NEW);
             else
                 break;
         } while ((bh = bh->b_this_page) != head);

@@ -740,7 +758,7 @@ xfs_convert_page(
     struct inode        *inode,
     struct page         *page,
     loff_t              tindex,
-    xfs_iomap_t         *mp,
+    struct xfs_bmbt_irec *imap,
     xfs_ioend_t         **ioendp,
     struct writeback_control *wbc,
     int                 startio,

@@ -750,7 +768,6 @@ xfs_convert_page(
     xfs_off_t           end_offset;
     unsigned long       p_offset;
     unsigned int        type;
-    int                 bbits = inode->i_blkbits;
     int                 len, page_dirty;
     int                 count = 0, done = 0, uptodate = 1;
     xfs_off_t           offset = page_offset(page);

@@ -802,19 +819,19 @@ xfs_convert_page(
         if (buffer_unwritten(bh) || buffer_delay(bh)) {
             if (buffer_unwritten(bh))
-                type = IOMAP_UNWRITTEN;
+                type = IO_UNWRITTEN;
             else
-                type = IOMAP_DELAY;
+                type = IO_DELAY;

-            if (!xfs_iomap_valid(mp, offset)) {
+            if (!xfs_imap_valid(inode, imap, offset)) {
                 done = 1;
                 continue;
             }

-            ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
-            ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
+            ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+            ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

-            xfs_map_at_offset(bh, offset, bbits, mp);
+            xfs_map_at_offset(inode, bh, imap, offset);
             if (startio) {
                 xfs_add_to_ioend(inode, bh, offset,
                         type, ioendp, done);

@@ -826,7 +843,7 @@ xfs_convert_page(
             page_dirty--;
             count++;
         } else {
-            type = IOMAP_NEW;
+            type = IO_NEW;
             if (buffer_mapped(bh) && all_bh && startio) {
                 lock_buffer(bh);
                 xfs_add_to_ioend(inode, bh, offset,

@@ -866,7 +883,7 @@ STATIC void
 xfs_cluster_write(
     struct inode        *inode,
     pgoff_t             tindex,
-    xfs_iomap_t         *iomapp,
+    struct xfs_bmbt_irec *imap,
     xfs_ioend_t         **ioendp,
     struct writeback_control *wbc,
     int                 startio,

@@ -885,7 +902,7 @@ xfs_cluster_write(
         for (i = 0; i < pagevec_count(&pvec); i++) {
             done = xfs_convert_page(inode, pvec.pages[i], tindex++,
-                    iomapp, ioendp, wbc, startio, all_bh);
+                    imap, ioendp, wbc, startio, all_bh);
             if (done)
                 break;
         }

@@ -930,7 +947,7 @@ xfs_aops_discard_page(
     loff_t              offset = page_offset(page);
     ssize_t             len = 1 << inode->i_blkbits;

-    if (!xfs_is_delayed_page(page, IOMAP_DELAY))
+    if (!xfs_is_delayed_page(page, IO_DELAY))
         goto out_invalidate;

     if (XFS_FORCED_SHUTDOWN(ip->i_mount))

@@ -1042,15 +1059,15 @@ xfs_page_state_convert(
     int         unmapped) /* also implies page uptodate */
 {
     struct buffer_head  *bh, *head;
-    xfs_iomap_t         iomap;
+    struct xfs_bmbt_irec imap;
     xfs_ioend_t         *ioend = NULL, *iohead = NULL;
     loff_t              offset;
     unsigned long       p_offset = 0;
     unsigned int        type;
     __uint64_t          end_offset;
-    pgoff_t             end_index, last_index, tlast;
+    pgoff_t             end_index, last_index;
     ssize_t             size, len;
-    int                 flags, err, iomap_valid = 0, uptodate = 1;
+    int                 flags, err, imap_valid = 0, uptodate = 1;
     int                 page_dirty, count = 0;
     int                 trylock = 0;
     int                 all_bh = unmapped;

@@ -1097,7 +1114,7 @@ xfs_page_state_convert(
     bh = head = page_buffers(page);
     offset = page_offset(page);
     flags = BMAPI_READ;
-    type = IOMAP_NEW;
+    type = IO_NEW;

     /* TODO: cleanup count and page_dirty */

@@ -1111,12 +1128,12 @@ xfs_page_state_convert(
              * the iomap is actually still valid, but the ioend
              * isn't.  shouldn't happen too often.
              */
-            iomap_valid = 0;
+            imap_valid = 0;
             continue;
         }

-        if (iomap_valid)
-            iomap_valid = xfs_iomap_valid(&iomap, offset);
+        if (imap_valid)
+            imap_valid = xfs_imap_valid(inode, &imap, offset);

         /*
          * First case, map an unwritten extent and prepare for

@@ -1137,20 +1154,20 @@ xfs_page_state_convert(
              * Make sure we don't use a read-only iomap
              */
             if (flags == BMAPI_READ)
-                iomap_valid = 0;
+                imap_valid = 0;

             if (buffer_unwritten(bh)) {
-                type = IOMAP_UNWRITTEN;
+                type = IO_UNWRITTEN;
                 flags = BMAPI_WRITE | BMAPI_IGNSTATE;
             } else if (buffer_delay(bh)) {
-                type = IOMAP_DELAY;
+                type = IO_DELAY;
                 flags = BMAPI_ALLOCATE | trylock;
             } else {
-                type = IOMAP_NEW;
+                type = IO_NEW;
                 flags = BMAPI_WRITE | BMAPI_MMAP;
             }

-            if (!iomap_valid) {
+            if (!imap_valid) {
                 /*
                  * if we didn't have a valid mapping then we
                  * need to ensure that we put the new mapping

@@ -1160,7 +1177,7 @@ xfs_page_state_convert(
                  * for unwritten extent conversion.
                  */
                 new_ioend = 1;
-                if (type == IOMAP_NEW) {
+                if (type == IO_NEW) {
                     size = xfs_probe_cluster(inode,
                             page, bh, head, 0);
                 } else {

@@ -1168,14 +1185,14 @@ xfs_page_state_convert(
                 }

                 err = xfs_map_blocks(inode, offset, size,
-                        &iomap, flags);
+                        &imap, flags);
                 if (err)
                     goto error;
-                iomap_valid = xfs_iomap_valid(&iomap, offset);
+                imap_valid = xfs_imap_valid(inode, &imap, offset);
             }
-            if (iomap_valid) {
-                xfs_map_at_offset(bh, offset,
-                        inode->i_blkbits, &iomap);
+            if (imap_valid) {
+                xfs_map_at_offset(inode, bh, &imap, offset);
                 if (startio) {
                     xfs_add_to_ioend(inode, bh, offset,
                             type, &ioend,

@@ -1194,40 +1211,41 @@ xfs_page_state_convert(
              * That means it must already have extents allocated
              * underneath it. Map the extent by reading it.
              */
-            if (!iomap_valid || flags != BMAPI_READ) {
+            if (!imap_valid || flags != BMAPI_READ) {
                 flags = BMAPI_READ;
                 size = xfs_probe_cluster(inode, page, bh,
                                 head, 1);
                 err = xfs_map_blocks(inode, offset, size,
-                        &iomap, flags);
+                        &imap, flags);
                 if (err)
                     goto error;
-                iomap_valid = xfs_iomap_valid(&iomap, offset);
+                imap_valid = xfs_imap_valid(inode, &imap, offset);
             }

             /*
-             * We set the type to IOMAP_NEW in case we are doing a
+             * We set the type to IO_NEW in case we are doing a
              * small write at EOF that is extending the file but
              * without needing an allocation. We need to update the
              * file size on I/O completion in this case so it is
              * the same case as having just allocated a new extent
              * that we are writing into for the first time.
              */
-            type = IOMAP_NEW;
+            type = IO_NEW;
             if (trylock_buffer(bh)) {
                 ASSERT(buffer_mapped(bh));
-                if (iomap_valid)
+                if (imap_valid)
                     all_bh = 1;
                 xfs_add_to_ioend(inode, bh, offset, type,
-                        &ioend, !iomap_valid);
+                        &ioend, !imap_valid);
                 page_dirty--;
                 count++;
             } else {
-                iomap_valid = 0;
+                imap_valid = 0;
             }
         } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
                (unmapped || startio)) {
-            iomap_valid = 0;
+            imap_valid = 0;
         }

         if (!iohead)

@@ -1241,12 +1259,23 @@ xfs_page_state_convert(
     if (startio)
         xfs_start_page_writeback(page, 1, count);

-    if (ioend && iomap_valid) {
-        offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
-                    PAGE_CACHE_SHIFT;
-        tlast = min_t(pgoff_t, offset, last_index);
-        xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
-                    wbc, startio, all_bh, tlast);
+    if (ioend && imap_valid) {
+        xfs_off_t       end_index;
+
+        end_index = imap.br_startoff + imap.br_blockcount;
+
+        /* to bytes */
+        end_index <<= inode->i_blkbits;
+
+        /* to pages */
+        end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;
+
+        /* check against file size */
+        if (end_index > last_index)
+            end_index = last_index;
+
+        xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
+                    wbc, startio, all_bh, end_index);
     }

     if (iohead)

@@ -1448,10 +1477,11 @@ __xfs_get_blocks(
     int                 direct,
     bmapi_flags_t       flags)
 {
-    xfs_iomap_t         iomap;
+    struct xfs_bmbt_irec imap;
     xfs_off_t           offset;
     ssize_t             size;
-    int                 niomap = 1;
+    int                 nimap = 1;
+    int                 new = 0;
     int                 error;

     offset = (xfs_off_t)iblock << inode->i_blkbits;

@@ -1462,22 +1492,21 @@ __xfs_get_blocks(
         return 0;

     error = xfs_iomap(XFS_I(inode), offset, size,
-                 create ? flags : BMAPI_READ, &iomap, &niomap);
+                 create ? flags : BMAPI_READ, &imap, &nimap, &new);
     if (error)
         return -error;
-    if (niomap == 0)
+    if (nimap == 0)
         return 0;

-    if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
+    if (imap.br_startblock != HOLESTARTBLOCK &&
+        imap.br_startblock != DELAYSTARTBLOCK) {
         /*
          * For unwritten extents do not report a disk address on
          * the read case (treat as if we're reading into a hole).
          */
-        if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
-            xfs_map_buffer(bh_result, &iomap, offset,
-                       inode->i_blkbits);
-        }
-        if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
+        if (create || !ISUNWRITTEN(&imap))
+            xfs_map_buffer(inode, bh_result, &imap, offset);
+        if (create && ISUNWRITTEN(&imap)) {
             if (direct)
                 bh_result->b_private = inode;
             set_buffer_unwritten(bh_result);

@@ -1488,7 +1517,7 @@ __xfs_get_blocks(
      * If this is a realtime file, data may be on a different device.
      * to that pointed to from the buffer_head b_bdev currently.
      */
-    bh_result->b_bdev = iomap.iomap_target->bt_bdev;
+    bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

     /*
      * If we previously allocated a block out beyond eof and we are now

@@ -1502,10 +1531,10 @@ __xfs_get_blocks(
     if (create &&
         ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
          (offset >= i_size_read(inode)) ||
-         (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
+         (new || ISUNWRITTEN(&imap))))
         set_buffer_new(bh_result);

-    if (iomap.iomap_flags & IOMAP_DELAY) {
+    if (imap.br_startblock == DELAYSTARTBLOCK) {
         BUG_ON(direct);
         if (create) {
             set_buffer_uptodate(bh_result);

@@ -1514,11 +1543,23 @@ __xfs_get_blocks(
     }

     /*
      * If this is O_DIRECT or the mpage code calling tell them how large
      * the mapping is, so that we can avoid repeated get_blocks calls.
      */
     if (direct || size > (1 << inode->i_blkbits)) {
-        ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
-        offset = min_t(xfs_off_t,
-                iomap.iomap_bsize - iomap.iomap_delta, size);
-        bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
+        xfs_off_t       mapping_size;
+
+        mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
+        mapping_size <<= inode->i_blkbits;
+
+        ASSERT(mapping_size > 0);
+        if (mapping_size > size)
+            mapping_size = size;
+        if (mapping_size > LONG_MAX)
+            mapping_size = LONG_MAX;
+
+        bh_result->b_size = mapping_size;
     }

     return 0;

@@ -1576,7 +1617,7 @@ xfs_end_io_direct(
      */
     ioend->io_offset = offset;
     ioend->io_size = size;
-    if (ioend->io_type == IOMAP_READ) {
+    if (ioend->io_type == IO_READ) {
         xfs_finish_ioend(ioend, 0);
     } else if (private && size > 0) {
         xfs_finish_ioend(ioend, is_sync_kiocb(iocb));

@@ -1587,7 +1628,7 @@ xfs_end_io_direct(
          * didn't map an unwritten extent so switch it's completion
          * handler.
          */
-        ioend->io_type = IOMAP_NEW;
+        ioend->io_type = IO_NEW;
         xfs_finish_ioend(ioend, 0);
     }

@@ -1612,10 +1653,10 @@ xfs_vm_direct_IO(
     struct block_device *bdev;
     ssize_t     ret;

-    bdev = xfs_find_bdev_for_inode(XFS_I(inode));
+    bdev = xfs_find_bdev_for_inode(inode);

     iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
-                    IOMAP_UNWRITTEN : IOMAP_READ);
+                    IO_UNWRITTEN : IO_READ);

     ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
                         offset, nr_segs,
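The recurring change in this file is the switch from the byte-based xfs_iomap_t to the fs-block-based struct xfs_bmbt_irec, so the validity test and the buffer-head block number are now derived by shifting byte offsets by inode->i_blkbits. The following is a minimal userspace sketch of that arithmetic only, with simplified stand-in types; in particular, xfs_fsb_to_db is modeled as a plain shift here, which glosses over the real allocation-group translation the kernel helper performs.

#include <stdio.h>
#include <stdint.h>

#define BBSHIFT 9                       /* 512-byte basic blocks, as in XFS */

/* simplified stand-in for struct xfs_bmbt_irec */
struct irec {
    uint64_t br_startoff;               /* first file offset, in fs blocks */
    uint64_t br_startblock;             /* first disk block, in fs blocks */
    uint64_t br_blockcount;             /* extent length, in fs blocks */
};

/* mirrors the new xfs_imap_valid(): compare in fs-block units */
static int imap_valid(unsigned blkbits, const struct irec *imap, uint64_t offset)
{
    offset >>= blkbits;
    return offset >= imap->br_startoff &&
           offset < imap->br_startoff + imap->br_blockcount;
}

/* mirrors the bn computation in the new xfs_map_buffer() */
static uint64_t map_block(unsigned blkbits, const struct irec *imap, uint64_t offset)
{
    uint64_t iomap_offset = imap->br_startoff << blkbits;           /* to bytes */
    /* ASSUMPTION: disk address modeled as a shift, not real xfs_fsb_to_db() */
    uint64_t iomap_bn = imap->br_startblock << (blkbits - BBSHIFT); /* to sectors */

    return (iomap_bn >> (blkbits - BBSHIFT)) +
           ((offset - iomap_offset) >> blkbits);
}

int main(void)
{
    struct irec map = { .br_startoff = 10, .br_startblock = 100, .br_blockcount = 4 };
    unsigned blkbits = 12;              /* 4096-byte fs blocks */
    uint64_t off = 11ULL << blkbits;    /* second block of the extent */

    /* prints "valid=1 block=101": one block past br_startblock */
    printf("valid=%d block=%llu\n", imap_valid(blkbits, &map, off),
           (unsigned long long)map_block(blkbits, &map, off));
    return 0;
}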
fs/xfs/linux-2.6/xfs_buf.c

@@ -1007,25 +1007,20 @@ xfs_bwrite(
     struct xfs_mount    *mp,
     struct xfs_buf      *bp)
 {
-    int                 iowait = (bp->b_flags & XBF_ASYNC) == 0;
-    int                 error = 0;
+    int                 error;

     bp->b_strat = xfs_bdstrat_cb;
     bp->b_mount = mp;
     bp->b_flags |= XBF_WRITE;
-    if (!iowait)
-        bp->b_flags |= _XBF_RUN_QUEUES;
+    bp->b_flags &= ~(XBF_ASYNC | XBF_READ);

     xfs_buf_delwri_dequeue(bp);
     xfs_buf_iostrategy(bp);

-    if (iowait) {
-        error = xfs_buf_iowait(bp);
-        if (error)
-            xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
-        xfs_buf_relse(bp);
-    }
+    error = xfs_buf_iowait(bp);
+    if (error)
+        xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+    xfs_buf_relse(bp);

     return error;
 }

@@ -1614,7 +1609,8 @@ xfs_mapping_buftarg(
 STATIC int
 xfs_alloc_delwrite_queue(
-    xfs_buftarg_t       *btp)
+    xfs_buftarg_t       *btp,
+    const char          *fsname)
 {
     int     error = 0;

@@ -1622,7 +1618,7 @@ xfs_alloc_delwrite_queue(
     INIT_LIST_HEAD(&btp->bt_delwrite_queue);
     spin_lock_init(&btp->bt_delwrite_lock);
     btp->bt_flags = 0;
-    btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
+    btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
     if (IS_ERR(btp->bt_task)) {
         error = PTR_ERR(btp->bt_task);
         goto out_error;

@@ -1635,7 +1631,8 @@ xfs_alloc_delwrite_queue(
 xfs_buftarg_t *
 xfs_alloc_buftarg(
     struct block_device *bdev,
-    int                 external)
+    int                 external,
+    const char          *fsname)
 {
     xfs_buftarg_t       *btp;

@@ -1647,7 +1644,7 @@ xfs_alloc_buftarg(
         goto error;
     if (xfs_mapping_buftarg(btp, bdev))
         goto error;
-    if (xfs_alloc_delwrite_queue(btp))
+    if (xfs_alloc_delwrite_queue(btp, fsname))
         goto error;
     xfs_alloc_bufhash(btp, external);
     return btp;
fs/xfs/linux-2.6/xfs_buf.h

@@ -390,7 +390,7 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
 /*
  *  Handling of buftargs.
  */
-extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
+extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int, const char *);
 extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
 extern void xfs_wait_buftarg(xfs_buftarg_t *);
 extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
fs/xfs/linux-2.6/xfs_file.c

@@ -115,6 +115,8 @@ xfs_file_fsync(
     xfs_iflags_clear(ip, XFS_ITRUNCATED);

+    xfs_ioend_wait(ip);
+
     /*
      * We always need to make sure that the required inode state is safe on
      * disk.  The inode might be clean but we still might need to force the
fs/xfs/linux-2.6/xfs_ioctl.c

@@ -527,6 +527,10 @@ xfs_attrmulti_by_handle(
     if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
         return -XFS_ERROR(EFAULT);

+    /* overflow check */
+    if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
+        return -E2BIG;
+
     dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
     if (IS_ERR(dentry))
         return PTR_ERR(dentry);
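The added guard is a standard overflow-safe bound: rather than computing opcount * sizeof(op) and checking the product, it compares opcount against INT_MAX / sizeof(op) so the multiplication can never wrap. A small standalone sketch of the pattern (the struct here is an illustrative stand-in, not the kernel's xfs_attr_multiop_t):

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* illustrative stand-in; only its size matters for the check */
struct multiop { int opcode; void *uaddr; size_t len; };

/*
 * Overflow-safe size check: divide the limit by the element size up
 * front instead of multiplying first and testing a possibly-wrapped
 * product.
 */
static int check_opcount(unsigned int opcount)
{
    if (opcount >= INT_MAX / sizeof(struct multiop))
        return -1;                                  /* reject, as with -E2BIG */
    return (int)(opcount * sizeof(struct multiop)); /* safe allocation size */
}

int main(void)
{
    printf("%d\n", check_opcount(10));              /* small count: fine */
    printf("%d\n", check_opcount(UINT_MAX / 2));    /* huge count: rejected */
    return 0;
}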
fs/xfs/linux-2.6/xfs_ioctl32.c

@@ -420,6 +420,10 @@ xfs_compat_attrmulti_by_handle(
            sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
         return -XFS_ERROR(EFAULT);

+    /* overflow check */
+    if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t))
+        return -E2BIG;
+
     dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
     if (IS_ERR(dentry))
         return PTR_ERR(dentry);
fs/xfs/linux-2.6/xfs_iops.c

@@ -673,7 +673,10 @@ xfs_vn_fiemap(
     bm.bmv_length = BTOBB(length);

     /* We add one because in getbmap world count includes the header */
-    bm.bmv_count = fieinfo->fi_extents_max + 1;
+    bm.bmv_count = !fieinfo->fi_extents_max ? MAXEXTNUM :
+                    fieinfo->fi_extents_max + 1;
+    bm.bmv_count = min_t(__s32, bm.bmv_count,
+                 (PAGE_SIZE * 16 / sizeof(struct getbmapx)));
     bm.bmv_iflags = BMV_IF_PREALLOC;
     if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
         bm.bmv_iflags |= BMV_IF_ATTRFORK;
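The fiemap change picks a maximum extent count when the caller passes no limit, then clamps the count so the later getbmapx allocation stays bounded. A sketch of that choose-then-clamp logic (the constants and the struct size are stand-ins, not the kernel's values):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE   4096
#define MAXEXTNUM   ((int32_t)0x7fffffff)   /* illustrative stand-in */

struct getbmapx_like { int64_t v[6]; };     /* only the size matters here */

/* 0 means "no caller limit": start from the maximum, then clamp to what
 * a bounded allocation can hold (the min_t(__s32, ...) in the diff) */
static int32_t fiemap_count(uint32_t extents_max)
{
    int32_t count = !extents_max ? MAXEXTNUM : (int32_t)extents_max + 1;
    int32_t cap = PAGE_SIZE * 16 / sizeof(struct getbmapx_like);

    return count < cap ? count : cap;
}

int main(void)
{
    /* prints "1365 101": unlimited request clamped, small request +1 */
    printf("%d %d\n", fiemap_count(0), fiemap_count(100));
    return 0;
}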
fs/xfs/linux-2.6/xfs_super.c

@@ -789,18 +789,18 @@ xfs_open_devices(
      * Setup xfs_mount buffer target pointers
      */
     error = ENOMEM;
-    mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0);
+    mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0, mp->m_fsname);
     if (!mp->m_ddev_targp)
         goto out_close_rtdev;

     if (rtdev) {
-        mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1);
+        mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1, mp->m_fsname);
         if (!mp->m_rtdev_targp)
             goto out_free_ddev_targ;
     }

     if (logdev && logdev != ddev) {
-        mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1);
+        mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1, mp->m_fsname);
         if (!mp->m_logdev_targp)
             goto out_free_rtdev_targ;
     } else {

@@ -902,7 +902,8 @@ xfsaild_start(
     struct xfs_ail  *ailp)
 {
     ailp->xa_target = 0;
-    ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild");
+    ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
+                    ailp->xa_mount->m_fsname);
     if (IS_ERR(ailp->xa_task))
         return -PTR_ERR(ailp->xa_task);
     return 0;

@@ -1092,6 +1093,7 @@ xfs_fs_write_inode(
          * the code will only flush the inode if it isn't already
          * being flushed.
          */
+        xfs_ioend_wait(ip);
         xfs_ilock(ip, XFS_ILOCK_SHARED);
         if (ip->i_update_core) {
             error = xfs_log_inode(ip);
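These hunks (together with the xfs_buf.c ones) thread mp->m_fsname down to every kthread_run() call, so each daemon gets a per-filesystem thread name via the printf-style name argument. A userspace model of that naming (the 16-byte buffer mirrors the kernel's TASK_COMM_LEN; spawning the thread itself is stubbed out):

#include <stdio.h>

/* a sketch: build the daemon name the way kthread_run()'s format
 * argument does, e.g. "xfsaild/sda1" instead of a bare "xfsaild" */
static void start_daemon(const char *namefmt, const char *fsname)
{
    char comm[16];      /* kernel task names are similarly short */

    snprintf(comm, sizeof(comm), namefmt, fsname);
    printf("started %s\n", comm);   /* stand-in for spawning the thread */
}

int main(void)
{
    start_daemon("xfsbufd/%s", "sda1");
    start_daemon("xfsaild/%s", "sda1");
    start_daemon("xfssyncd/%s", "sda1");
    return 0;
}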
fs/xfs/linux-2.6/xfs_sync.c

@@ -356,68 +356,23 @@ xfs_commit_dummy_trans(
 STATIC int
 xfs_sync_fsdata(
-    struct xfs_mount    *mp,
-    int                 flags)
+    struct xfs_mount    *mp)
 {
     struct xfs_buf      *bp;
-    struct xfs_buf_log_item *bip;
-    int                 error = 0;
-
-    /*
-     * If this is xfssyncd() then only sync the superblock if we can
-     * lock it without sleeping and it is not pinned.
-     */
-    if (flags & SYNC_TRYLOCK) {
-        ASSERT(!(flags & SYNC_WAIT));
-
-        bp = xfs_getsb(mp, XBF_TRYLOCK);
-        if (!bp)
-            goto out;
-
-        bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
-        if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
-            goto out_brelse;
-    } else {
-        bp = xfs_getsb(mp, 0);
-
-        /*
-         * If the buffer is pinned then push on the log so we won't
-         * get stuck waiting in the write for someone, maybe
-         * ourselves, to flush the log.
-         *
-         * Even though we just pushed the log above, we did not have
-         * the superblock buffer locked at that point so it can
-         * become pinned in between there and here.
-         */
-        if (XFS_BUF_ISPINNED(bp))
-            xfs_log_force(mp, 0);
-    }
-
-    if (flags & SYNC_WAIT)
-        XFS_BUF_UNASYNC(bp);
-    else
-        XFS_BUF_ASYNC(bp);
-
-    error = xfs_bwrite(mp, bp);
-    if (error)
-        return error;
-
-    /*
-     * If this is a data integrity sync make sure all pending buffers
-     * are flushed out for the log coverage check below.
-     */
-    if (flags & SYNC_WAIT)
-        xfs_flush_buftarg(mp->m_ddev_targp, 1);
-
-    if (xfs_log_need_covered(mp))
-        error = xfs_commit_dummy_trans(mp, flags);
-    return error;
-
- out_brelse:
-    xfs_buf_relse(bp);
- out:
-    return error;
+    /*
+     * If the buffer is pinned then push on the log so we won't get stuck
+     * waiting in the write for someone, maybe ourselves, to flush the log.
+     *
+     * Even though we just pushed the log above, we did not have the
+     * superblock buffer locked at that point so it can become pinned in
+     * between there and here.
+     */
+    bp = xfs_getsb(mp, 0);
+    if (XFS_BUF_ISPINNED(bp))
+        xfs_log_force(mp, 0);
+
+    return xfs_bwrite(mp, bp);
 }

@@ -441,7 +396,7 @@ int
 xfs_quiesce_data(
     struct xfs_mount    *mp)
 {
-    int error;
+    int error, error2 = 0;

     /* push non-blocking */
     xfs_sync_data(mp, 0);

@@ -452,13 +407,20 @@ xfs_quiesce_data(
     xfs_qm_sync(mp, SYNC_WAIT);

     /* write superblock and hoover up shutdown errors */
-    error = xfs_sync_fsdata(mp, SYNC_WAIT);
+    error = xfs_sync_fsdata(mp);
+
+    /* make sure all delwri buffers are written out */
+    xfs_flush_buftarg(mp->m_ddev_targp, 1);
+
+    /* mark the log as covered if needed */
+    if (xfs_log_need_covered(mp))
+        error2 = xfs_commit_dummy_trans(mp, SYNC_WAIT);

     /* flush data-only devices */
     if (mp->m_rtdev_targp)
         XFS_bflush(mp->m_rtdev_targp);

-    return error;
+    return error ? error : error2;
 }

@@ -581,9 +543,9 @@ xfs_flush_inodes(
 }

 /*
- * Every sync period we need to unpin all items, reclaim inodes, sync
- * quota and write out the superblock. We might need to cover the log
- * to indicate it is idle.
+ * Every sync period we need to unpin all items, reclaim inodes and sync
+ * disk quotas.  We might need to cover the log to indicate that the
+ * filesystem is idle.
  */
 STATIC void
 xfs_sync_worker(

@@ -597,7 +559,8 @@ xfs_sync_worker(
         xfs_reclaim_inodes(mp, 0);
         /* dgc: errors ignored here */
         error = xfs_qm_sync(mp, SYNC_TRYLOCK);
-        error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
+        if (xfs_log_need_covered(mp))
+            error = xfs_commit_dummy_trans(mp, 0);
     }
     mp->m_sync_seq++;
     wake_up(&mp->m_wait_single_sync_task);

@@ -660,7 +623,7 @@ xfs_syncd_init(
     mp->m_sync_work.w_syncer = xfs_sync_worker;
     mp->m_sync_work.w_mount = mp;
     mp->m_sync_work.w_completion = NULL;
-    mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
+    mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
     if (IS_ERR(mp->m_sync_task))
         return -PTR_ERR(mp->m_sync_task);
     return 0;
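xfs_quiesce_data() now keeps two error variables so every teardown step still runs after a failure, while the first error is the one reported. A compact sketch of that pattern (the step functions are stubs; the -5 return merely simulates a failing secondary step):

#include <stdio.h>

/* stand-ins for the sync steps; return 0 on success, -errno on failure */
static int write_superblock(void)    { return 0; }
static int flush_buffers(void)       { return 0; }
static int cover_log_if_needed(void) { return -5; /* pretend failure */ }

/*
 * Run every step even if an earlier one failed, remember the first
 * error separately from later ones, and report the earliest failure.
 */
static int quiesce_data(void)
{
    int error, error2 = 0;

    error = write_superblock();     /* primary step; its error wins */

    flush_buffers();                /* still run the remaining steps */

    error2 = cover_log_if_needed(); /* reported only if the primary
                                       step succeeded */

    return error ? error : error2;
}

int main(void)
{
    printf("quiesce_data() = %d\n", quiesce_data());    /* prints -5 */
    return 0;
}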
fs/xfs/linux-2.6/xfs_trace.c

@@ -41,7 +41,6 @@
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
 #include "xfs_attr.h"
 #include "xfs_attr_sf.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_log_priv.h"
-#include "xfs_buf_item.h"

@@ -50,6 +49,9 @@
 #include "xfs_aops.h"
 #include "quota/xfs_dquot_item.h"
 #include "quota/xfs_dquot.h"
+#include "xfs_log_recover.h"
+#include "xfs_buf_item.h"
+#include "xfs_inode_item.h"

 /*
  * We include this last to have the helpers above available for the trace
fs/xfs/linux-2.6/xfs_trace.h

@@ -32,6 +32,10 @@ struct xfs_da_node_entry;
 struct xfs_dquot;
 struct xlog_ticket;
 struct log;
+struct xlog_recover;
+struct xlog_recover_item;
+struct xfs_buf_log_format;
+struct xfs_inode_log_format;

 DECLARE_EVENT_CLASS(xfs_attr_list_class,
     TP_PROTO(struct xfs_attr_list_context *ctx),

@@ -562,18 +566,21 @@ DECLARE_EVENT_CLASS(xfs_inode_class,
         __field(dev_t, dev)
         __field(xfs_ino_t, ino)
         __field(int, count)
+        __field(int, pincount)
         __field(unsigned long, caller_ip)
     ),
     TP_fast_assign(
         __entry->dev = VFS_I(ip)->i_sb->s_dev;
         __entry->ino = ip->i_ino;
         __entry->count = atomic_read(&VFS_I(ip)->i_count);
+        __entry->pincount = atomic_read(&ip->i_pincount);
         __entry->caller_ip = caller_ip;
     ),
-    TP_printk("dev %d:%d ino 0x%llx count %d caller %pf",
+    TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pf",
           MAJOR(__entry->dev), MINOR(__entry->dev),
           __entry->ino,
           __entry->count,
+          __entry->pincount,
           (char *)__entry->caller_ip)
 )

@@ -583,6 +590,10 @@ DEFINE_EVENT(xfs_inode_class, name, \
     TP_ARGS(ip, caller_ip))
 DEFINE_INODE_EVENT(xfs_ihold);
 DEFINE_INODE_EVENT(xfs_irele);
+DEFINE_INODE_EVENT(xfs_inode_pin);
+DEFINE_INODE_EVENT(xfs_inode_unpin);
+DEFINE_INODE_EVENT(xfs_inode_unpin_nowait);
+
 /* the old xfs_itrace_entry tracer - to be replaced by s.th. in the VFS */
 DEFINE_INODE_EVENT(xfs_inode);
 #define xfs_itrace_entry(ip)    \

@@ -642,8 +653,6 @@ DEFINE_EVENT(xfs_dquot_class, name, \
     TP_PROTO(struct xfs_dquot *dqp), \
     TP_ARGS(dqp))
 DEFINE_DQUOT_EVENT(xfs_dqadjust);
-DEFINE_DQUOT_EVENT(xfs_dqshake_dirty);
-DEFINE_DQUOT_EVENT(xfs_dqshake_unlink);
 DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
 DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
 DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);

@@ -658,7 +667,6 @@ DEFINE_DQUOT_EVENT(xfs_dqread_fail);
 DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
 DEFINE_DQUOT_EVENT(xfs_dqlookup_want);
 DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist);
-DEFINE_DQUOT_EVENT(xfs_dqlookup_move);
 DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
 DEFINE_DQUOT_EVENT(xfs_dqget_hit);
 DEFINE_DQUOT_EVENT(xfs_dqget_miss);

@@ -1495,6 +1503,140 @@ DEFINE_EVENT(xfs_swap_extent_class, name, \
 DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
 DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);

+DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
+    TP_PROTO(struct log *log, struct xlog_recover *trans,
+        struct xlog_recover_item *item, int pass),
+    TP_ARGS(log, trans, item, pass),
+    TP_STRUCT__entry(
+        __field(dev_t, dev)
+        __field(unsigned long, item)
+        __field(xlog_tid_t, tid)
+        __field(int, type)
+        __field(int, pass)
+        __field(int, count)
+        __field(int, total)
+    ),
+    TP_fast_assign(
+        __entry->dev = log->l_mp->m_super->s_dev;
+        __entry->item = (unsigned long)item;
+        __entry->tid = trans->r_log_tid;
+        __entry->type = ITEM_TYPE(item);
+        __entry->pass = pass;
+        __entry->count = item->ri_cnt;
+        __entry->total = item->ri_total;
+    ),
+    TP_printk("dev %d:%d trans 0x%x, pass %d, item 0x%p, item type %s "
+          "item region count/total %d/%d",
+          MAJOR(__entry->dev), MINOR(__entry->dev),
+          __entry->tid,
+          __entry->pass,
+          (void *)__entry->item,
+          __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
+          __entry->count,
+          __entry->total)
+)
+
+#define DEFINE_LOG_RECOVER_ITEM(name) \
+DEFINE_EVENT(xfs_log_recover_item_class, name, \
+    TP_PROTO(struct log *log, struct xlog_recover *trans, \
+        struct xlog_recover_item *item, int pass), \
+    TP_ARGS(log, trans, item, pass))
+
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add);
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add_cont);
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_head);
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
+
+DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
+    TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f),
+    TP_ARGS(log, buf_f),
+    TP_STRUCT__entry(
+        __field(dev_t, dev)
+        __field(__int64_t, blkno)
+        __field(unsigned short, len)
+        __field(unsigned short, flags)
+        __field(unsigned short, size)
+        __field(unsigned int, map_size)
+    ),
+    TP_fast_assign(
+        __entry->dev = log->l_mp->m_super->s_dev;
+        __entry->blkno = buf_f->blf_blkno;
+        __entry->len = buf_f->blf_len;
+        __entry->flags = buf_f->blf_flags;
+        __entry->size = buf_f->blf_size;
+        __entry->map_size = buf_f->blf_map_size;
+    ),
+    TP_printk("dev %d:%d blkno 0x%llx, len %u, flags 0x%x, size %d, "
+            "map_size %d",
+          MAJOR(__entry->dev), MINOR(__entry->dev),
+          __entry->blkno,
+          __entry->len,
+          __entry->flags,
+          __entry->size,
+          __entry->map_size)
+)
+
+#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
+DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
+    TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \
+    TP_ARGS(log, buf_f))
+
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_add);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_ref_inc);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_recover);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_inode_buf);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
+
+DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
+    TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f),
+    TP_ARGS(log, in_f),
+    TP_STRUCT__entry(
+        __field(dev_t, dev)
+        __field(xfs_ino_t, ino)
+        __field(unsigned short, size)
+        __field(int, fields)
+        __field(unsigned short, asize)
+        __field(unsigned short, dsize)
+        __field(__int64_t, blkno)
+        __field(int, len)
+        __field(int, boffset)
+    ),
+    TP_fast_assign(
+        __entry->dev = log->l_mp->m_super->s_dev;
+        __entry->ino = in_f->ilf_ino;
+        __entry->size = in_f->ilf_size;
+        __entry->fields = in_f->ilf_fields;
+        __entry->asize = in_f->ilf_asize;
+        __entry->dsize = in_f->ilf_dsize;
+        __entry->blkno = in_f->ilf_blkno;
+        __entry->len = in_f->ilf_len;
+        __entry->boffset = in_f->ilf_boffset;
+    ),
+    TP_printk("dev %d:%d ino 0x%llx, size %u, fields 0x%x, asize %d, "
+            "dsize %d, blkno 0x%llx, len %d, boffset %d",
+          MAJOR(__entry->dev), MINOR(__entry->dev),
+          __entry->ino,
+          __entry->size,
+          __entry->fields,
+          __entry->asize,
+          __entry->dsize,
+          __entry->blkno,
+          __entry->len,
+          __entry->boffset)
+)
+
+#define DEFINE_LOG_RECOVER_INO_ITEM(name) \
+DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
+    TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \
+    TP_ARGS(log, in_f))
+
+DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
+DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel);
+DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip);
+
 #endif /* _TRACE_XFS_H */

 #undef TRACE_INCLUDE_PATH
fs/xfs/quota/xfs_dquot.c

@@ -101,7 +101,7 @@ xfs_qm_dqinit(
      * No need to re-initialize these if this is a reclaimed dquot.
      */
     if (brandnewdquot) {
-        dqp->dq_flnext = dqp->dq_flprev = dqp;
+        INIT_LIST_HEAD(&dqp->q_freelist);
         mutex_init(&dqp->q_qlock);
         init_waitqueue_head(&dqp->q_pinwait);

@@ -119,20 +119,20 @@ xfs_qm_dqinit(
          * Only the q_core portion was zeroed in dqreclaim_one().
          * So, we need to reset others.
          */
-        dqp->q_nrefs = 0;
-        dqp->q_blkno = 0;
-        dqp->MPL_NEXT = dqp->HL_NEXT = NULL;
-        dqp->HL_PREVP = dqp->MPL_PREVP = NULL;
-        dqp->q_bufoffset = 0;
-        dqp->q_fileoffset = 0;
-        dqp->q_transp = NULL;
-        dqp->q_gdquot = NULL;
-        dqp->q_res_bcount = 0;
-        dqp->q_res_icount = 0;
-        dqp->q_res_rtbcount = 0;
-        atomic_set(&dqp->q_pincount, 0);
-        dqp->q_hash = NULL;
-        ASSERT(dqp->dq_flnext == dqp->dq_flprev);
+        dqp->q_nrefs = 0;
+        dqp->q_blkno = 0;
+        INIT_LIST_HEAD(&dqp->q_mplist);
+        INIT_LIST_HEAD(&dqp->q_hashlist);
+        dqp->q_bufoffset = 0;
+        dqp->q_fileoffset = 0;
+        dqp->q_transp = NULL;
+        dqp->q_gdquot = NULL;
+        dqp->q_res_bcount = 0;
+        dqp->q_res_icount = 0;
+        dqp->q_res_rtbcount = 0;
+        atomic_set(&dqp->q_pincount, 0);
+        dqp->q_hash = NULL;
+        ASSERT(list_empty(&dqp->q_freelist));

         trace_xfs_dqreuse(dqp);
     }

@@ -158,7 +158,7 @@ void
 xfs_qm_dqdestroy(
     xfs_dquot_t     *dqp)
 {
-    ASSERT(!XFS_DQ_IS_ON_FREELIST(dqp));
+    ASSERT(list_empty(&dqp->q_freelist));

     mutex_destroy(&dqp->q_qlock);
     sv_destroy(&dqp->q_pinwait);

@@ -252,7 +252,7 @@ xfs_qm_adjust_dqtimers(
             (be64_to_cpu(d->d_bcount) >=
              be64_to_cpu(d->d_blk_hardlimit)))) {
             d->d_btimer = cpu_to_be32(get_seconds() +
-                    XFS_QI_BTIMELIMIT(mp));
+                    mp->m_quotainfo->qi_btimelimit);
         } else {
             d->d_bwarns = 0;
         }

@@ -275,7 +275,7 @@ xfs_qm_adjust_dqtimers(
             (be64_to_cpu(d->d_icount) >=
              be64_to_cpu(d->d_ino_hardlimit)))) {
             d->d_itimer = cpu_to_be32(get_seconds() +
-                    XFS_QI_ITIMELIMIT(mp));
+                    mp->m_quotainfo->qi_itimelimit);
         } else {
             d->d_iwarns = 0;
         }

@@ -298,7 +298,7 @@ xfs_qm_adjust_dqtimers(
             (be64_to_cpu(d->d_rtbcount) >=
              be64_to_cpu(d->d_rtb_hardlimit)))) {
             d->d_rtbtimer = cpu_to_be32(get_seconds() +
-                    XFS_QI_RTBTIMELIMIT(mp));
+                    mp->m_quotainfo->qi_rtbtimelimit);
         } else {
             d->d_rtbwarns = 0;
         }

@@ -325,6 +325,7 @@ xfs_qm_init_dquot_blk(
     uint        type,
     xfs_buf_t   *bp)
 {
+    struct xfs_quotainfo *q = mp->m_quotainfo;
     xfs_dqblk_t *d;
     int         curid, i;

@@ -337,16 +338,16 @@ xfs_qm_init_dquot_blk(
     /*
      * ID of the first dquot in the block - id's are zero based.
      */
-    curid = id - (id % XFS_QM_DQPERBLK(mp));
+    curid = id - (id % q->qi_dqperchunk);
     ASSERT(curid >= 0);
-    memset(d, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)));
-    for (i = 0; i < XFS_QM_DQPERBLK(mp); i++, d++, curid++)
+    memset(d, 0, BBTOB(q->qi_dqchunklen));
+    for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++)
         xfs_qm_dqinit_core(curid, type, d);
     xfs_trans_dquot_buf(tp, bp,
                 (type & XFS_DQ_USER ? XFS_BLI_UDQUOT_BUF :
                 ((type & XFS_DQ_PROJ) ? XFS_BLI_PDQUOT_BUF :
                  XFS_BLI_GDQUOT_BUF)));
-    xfs_trans_log_buf(tp, bp, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1);
+    xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
 }

@@ -419,7 +420,7 @@ xfs_qm_dqalloc(
     /* now we can just get the buffer (there's nothing to read yet) */
     bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
                    dqp->q_blkno,
-                   XFS_QI_DQCHUNKLEN(mp),
+                   mp->m_quotainfo->qi_dqchunklen,
                    0);
     if (!bp || (error = XFS_BUF_GETERROR(bp)))
         goto error1;

@@ -500,7 +501,8 @@ xfs_qm_dqtobp(
     if (dqp->q_blkno == (xfs_daddr_t) 0) {
         /* We use the id as an index */
-        dqp->q_fileoffset = (xfs_fileoff_t)id / XFS_QM_DQPERBLK(mp);
+        dqp->q_fileoffset = (xfs_fileoff_t)id /
+                    mp->m_quotainfo->qi_dqperchunk;
         nmaps = 1;
         quotip = XFS_DQ_TO_QIP(dqp);
         xfs_ilock(quotip, XFS_ILOCK_SHARED);

@@ -529,7 +531,7 @@ xfs_qm_dqtobp(
         /*
          * offset of dquot in the (fixed sized) dquot chunk.
          */
-        dqp->q_bufoffset = (id % XFS_QM_DQPERBLK(mp)) *
+        dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
             sizeof(xfs_dqblk_t);
         if (map.br_startblock == HOLESTARTBLOCK) {
             /*

@@ -559,15 +561,13 @@ xfs_qm_dqtobp(
      * Read in the buffer, unless we've just done the allocation
      * (in which case we already have the buf).
      */
     if (!newdquot) {
         trace_xfs_dqtobp_read(dqp);

-        if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
-                        dqp->q_blkno,
-                        XFS_QI_DQCHUNKLEN(mp),
-                        0, &bp))) {
-            return (error);
-        }
+        error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+                       dqp->q_blkno,
+                       mp->m_quotainfo->qi_dqchunklen,
+                       0, &bp);
+        if (error || !bp)
+            return XFS_ERROR(error);
     }

@@ -689,14 +689,14 @@ xfs_qm_idtodq(
     tp = NULL;
     if (flags & XFS_QMOPT_DQALLOC) {
         tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
-        if ((error = xfs_trans_reserve(tp,
-                       XFS_QM_DQALLOC_SPACE_RES(mp),
-                       XFS_WRITE_LOG_RES(mp) +
-                          BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1 +
-                          128,
-                       0,
-                       XFS_TRANS_PERM_LOG_RES,
-                       XFS_WRITE_LOG_COUNT))) {
+        error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
+                XFS_WRITE_LOG_RES(mp) +
+                BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 +
+                128,
+                0,
+                XFS_TRANS_PERM_LOG_RES,
+                XFS_WRITE_LOG_COUNT);
+        if (error) {
             cancelflags = 0;
             goto error0;
         }

@@ -751,7 +751,6 @@ xfs_qm_dqlookup(
 {
     xfs_dquot_t     *dqp;
     uint            flist_locked;
-    xfs_dquot_t     *d;

     ASSERT(mutex_is_locked(&qh->qh_lock));

@@ -760,7 +759,7 @@ xfs_qm_dqlookup(
     /*
      * Traverse the hashchain looking for a match
      */
-    for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) {
+    list_for_each_entry(dqp, &qh->qh_list, q_hashlist) {
         /*
          * We already have the hashlock. We don't need the
          * dqlock to look at the id field of the dquot, since the

@@ -772,12 +771,12 @@ xfs_qm_dqlookup(
             /*
              * All in core dquots must be on the dqlist of mp
              */
-            ASSERT(dqp->MPL_PREVP != NULL);
+            ASSERT(!list_empty(&dqp->q_mplist));

             xfs_dqlock(dqp);
             if (dqp->q_nrefs == 0) {
-                ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
-                if (!xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
+                ASSERT(!list_empty(&dqp->q_freelist));
+                if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
                     trace_xfs_dqlookup_want(dqp);

                     /*

@@ -787,7 +786,7 @@ xfs_qm_dqlookup(
                      */
                     dqp->dq_flags |= XFS_DQ_WANT;
                     xfs_dqunlock(dqp);
-                    xfs_qm_freelist_lock(xfs_Gqm);
+                    mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
                     xfs_dqlock(dqp);
                     dqp->dq_flags &= ~(XFS_DQ_WANT);
                 }

@@ -802,46 +801,28 @@ xfs_qm_dqlookup(
             if (flist_locked) {
                 if (dqp->q_nrefs != 0) {
-                    xfs_qm_freelist_unlock(xfs_Gqm);
+                    mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
                     flist_locked = B_FALSE;
                 } else {
-                    /*
-                     * take it off the freelist
-                     */
+                    /* take it off the freelist */
                     trace_xfs_dqlookup_freelist(dqp);
-                    XQM_FREELIST_REMOVE(dqp);
-                    /* xfs_qm_freelist_print(&(xfs_Gqm->
-                            qm_dqfreelist),
-                            "after removal"); */
+                    list_del_init(&dqp->q_freelist);
+                    xfs_Gqm->qm_dqfrlist_cnt--;
                 }
             }

             /*
              * grab a reference
              */
             XFS_DQHOLD(dqp);

             if (flist_locked)
-                xfs_qm_freelist_unlock(xfs_Gqm);
+                mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
             /*
              * move the dquot to the front of the hashchain
              */
             ASSERT(mutex_is_locked(&qh->qh_lock));
-            if (dqp->HL_PREVP != &qh->qh_next) {
-                trace_xfs_dqlookup_move(dqp);
-                if ((d = dqp->HL_NEXT))
-                    d->HL_PREVP = dqp->HL_PREVP;
-                *(dqp->HL_PREVP) = d;
-                d = qh->qh_next;
-                d->HL_PREVP = &dqp->HL_NEXT;
-                dqp->HL_NEXT = d;
-                dqp->HL_PREVP = &qh->qh_next;
-                qh->qh_next = dqp;
-            }
+            list_move(&dqp->q_hashlist, &qh->qh_list);
             trace_xfs_dqlookup_done(dqp);
             *O_dqpp = dqp;
-            ASSERT(mutex_is_locked(&qh->qh_lock));
-            return (0);
+            return 0;
         }
     }

@@ -975,16 +956,17 @@ xfs_qm_dqget(
     if (ip) {
         xfs_ilock(ip, XFS_ILOCK_EXCL);
-        if (! XFS_IS_DQTYPE_ON(mp, type)) {
-            /* inode stays locked on return */
-            xfs_qm_dqdestroy(dqp);
-            return XFS_ERROR(ESRCH);
-        }
+
         /*
          * A dquot could be attached to this inode by now, since
          * we had dropped the ilock.
          */
         if (type == XFS_DQ_USER) {
+            if (!XFS_IS_UQUOTA_ON(mp)) {
+                /* inode stays locked on return */
+                xfs_qm_dqdestroy(dqp);
+                return XFS_ERROR(ESRCH);
+            }
             if (ip->i_udquot) {
                 xfs_qm_dqdestroy(dqp);
                 dqp = ip->i_udquot;

@@ -992,6 +974,11 @@ xfs_qm_dqget(
                 goto dqret;
             }
         } else {
+            if (!XFS_IS_OQUOTA_ON(mp)) {
+                /* inode stays locked on return */
+                xfs_qm_dqdestroy(dqp);
+                return XFS_ERROR(ESRCH);
+            }
             if (ip->i_gdquot) {
                 xfs_qm_dqdestroy(dqp);
                 dqp = ip->i_gdquot;

@@ -1033,13 +1020,14 @@ xfs_qm_dqget(
      */
     ASSERT(mutex_is_locked(&h->qh_lock));
     dqp->q_hash = h;
-    XQM_HASHLIST_INSERT(h, dqp);
+    list_add(&dqp->q_hashlist, &h->qh_list);
+    h->qh_version++;

     /*
      * Attach this dquot to this filesystem's list of all dquots,
      * kept inside the mount structure in m_quotainfo field
      */
-    xfs_qm_mplist_lock(mp);
+    mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);

     /*
      * We return a locked dquot to the caller, with a reference taken

@@ -1047,9 +1035,9 @@ xfs_qm_dqget(
     xfs_dqlock(dqp);
     dqp->q_nrefs = 1;

-    XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp);
-
-    xfs_qm_mplist_unlock(mp);
+    list_add(&dqp->q_mplist, &mp->m_quotainfo->qi_dqlist);
+    mp->m_quotainfo->qi_dquots++;
+    mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
     mutex_unlock(&h->qh_lock);
  dqret:
     ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));

@@ -1086,10 +1074,10 @@ xfs_qm_dqput(
      * drop the dqlock and acquire the freelist and dqlock
      * in the right order; but try to get it out-of-order first
      */
-    if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
+    if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
         trace_xfs_dqput_wait(dqp);
         xfs_dqunlock(dqp);
-        xfs_qm_freelist_lock(xfs_Gqm);
+        mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
         xfs_dqlock(dqp);
     }

@@ -1100,10 +1088,8 @@ xfs_qm_dqput(
         if (--dqp->q_nrefs == 0) {
             trace_xfs_dqput_free(dqp);

-            /*
-             * insert at end of the freelist.
-             */
-            XQM_FREELIST_INSERT(&(xfs_Gqm->qm_dqfreelist), dqp);
+            list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
+            xfs_Gqm->qm_dqfrlist_cnt++;

             /*
              * If we just added a udquot to the freelist, then

@@ -1118,10 +1104,6 @@ xfs_qm_dqput(
                 xfs_dqlock(gdqp);
                 dqp->q_gdquot = NULL;
             }
-
-            /* xfs_qm_freelist_print(&(xfs_Gqm->qm_dqfreelist),
-               "@@@@@++ Free list (after append) @@@@@+");
-             */
         }
         xfs_dqunlock(dqp);

@@ -1133,7 +1115,7 @@ xfs_qm_dqput(
             break;
         dqp = gdqp;
     }
-    xfs_qm_freelist_unlock(xfs_Gqm);
+    mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
 }

@@ -1386,10 +1368,10 @@ int
 xfs_qm_dqpurge(
     xfs_dquot_t *dqp)
 {
-    xfs_dqhash_t    *thishash;
+    xfs_dqhash_t    *qh = dqp->q_hash;
     xfs_mount_t     *mp = dqp->q_mount;

-    ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
+    ASSERT(mutex_is_locked(&mp->m_quotainfo->qi_dqlist_lock));
     ASSERT(mutex_is_locked(&dqp->q_hash->qh_lock));

     xfs_dqlock(dqp);

@@ -1407,7 +1389,7 @@ xfs_qm_dqpurge(
         return (1);
     }

-    ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
+    ASSERT(!list_empty(&dqp->q_freelist));

     /*
      * If we're turning off quotas, we have to make sure that, for

@@ -1452,14 +1434,16 @@ xfs_qm_dqpurge(
     ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
            !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

-    thishash = dqp->q_hash;
-    XQM_HASHLIST_REMOVE(thishash, dqp);
-    XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp);
+    list_del_init(&dqp->q_hashlist);
+    qh->qh_version++;
+    list_del_init(&dqp->q_mplist);
+    mp->m_quotainfo->qi_dqreclaims++;
+    mp->m_quotainfo->qi_dquots--;
     /*
      * XXX Move this to the front of the freelist, if we can get the
      * freelist lock.
      */
-    ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
+    ASSERT(!list_empty(&dqp->q_freelist));

     dqp->q_mount = NULL;
     dqp->q_hash = NULL;

@@ -1467,7 +1451,7 @@ xfs_qm_dqpurge(
     memset(&dqp->q_core, 0, sizeof(dqp->q_core));
     xfs_dqfunlock(dqp);
     xfs_dqunlock(dqp);
-    mutex_unlock(&thishash->qh_lock);
+    mutex_unlock(&qh->qh_lock);
     return (0);
 }

@@ -1517,6 +1501,7 @@ void
 xfs_qm_dqflock_pushbuf_wait(
     xfs_dquot_t *dqp)
 {
+    xfs_mount_t *mp = dqp->q_mount;
     xfs_buf_t   *bp;

     /*

@@ -1525,14 +1510,14 @@ xfs_qm_dqflock_pushbuf_wait(
      * out immediately.  We'll be able to acquire
      * the flush lock when the I/O completes.
      */
-    bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno,
-            XFS_QI_DQCHUNKLEN(dqp->q_mount), XBF_TRYLOCK);
+    bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno,
+            mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
     if (!bp)
         goto out_lock;

     if (XFS_BUF_ISDELAYWRITE(bp)) {
         if (XFS_BUF_ISPINNED(bp))
-            xfs_log_force(dqp->q_mount, 0);
+            xfs_log_force(mp, 0);
         xfs_buf_delwri_promote(bp);
         wake_up_process(bp->b_target->bt_task);
     }
fs/xfs/quota/xfs_dquot.h

@@ -33,40 +33,23 @@
 /*
  * The hash chain headers (hash buckets)
  */
 typedef struct xfs_dqhash {
-    struct xfs_dquot *qh_next;
+    struct list_head  qh_list;
     struct mutex      qh_lock;
     uint              qh_version;    /* ever increasing version */
     uint              qh_nelems;     /* number of dquots on the list */
 } xfs_dqhash_t;

-typedef struct xfs_dqlink {
-    struct xfs_dquot  *ql_next;      /* forward link */
-    struct xfs_dquot  **ql_prevp;    /* pointer to prev ql_next */
-} xfs_dqlink_t;
-
 struct xfs_mount;
 struct xfs_trans;

 /*
- * This is the marker which is designed to occupy the first few
- * bytes of the xfs_dquot_t structure. Even inside this, the freelist pointers
- * must come first.
- * This serves as the marker ("sentinel") when we have to restart list
- * iterations because of locking considerations.
- */
-typedef struct xfs_dqmarker {
-    struct xfs_dquot *dqm_flnext;    /* link to freelist: must be first */
-    struct xfs_dquot *dqm_flprev;
-    xfs_dqlink_t     dqm_mplist;     /* link to mount's list of dquots */
-    xfs_dqlink_t     dqm_hashlist;   /* link to the hash chain */
-    uint             dqm_flags;      /* various flags (XFS_DQ_*) */
-} xfs_dqmarker_t;
-
-/*
  * The incore dquot structure
  */
 typedef struct xfs_dquot {
-    xfs_dqmarker_t   q_lists;        /* list ptrs, q_flags (marker) */
+    uint             dq_flags;       /* various flags (XFS_DQ_*) */
+    struct list_head q_freelist;     /* global free list of dquots */
+    struct list_head q_mplist;       /* mount's list of dquots */
+    struct list_head q_hashlist;     /* gloabl hash list of dquots */
     xfs_dqhash_t    *q_hash;         /* the hashchain header */
     struct xfs_mount *q_mount;       /* filesystem this relates to */
     struct xfs_trans *q_transp;      /* trans this belongs to currently */

@@ -87,13 +70,6 @@ typedef struct xfs_dquot {
     wait_queue_head_t q_pinwait;     /* dquot pinning wait queue */
 } xfs_dquot_t;

-#define dq_flnext   q_lists.dqm_flnext
-#define dq_flprev   q_lists.dqm_flprev
-#define dq_mplist   q_lists.dqm_mplist
-#define dq_hashlist q_lists.dqm_hashlist
-#define dq_flags    q_lists.dqm_flags
-
 /*
  * Lock hierarchy for q_qlock:
  *  XFS_QLOCK_NORMAL is the implicit default,

@@ -127,7 +103,6 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
 }

 #define XFS_DQ_IS_LOCKED(dqp)   (mutex_is_locked(&((dqp)->q_qlock)))
-#define XFS_DQ_IS_ON_FREELIST(dqp)  ((dqp)->dq_flnext != (dqp))
 #define XFS_DQ_IS_DIRTY(dqp)    ((dqp)->dq_flags & XFS_DQ_DIRTY)
 #define XFS_QM_ISUDQ(dqp)       ((dqp)->dq_flags & XFS_DQ_USER)
 #define XFS_QM_ISPDQ(dqp)       ((dqp)->dq_flags & XFS_DQ_PROJ)
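This header swaps the hand-rolled freelist/hash/mount links (the self-pointing dq_flnext sentinel and the xfs_dqlink_t prev-pointer pairs) for standard list_head embedding, where "not on a list" is simply list_empty() after a list_del_init(). A minimal re-implementation of that idiom in the style of <linux/list.h> (simplified: no poisoning, no RCU, no container_of iteration):

#include <stdio.h>

/* a minimal doubly linked list node, as in <linux/list.h> */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

/* an empty (or detached) node points at itself */
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
    new->prev = head->prev;
    new->next = head;
    head->prev->next = new;
    head->prev = new;
}

static void list_del_init(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
    INIT_LIST_HEAD(e);              /* node reads as "empty" again */
}

/* a dquot-like object: one embedded node per list it can live on */
struct dquot { int id; struct list_head q_freelist; };

int main(void)
{
    struct list_head freelist;
    struct dquot dq = { .id = 1 };

    INIT_LIST_HEAD(&freelist);
    INIT_LIST_HEAD(&dq.q_freelist);

    /* old idiom: ON_FREELIST meant dq_flnext != dqp (self-pointing);
     * new idiom: membership is just !list_empty(&dq->q_freelist) */
    printf("on freelist: %d\n", !list_empty(&dq.q_freelist));  /* 0 */
    list_add_tail(&dq.q_freelist, &freelist);
    printf("on freelist: %d\n", !list_empty(&dq.q_freelist));  /* 1 */
    list_del_init(&dq.q_freelist);
    printf("on freelist: %d\n", !list_empty(&dq.q_freelist));  /* 0 */
    return 0;
}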
fs/xfs/quota/xfs_dquot_item.c

@@ -107,8 +107,7 @@ xfs_qm_dquot_logitem_pin(
 /* ARGSUSED */
 STATIC void
 xfs_qm_dquot_logitem_unpin(
-    xfs_dq_logitem_t *logitem,
-    int               stale)
+    xfs_dq_logitem_t *logitem)
 {
     xfs_dquot_t *dqp = logitem->qli_dquot;

@@ -123,7 +122,7 @@ xfs_qm_dquot_logitem_unpin_remove(
     xfs_dq_logitem_t    *logitem,
     xfs_trans_t         *tp)
 {
-    xfs_qm_dquot_logitem_unpin(logitem, 0);
+    xfs_qm_dquot_logitem_unpin(logitem);
 }

@@ -228,7 +227,7 @@ xfs_qm_dquot_logitem_pushbuf(
     }
     mp = dqp->q_mount;
     bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
-            XFS_QI_DQCHUNKLEN(mp), XBF_TRYLOCK);
+            mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
     xfs_dqunlock(dqp);
     if (!bp)
         return;

@@ -329,8 +328,7 @@ static struct xfs_item_ops xfs_dquot_item_ops = {
     .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
                     xfs_qm_dquot_logitem_format,
     .iop_pin    = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin,
-    .iop_unpin  = (void(*)(xfs_log_item_t*, int))
-                    xfs_qm_dquot_logitem_unpin,
+    .iop_unpin  = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unpin,
     .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
                     xfs_qm_dquot_logitem_unpin_remove,
     .iop_trylock = (uint(*)(xfs_log_item_t*))

@@ -357,9 +355,8 @@ xfs_qm_dquot_logitem_init(
     xfs_dq_logitem_t  *lp;
     lp = &dqp->q_logitem;

-    lp->qli_item.li_type = XFS_LI_DQUOT;
-    lp->qli_item.li_ops = &xfs_dquot_item_ops;
-    lp->qli_item.li_mountp = dqp->q_mount;
+    xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
+                    &xfs_dquot_item_ops);
     lp->qli_dquot = dqp;
     lp->qli_format.qlf_type = XFS_LI_DQUOT;
     lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id);

@@ -426,7 +423,7 @@ xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf)
  */
 /*ARGSUSED*/
 STATIC void
-xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf, int stale)
+xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf)
 {
     return;
 }

@@ -537,8 +534,7 @@ static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
     .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
                     xfs_qm_qoff_logitem_format,
     .iop_pin    = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
-    .iop_unpin  = (void(*)(xfs_log_item_t*, int))
-                    xfs_qm_qoff_logitem_unpin,
+    .iop_unpin  = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin,
     .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
                     xfs_qm_qoff_logitem_unpin_remove,
     .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,

@@ -559,8 +555,7 @@ static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
     .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
                     xfs_qm_qoff_logitem_format,
     .iop_pin    = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
-    .iop_unpin  = (void(*)(xfs_log_item_t*, int))
-                    xfs_qm_qoff_logitem_unpin,
+    .iop_unpin  = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin,
     .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
                     xfs_qm_qoff_logitem_unpin_remove,
     .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,

@@ -586,11 +581,8 @@ xfs_qm_qoff_logitem_init(
     qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP);

-    qf->qql_item.li_type = XFS_LI_QUOTAOFF;
-    if (start)
-        qf->qql_item.li_ops = &xfs_qm_qoffend_logitem_ops;
-    else
-        qf->qql_item.li_ops = &xfs_qm_qoff_logitem_ops;
+    xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
+            &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
     qf->qql_item.li_mountp = mp;
     qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
     qf->qql_format.qf_flags = flags;
fs/xfs/quota/xfs_qm.c

(diff collapsed in the source page; not captured here)
fs/xfs/quota/xfs_qm.h

@@ -72,17 +72,6 @@ extern kmem_zone_t *qm_dqtrxzone;
 #define XFS_QM_MAX_DQCLUSTER_LOGSZ  3

 typedef xfs_dqhash_t    xfs_dqlist_t;
-/*
- * The freelist head. The first two fields match the first two in the
- * xfs_dquot_t structure (in xfs_dqmarker_t)
- */
-typedef struct xfs_frlist {
-    struct xfs_dquot *qh_next;
-    struct xfs_dquot *qh_prev;
-    struct mutex     qh_lock;
-    uint             qh_version;
-    uint             qh_nelems;
-} xfs_frlist_t;

 /*
  * Quota Manager (global) structure. Lives only in core.

@@ -91,7 +80,9 @@ typedef struct xfs_qm {
     xfs_dqlist_t    *qm_usr_dqhtable;/* udquot hash table */
     xfs_dqlist_t    *qm_grp_dqhtable;/* gdquot hash table */
     uint             qm_dqhashmask;  /* # buckets in dq hashtab - 1 */
-    xfs_frlist_t     qm_dqfreelist;  /* freelist of dquots */
+    struct list_head qm_dqfrlist;    /* freelist of dquots */
+    struct mutex     qm_dqfrlist_lock;
+    int              qm_dqfrlist_cnt;
     atomic_t         qm_totaldquots; /* total incore dquots */
     uint             qm_nrefs;       /* file systems with quota on */
     int              qm_dqfree_ratio;/* ratio of free to inuse dquots */

@@ -106,7 +97,9 @@ typedef struct xfs_quotainfo {
     xfs_inode_t     *qi_uquotaip;    /* user quota inode */
     xfs_inode_t     *qi_gquotaip;    /* group quota inode */
-    xfs_dqlist_t     qi_dqlist;      /* all dquots in filesys */
+    struct list_head qi_dqlist;      /* all dquots in filesys */
+    struct mutex     qi_dqlist_lock;
+    int              qi_dquots;
     int              qi_dqreclaims;  /* a change here indicates
                                         a removal in the dqlist */
     time_t           qi_btimelimit;  /* limit for blks timer */

@@ -175,10 +168,6 @@ extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
 extern int      xfs_qm_scall_quotaon(xfs_mount_t *, uint);
 extern int      xfs_qm_scall_quotaoff(xfs_mount_t *, uint);

-/* list stuff */
-extern void     xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *);
-extern void     xfs_qm_freelist_unlink(xfs_dquot_t *);
-
 #ifdef DEBUG
 extern int      xfs_qm_internalqcheck(xfs_mount_t *);
 #else
fs/xfs/quota/xfs_qm_stats.c

@@ -55,7 +55,7 @@ static int xqm_proc_show(struct seq_file *m, void *v)
             ndquot,
             xfs_Gqm ? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
             xfs_Gqm ? xfs_Gqm->qm_dqfree_ratio : 0,
-            xfs_Gqm ? xfs_Gqm->qm_dqfreelist.qh_nelems : 0);
+            xfs_Gqm ? xfs_Gqm->qm_dqfrlist_cnt : 0);
     return 0;
 }
fs/xfs/quota/xfs_qm_syscalls.c
...
@@ -79,6 +79,7 @@ xfs_qm_scall_quotaoff(
 	xfs_mount_t		*mp,
 	uint			flags)
 {
+	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	uint			dqtype;
 	int			error;
 	uint			inactivate_flags;
...
@@ -102,11 +103,8 @@ xfs_qm_scall_quotaoff(
 	 * critical thing.
 	 * If quotaoff, then we must be dealing with the root filesystem.
 	 */
-	ASSERT(mp->m_quotainfo);
-	if (mp->m_quotainfo)
-		mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
-
-	ASSERT(mp->m_quotainfo);
+	ASSERT(q);
+	mutex_lock(&q->qi_quotaofflock);
 
 	/*
 	 * If we're just turning off quota enforcement, change mp and go.
...
@@ -117,7 +115,7 @@ xfs_qm_scall_quotaoff(
 		spin_lock(&mp->m_sb_lock);
 		mp->m_sb.sb_qflags = mp->m_qflags;
 		spin_unlock(&mp->m_sb_lock);
-		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+		mutex_unlock(&q->qi_quotaofflock);
 
 		/* XXX what to do if error ? Revert back to old vals incore ? */
 		error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
...
@@ -150,10 +148,8 @@ xfs_qm_scall_quotaoff(
 	 * Nothing to do?  Don't complain. This happens when we're just
 	 * turning off quota enforcement.
 	 */
-	if ((mp->m_qflags & flags) == 0) {
-		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
-		return (0);
-	}
+	if ((mp->m_qflags & flags) == 0)
+		goto out_unlock;
 
 	/*
 	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
...
@@ -162,7 +158,7 @@ xfs_qm_scall_quotaoff(
 	 */
 	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
 	if (error)
-		goto out_error;
+		goto out_unlock;
 
 	/*
 	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
...
@@ -204,7 +200,7 @@ xfs_qm_scall_quotaoff(
 	 * So, if we couldn't purge all the dquots from the filesystem,
 	 * we can't get rid of the incore data structures.
 	 */
-	while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype|XFS_QMOPT_QUOTAOFF)))
+	while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype)))
 		delay(10 * nculprits);
 
 	/*
...
@@ -222,7 +218,7 @@ xfs_qm_scall_quotaoff(
 	if (error) {
 		/* We're screwed now. Shutdown is the only option. */
 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-		goto out_error;
+		goto out_unlock;
 	}
 
 	/*
...
@@ -230,27 +226,26 @@ xfs_qm_scall_quotaoff(
 	 */
 	if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
 	    ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
-		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+		mutex_unlock(&q->qi_quotaofflock);
 		xfs_qm_destroy_quotainfo(mp);
 		return (0);
 	}
 
 	/*
-	 * Release our quotainode references, and vn_purge them,
-	 * if we don't need them anymore.
+	 * Release our quotainode references if we don't need them anymore.
 	 */
-	if ((dqtype & XFS_QMOPT_UQUOTA) && XFS_QI_UQIP(mp)) {
-		IRELE(XFS_QI_UQIP(mp));
-		XFS_QI_UQIP(mp) = NULL;
+	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
+		IRELE(q->qi_uquotaip);
+		q->qi_uquotaip = NULL;
 	}
-	if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && XFS_QI_GQIP(mp)) {
-		IRELE(XFS_QI_GQIP(mp));
-		XFS_QI_GQIP(mp) = NULL;
+	if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) {
+		IRELE(q->qi_gquotaip);
+		q->qi_gquotaip = NULL;
 	}
-out_error:
-	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
-	return (error);
+
+out_unlock:
+	mutex_unlock(&q->qi_quotaofflock);
+	return error;
 }
 
 int
...
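The quotaoff rework above folds several duplicated unlock-and-return sequences into one out_unlock label, so the lock is dropped in exactly one place. The same pattern, sketched in plain user-space C with a hypothetical lock and steps:

#include <pthread.h>

/* Hypothetical resource guarded by a mutex, as qi_quotaofflock is. */
static pthread_mutex_t qofflock = PTHREAD_MUTEX_INITIALIZER;

static int do_step(int n) { return n == 2 ? -1 : 0; }

static int quotaoff_like(void)
{
	int error;

	pthread_mutex_lock(&qofflock);

	error = do_step(1);
	if (error)
		goto out_unlock;	/* every failure path converges here */

	error = do_step(2);

out_unlock:
	pthread_mutex_unlock(&qofflock);	/* single unlock site */
	return error;
}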
@@ -379,9 +374,9 @@ xfs_qm_scall_quotaon(
 	/*
 	 * Switch on quota enforcement in core.
 	 */
-	mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
+	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
 	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
-	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
 
 	return (0);
 }
...
@@ -392,11 +387,12 @@ xfs_qm_scall_quotaon(
  */
 int
 xfs_qm_scall_getqstat(
-	xfs_mount_t	*mp,
-	fs_quota_stat_t *out)
+	struct xfs_mount	*mp,
+	struct fs_quota_stat	*out)
 {
-	xfs_inode_t	*uip, *gip;
-	boolean_t	tempuqip, tempgqip;
+	struct xfs_quotainfo	*q = mp->m_quotainfo;
+	struct xfs_inode	*uip, *gip;
+	boolean_t		tempuqip, tempgqip;
 
 	uip = gip = NULL;
 	tempuqip = tempgqip = B_FALSE;
...
@@ -415,9 +411,9 @@ xfs_qm_scall_getqstat(
 	out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
 	out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
 
-	if (mp->m_quotainfo) {
-		uip = mp->m_quotainfo->qi_uquotaip;
-		gip = mp->m_quotainfo->qi_gquotaip;
+	if (q) {
+		uip = q->qi_uquotaip;
+		gip = q->qi_gquotaip;
 	}
 	if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
 		if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
...
@@ -441,15 +437,15 @@ xfs_qm_scall_getqstat(
 		if (tempgqip)
 			IRELE(gip);
 	}
-	if (mp->m_quotainfo) {
-		out->qs_incoredqs = XFS_QI_MPLNDQUOTS(mp);
-		out->qs_btimelimit = XFS_QI_BTIMELIMIT(mp);
-		out->qs_itimelimit = XFS_QI_ITIMELIMIT(mp);
-		out->qs_rtbtimelimit = XFS_QI_RTBTIMELIMIT(mp);
-		out->qs_bwarnlimit = XFS_QI_BWARNLIMIT(mp);
-		out->qs_iwarnlimit = XFS_QI_IWARNLIMIT(mp);
+	if (q) {
+		out->qs_incoredqs = q->qi_dquots;
+		out->qs_btimelimit = q->qi_btimelimit;
+		out->qs_itimelimit = q->qi_itimelimit;
+		out->qs_rtbtimelimit = q->qi_rtbtimelimit;
+		out->qs_bwarnlimit = q->qi_bwarnlimit;
+		out->qs_iwarnlimit = q->qi_iwarnlimit;
 	}
-	return (0);
+	return 0;
 }
 
 /*
...
@@ -462,6 +458,7 @@ xfs_qm_scall_setqlim(
 	uint			type,
 	fs_disk_quota_t		*newlim)
 {
+	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	xfs_disk_dquot_t	*ddq;
 	xfs_dquot_t		*dqp;
 	xfs_trans_t		*tp;
...
@@ -485,7 +482,7 @@ xfs_qm_scall_setqlim(
 	 * a quotaoff from happening). (XXXThis doesn't currently happen
 	 * because we take the vfslock before calling xfs_qm_sysent).
 	 */
-	mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
+	mutex_lock(&q->qi_quotaofflock);
 
 	/*
 	 * Get the dquot (locked), and join it to the transaction.
...
@@ -493,9 +490,8 @@ xfs_qm_scall_setqlim(
 	 */
 	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
 		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
-		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
 		ASSERT(error != ENOENT);
-		return (error);
+		goto out_unlock;
 	}
 	xfs_trans_dqjoin(tp, dqp);
 	ddq = &dqp->q_core;
...
@@ -513,8 +509,8 @@ xfs_qm_scall_setqlim(
 		ddq->d_blk_hardlimit = cpu_to_be64(hard);
 		ddq->d_blk_softlimit = cpu_to_be64(soft);
 		if (id == 0) {
-			mp->m_quotainfo->qi_bhardlimit = hard;
-			mp->m_quotainfo->qi_bsoftlimit = soft;
+			q->qi_bhardlimit = hard;
+			q->qi_bsoftlimit = soft;
 		}
 	} else {
 		qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft);
...
@@ -529,8 +525,8 @@ xfs_qm_scall_setqlim(
 		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
 		ddq->d_rtb_softlimit = cpu_to_be64(soft);
 		if (id == 0) {
-			mp->m_quotainfo->qi_rtbhardlimit = hard;
-			mp->m_quotainfo->qi_rtbsoftlimit = soft;
+			q->qi_rtbhardlimit = hard;
+			q->qi_rtbsoftlimit = soft;
 		}
 	} else {
 		qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
...
@@ -546,8 +542,8 @@ xfs_qm_scall_setqlim(
 		ddq->d_ino_hardlimit = cpu_to_be64(hard);
 		ddq->d_ino_softlimit = cpu_to_be64(soft);
 		if (id == 0) {
-			mp->m_quotainfo->qi_ihardlimit = hard;
-			mp->m_quotainfo->qi_isoftlimit = soft;
+			q->qi_ihardlimit = hard;
+			q->qi_isoftlimit = soft;
 		}
 	} else {
 		qdprintk("ihard %Ld < isoft %Ld\n", hard, soft);
...
@@ -572,23 +568,23 @@ xfs_qm_scall_setqlim(
 		 * for warnings.
 		 */
 		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
-			mp->m_quotainfo->qi_btimelimit = newlim->d_btimer;
+			q->qi_btimelimit = newlim->d_btimer;
 			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
 		}
 		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
-			mp->m_quotainfo->qi_itimelimit = newlim->d_itimer;
+			q->qi_itimelimit = newlim->d_itimer;
 			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
 		}
 		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
-			mp->m_quotainfo->qi_rtbtimelimit = newlim->d_rtbtimer;
+			q->qi_rtbtimelimit = newlim->d_rtbtimer;
 			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
 		}
 		if (newlim->d_fieldmask & FS_DQ_BWARNS)
-			mp->m_quotainfo->qi_bwarnlimit = newlim->d_bwarns;
+			q->qi_bwarnlimit = newlim->d_bwarns;
 		if (newlim->d_fieldmask & FS_DQ_IWARNS)
-			mp->m_quotainfo->qi_iwarnlimit = newlim->d_iwarns;
+			q->qi_iwarnlimit = newlim->d_iwarns;
 		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
-			mp->m_quotainfo->qi_rtbwarnlimit = newlim->d_rtbwarns;
+			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
 	} else {
 		/*
 		 * If the user is now over quota, start the timelimit.
...
@@ -605,8 +601,9 @@ xfs_qm_scall_setqlim(
 	error = xfs_trans_commit(tp, 0);
 	xfs_qm_dqprint(dqp);
 	xfs_qm_dqrele(dqp);
-	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
 
+out_unlock:
+	mutex_unlock(&q->qi_quotaofflock);
 	return error;
 }
...
@@ -853,7 +850,8 @@ xfs_dqrele_inode(
 	int			error;
 
 	/* skip quota inodes */
-	if (ip == XFS_QI_UQIP(ip->i_mount) || ip == XFS_QI_GQIP(ip->i_mount)) {
+	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
+	    ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
 		ASSERT(ip->i_udquot == NULL);
 		ASSERT(ip->i_gdquot == NULL);
 		read_unlock(&pag->pag_ici_lock);
...
@@ -931,7 +929,8 @@ struct mutex	qcheck_lock;
 }
 
 typedef struct dqtest {
-	xfs_dqmarker_t	q_lists;
+	uint		 dq_flags;	/* various flags (XFS_DQ_*) */
+	struct list_head q_hashlist;
 	xfs_dqhash_t	*q_hash;	/* the hashchain header */
 	xfs_mount_t	*q_mount;	/* filesystem this relates to */
 	xfs_dqid_t	d_id;		/* user id or group id */
...
@@ -942,14 +941,9 @@ typedef struct dqtest {
 STATIC void
 xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp)
 {
-	xfs_dquot_t *d;
-	if (((d) = (h)->qh_next))
-		(d)->HL_PREVP = &((dqp)->HL_NEXT);
-	(dqp)->HL_NEXT = d;
-	(dqp)->HL_PREVP = &((h)->qh_next);
-	(h)->qh_next = (xfs_dquot_t *)dqp;
-	(h)->qh_version++;
-	(h)->qh_nelems++;
+	list_add(&dqp->q_hashlist, &h->qh_list);
+	h->qh_version++;
+	h->qh_nelems++;
 }
 STATIC void
 xfs_qm_dqtest_print(
...
@@ -1061,9 +1055,7 @@ xfs_qm_internalqcheck_dqget(
 	xfs_dqhash_t	*h;
 
 	h = DQTEST_HASH(mp, id, type);
-	for (d = (xfs_dqtest_t *) h->qh_next; d != NULL;
-	     d = (xfs_dqtest_t *) d->HL_NEXT) {
-		/* DQTEST_LIST_PRINT(h, HL_NEXT, "@@@@@ dqtestlist @@@@@"); */
+	list_for_each_entry(d, &h->qh_list, q_hashlist) {
 		if (d->d_id == id && mp == d->q_mount) {
 			*O_dq = d;
 			return (0);
...
@@ -1074,6 +1066,7 @@ xfs_qm_internalqcheck_dqget(
 	d->d_id = id;
 	d->q_mount = mp;
 	d->q_hash = h;
+	INIT_LIST_HEAD(&d->q_hashlist);
 	xfs_qm_hashinsert(h, d);
 	*O_dq = d;
 	return (0);
...
@@ -1180,8 +1173,6 @@ xfs_qm_internalqcheck(
 	xfs_ino_t	lastino;
 	int		done, count;
 	int		i;
-	xfs_dqtest_t	*d, *e;
-	xfs_dqhash_t	*h1;
 	int		error;
 
 	lastino = 0;
...
@@ -1221,19 +1212,18 @@ xfs_qm_internalqcheck(
 	}
 	cmn_err(CE_DEBUG, "Checking results against system dquots");
 	for (i = 0; i < qmtest_hashmask; i++) {
-		h1 = &qmtest_udqtab[i];
-		for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
+		xfs_dqtest_t	*d, *n;
+		xfs_dqhash_t	*h;
+
+		h = &qmtest_udqtab[i];
+		list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
 			xfs_dqtest_cmp(d);
-			e = (xfs_dqtest_t *) d->HL_NEXT;
 			kmem_free(d);
-			d = e;
 		}
-		h1 = &qmtest_gdqtab[i];
-		for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
+		h = &qmtest_gdqtab[i];
+		list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
 			xfs_dqtest_cmp(d);
-			e = (xfs_dqtest_t *) d->HL_NEXT;
 			kmem_free(d);
-			d = e;
 		}
 	}
...
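The internalqcheck loops above switch to list_for_each_entry_safe(), which caches the next node before the loop body runs so entries can be freed while walking. A user-space approximation of why the _safe variant is needed:

#include <stdlib.h>

struct node {
	struct node *next;
};

/* Freeing while iterating: stash 'next' before the body frees 'pos'. */
static void free_all(struct node **head)
{
	struct node *pos, *n;

	for (pos = *head; pos != NULL; pos = n) {
		n = pos->next;	/* the "safe" part: read next first */
		free(pos);	/* now the current node may go away */
	}
	*head = NULL;
}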
fs/xfs/quota/xfs_quota_priv.h
...
@@ -24,43 +24,6 @@
  */
 #define XFS_DQITER_MAP_SIZE	10
 
-/* Number of dquots that fit in to a dquot block */
-#define XFS_QM_DQPERBLK(mp)	((mp)->m_quotainfo->qi_dqperchunk)
-
-#define XFS_DQ_IS_ADDEDTO_TRX(t, d)	((d)->q_transp == (t))
-
-#define XFS_QI_MPLRECLAIMS(mp)	((mp)->m_quotainfo->qi_dqreclaims)
-#define XFS_QI_UQIP(mp)		((mp)->m_quotainfo->qi_uquotaip)
-#define XFS_QI_GQIP(mp)		((mp)->m_quotainfo->qi_gquotaip)
-#define XFS_QI_DQCHUNKLEN(mp)	((mp)->m_quotainfo->qi_dqchunklen)
-#define XFS_QI_BTIMELIMIT(mp)	((mp)->m_quotainfo->qi_btimelimit)
-#define XFS_QI_RTBTIMELIMIT(mp)	((mp)->m_quotainfo->qi_rtbtimelimit)
-#define XFS_QI_ITIMELIMIT(mp)	((mp)->m_quotainfo->qi_itimelimit)
-#define XFS_QI_BWARNLIMIT(mp)	((mp)->m_quotainfo->qi_bwarnlimit)
-#define XFS_QI_RTBWARNLIMIT(mp)	((mp)->m_quotainfo->qi_rtbwarnlimit)
-#define XFS_QI_IWARNLIMIT(mp)	((mp)->m_quotainfo->qi_iwarnlimit)
-#define XFS_QI_QOFFLOCK(mp)	((mp)->m_quotainfo->qi_quotaofflock)
-
-#define XFS_QI_MPL_LIST(mp)	((mp)->m_quotainfo->qi_dqlist)
-#define XFS_QI_MPLNEXT(mp)	((mp)->m_quotainfo->qi_dqlist.qh_next)
-#define XFS_QI_MPLNDQUOTS(mp)	((mp)->m_quotainfo->qi_dqlist.qh_nelems)
-
-#define xfs_qm_mplist_lock(mp) \
-	mutex_lock(&(XFS_QI_MPL_LIST(mp).qh_lock))
-#define xfs_qm_mplist_nowait(mp) \
-	mutex_trylock(&(XFS_QI_MPL_LIST(mp).qh_lock))
-#define xfs_qm_mplist_unlock(mp) \
-	mutex_unlock(&(XFS_QI_MPL_LIST(mp).qh_lock))
-#define XFS_QM_IS_MPLIST_LOCKED(mp) \
-	mutex_is_locked(&(XFS_QI_MPL_LIST(mp).qh_lock))
-
-#define xfs_qm_freelist_lock(qm) \
-	mutex_lock(&((qm)->qm_dqfreelist.qh_lock))
-#define xfs_qm_freelist_lock_nowait(qm) \
-	mutex_trylock(&((qm)->qm_dqfreelist.qh_lock))
-#define xfs_qm_freelist_unlock(qm) \
-	mutex_unlock(&((qm)->qm_dqfreelist.qh_lock))
-
 /*
  * Hash into a bucket in the dquot hash table, based on <mp, id>.
  */
...
@@ -72,9 +35,6 @@
 				      XFS_DQ_HASHVAL(mp, id)) : \
 				     (xfs_Gqm->qm_grp_dqhtable + \
 				      XFS_DQ_HASHVAL(mp, id)))
-#define XFS_IS_DQTYPE_ON(mp, type)   (type == XFS_DQ_USER ? \
-				      XFS_IS_UQUOTA_ON(mp) : \
-				      XFS_IS_OQUOTA_ON(mp))
 #define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \
 	!dqp->q_core.d_blk_hardlimit && \
 	!dqp->q_core.d_blk_softlimit && \
...
@@ -86,68 +46,6 @@
 	!dqp->q_core.d_rtbcount && \
 	!dqp->q_core.d_icount)
 
-#define HL_PREVP	dq_hashlist.ql_prevp
-#define HL_NEXT		dq_hashlist.ql_next
-#define MPL_PREVP	dq_mplist.ql_prevp
-#define MPL_NEXT	dq_mplist.ql_next
-
-#define _LIST_REMOVE(h, dqp, PVP, NXT)			\
-	{						\
-		xfs_dquot_t *d;				\
-		if (((d) = (dqp)->NXT))			\
-			(d)->PVP = (dqp)->PVP;		\
-		*((dqp)->PVP) = d;			\
-		(dqp)->NXT = NULL;			\
-		(dqp)->PVP = NULL;			\
-		(h)->qh_version++;			\
-		(h)->qh_nelems--;			\
-	}
-
-#define _LIST_INSERT(h, dqp, PVP, NXT)			\
-	{						\
-		xfs_dquot_t *d;				\
-		if (((d) = (h)->qh_next))		\
-			(d)->PVP = &((dqp)->NXT);	\
-		(dqp)->NXT = d;				\
-		(dqp)->PVP = &((h)->qh_next);		\
-		(h)->qh_next = dqp;			\
-		(h)->qh_version++;			\
-		(h)->qh_nelems++;			\
-	}
-
-#define FOREACH_DQUOT_IN_MP(dqp, mp) \
-	for ((dqp) = XFS_QI_MPLNEXT(mp); (dqp) != NULL; (dqp) = (dqp)->MPL_NEXT)
-
-#define FOREACH_DQUOT_IN_FREELIST(dqp, qlist)	\
-	for ((dqp) = (qlist)->qh_next; (dqp) != (xfs_dquot_t *)(qlist); \
-	     (dqp) = (dqp)->dq_flnext)
-
-#define XQM_HASHLIST_INSERT(h, dqp)	\
-	_LIST_INSERT(h, dqp, HL_PREVP, HL_NEXT)
-
-#define XQM_FREELIST_INSERT(h, dqp)	\
-	xfs_qm_freelist_append(h, dqp)
-
-#define XQM_MPLIST_INSERT(h, dqp)	\
-	_LIST_INSERT(h, dqp, MPL_PREVP, MPL_NEXT)
-
-#define XQM_HASHLIST_REMOVE(h, dqp)	\
-	_LIST_REMOVE(h, dqp, HL_PREVP, HL_NEXT)
-#define XQM_FREELIST_REMOVE(dqp)	\
-	xfs_qm_freelist_unlink(dqp)
-#define XQM_MPLIST_REMOVE(h, dqp)	\
-	{ _LIST_REMOVE(h, dqp, MPL_PREVP, MPL_NEXT); \
-	  XFS_QI_MPLRECLAIMS((dqp)->q_mount)++; }
-
-#define XFS_DQ_IS_LOGITEM_INITD(dqp)	((dqp)->q_logitem.qli_dquot == (dqp))
-
-#define XFS_QM_DQP_TO_DQACCT(tp, dqp)	(XFS_QM_ISUDQ(dqp) ? \
-					 (tp)->t_dqinfo->dqa_usrdquots : \
-					 (tp)->t_dqinfo->dqa_grpdquots)
-#define XFS_IS_SUSER_DQUOT(dqp)		\
-	(!((dqp)->q_core.d_id))
-
 #define DQFLAGTO_TYPESTR(d)	(((d)->dq_flags & XFS_DQ_USER) ? "USR" : \
 				 (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \
 				  (((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???")))
...
fs/xfs/quota/xfs_trans_dquot.c
...
@@ -59,12 +59,11 @@ xfs_trans_dqjoin(
 	xfs_trans_t	*tp,
 	xfs_dquot_t	*dqp)
 {
-	xfs_dq_logitem_t    *lp;
+	xfs_dq_logitem_t    *lp = &dqp->q_logitem;
 
-	ASSERT(! XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+	ASSERT(dqp->q_transp != tp);
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
-	ASSERT(XFS_DQ_IS_LOGITEM_INITD(dqp));
-	lp = &dqp->q_logitem;
+	ASSERT(lp->qli_dquot == dqp);
 
 	/*
 	 * Get a log_item_desc to point at the new item.
...
@@ -96,7 +95,7 @@ xfs_trans_log_dquot(
 {
 	xfs_log_item_desc_t	*lidp;
 
-	ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+	ASSERT(dqp->q_transp == tp);
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
 
 	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(&dqp->q_logitem));
...
@@ -198,16 +197,16 @@ xfs_trans_get_dqtrx(
 	int		i;
 	xfs_dqtrx_t	*qa;
 
-	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
-		qa = XFS_QM_DQP_TO_DQACCT(tp, dqp);
+	qa = XFS_QM_ISUDQ(dqp) ?
+		tp->t_dqinfo->dqa_usrdquots : tp->t_dqinfo->dqa_grpdquots;
 
+	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
 		if (qa[i].qt_dquot == NULL ||
-		    qa[i].qt_dquot == dqp) {
-			return (&qa[i]);
-		}
+		    qa[i].qt_dquot == dqp)
+			return &qa[i];
 	}
 
-	return (NULL);
+	return NULL;
 }
 
 /*
...
@@ -381,7 +380,7 @@ xfs_trans_apply_dquot_deltas(
 			break;
 
 		ASSERT(XFS_DQ_IS_LOCKED(dqp));
-		ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+		ASSERT(dqp->q_transp == tp);
 
 		/*
 		 * adjust the actual number of blocks used
...
@@ -639,7 +638,7 @@ xfs_trans_dqresv(
 			softlimit = q->qi_bsoftlimit;
 		timer = be32_to_cpu(dqp->q_core.d_btimer);
 		warns = be16_to_cpu(dqp->q_core.d_bwarns);
-		warnlimit = XFS_QI_BWARNLIMIT(dqp->q_mount);
+		warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
 		resbcountp = &dqp->q_res_bcount;
 	} else {
 		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
...
@@ -651,7 +650,7 @@ xfs_trans_dqresv(
 			softlimit = q->qi_rtbsoftlimit;
 		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
 		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
-		warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount);
+		warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
 		resbcountp = &dqp->q_res_rtbcount;
 	}
...
@@ -691,7 +690,7 @@ xfs_trans_dqresv(
 			count = be64_to_cpu(dqp->q_core.d_icount);
 			timer = be32_to_cpu(dqp->q_core.d_itimer);
 			warns = be16_to_cpu(dqp->q_core.d_iwarns);
-			warnlimit = XFS_QI_IWARNLIMIT(dqp->q_mount);
+			warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
 			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
 			if (!hardlimit)
 				hardlimit = q->qi_ihardlimit;
...
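Most of the churn in this file and in xfs_qm_syscalls.c replaces one-line wrapper macros such as XFS_QI_BWARNLIMIT(mp) with direct dereferences of the quotainfo structure. A generic before/after sketch of the trade-off, with hypothetical names:

/* Before: the accessor macro hides which structure owns the field. */
#define QI_BWARNLIMIT(mp)	((mp)->quotainfo->bwarnlimit)

struct quotainfo { unsigned int bwarnlimit; };
struct mount     { struct quotainfo *quotainfo; };

static unsigned int warn_limit(struct mount *mp)
{
	/* After: spelling out mp->quotainfo->bwarnlimit makes the
	 * dependency on quotainfo visible at every call site, at the
	 * cost of slightly longer lines. */
	return mp->quotainfo->bwarnlimit;
}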
fs/xfs/xfs_bmap.c
...
@@ -3829,7 +3829,7 @@ xfs_bmap_add_attrfork(
 	}
 	if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
 		goto error2;
-	error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES);
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 	ASSERT(ip->i_df.if_ext_max ==
 	       XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
 	return error;
...
fs/xfs/xfs_buf_item.c
...
@@ -372,12 +372,12 @@ xfs_buf_item_pin(
  */
 STATIC void
 xfs_buf_item_unpin(
-	xfs_buf_log_item_t	*bip,
-	int			stale)
+	xfs_buf_log_item_t	*bip)
 {
 	struct xfs_ail	*ailp;
 	xfs_buf_t	*bp;
 	int		freed;
+	int		stale = bip->bli_flags & XFS_BLI_STALE;
 
 	bp = bip->bli_buf;
 	ASSERT(bp != NULL);
...
@@ -428,40 +428,34 @@ xfs_buf_item_unpin_remove(
 	xfs_buf_log_item_t	*bip,
 	xfs_trans_t		*tp)
 {
-	xfs_buf_t		*bp;
-	xfs_log_item_desc_t	*lidp;
-	int			stale = 0;
-
-	bp = bip->bli_buf;
-	/*
-	 * will xfs_buf_item_unpin() call xfs_buf_item_relse()?
-	 */
+	/* will xfs_buf_item_unpin() call xfs_buf_item_relse()? */
 	if ((atomic_read(&bip->bli_refcount) == 1) &&
 	    (bip->bli_flags & XFS_BLI_STALE)) {
+		/*
+		 * yes -- We can safely do some work here and then call
+		 * buf_item_unpin to do the rest because we are
+		 * are holding the buffer locked so no one else will be
+		 * able to bump up the refcount. We have to remove the
+		 * log item from the transaction as we are about to release
+		 * our reference to the buffer. If we don't, the unlock that
+		 * occurs later in the xfs_trans_uncommit() will try to
+		 * reference the buffer which we no longer have a hold on.
+		 */
+		struct xfs_log_item_desc *lidp;
+
 		ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
 		trace_xfs_buf_item_unpin_stale(bip);
 
-		/*
-		 * yes -- clear the xaction descriptor in-use flag
-		 * and free the chunk if required. We can safely
-		 * do some work here and then call buf_item_unpin
-		 * to do the rest because if the if is true, then
-		 * we are holding the buffer locked so no one else
-		 * will be able to bump up the refcount.
-		 */
-		lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) bip);
-		stale = lidp->lid_flags & XFS_LID_BUF_STALE;
+		lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)bip);
 		xfs_trans_free_item(tp, lidp);
+
 		/*
-		 * Since the transaction no longer refers to the buffer,
-		 * the buffer should no longer refer to the transaction.
+		 * Since the transaction no longer refers to the buffer, the
+		 * buffer should no longer refer to the transaction.
 		 */
-		XFS_BUF_SET_FSPRIVATE2(bp, NULL);
+		XFS_BUF_SET_FSPRIVATE2(bip->bli_buf, NULL);
 	}
-	xfs_buf_item_unpin(bip, stale);
-	return;
+	xfs_buf_item_unpin(bip);
 }
 
 /*
...
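Note how the reworked xfs_buf_item_unpin() above now computes its stale state from bip->bli_flags instead of receiving it as an argument, so callers cannot pass a value that disagrees with the item itself. Schematically, with hypothetical types and flag values:

#define BLI_STALE	0x1

struct buf_log_item {
	unsigned int	bli_flags;
};

/* The flag is derived from the item rather than passed in, so there
 * is exactly one source of truth for whether the buffer is stale. */
static void buf_item_unpin(struct buf_log_item *bip)
{
	int stale = bip->bli_flags & BLI_STALE;

	if (stale) {
		/* ... tear down the stale buffer ... */
	}
}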
@@ -675,7 +669,7 @@ static struct xfs_item_ops xfs_buf_item_ops = {
 	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
 					xfs_buf_item_format,
 	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_buf_item_pin,
-	.iop_unpin	= (void(*)(xfs_log_item_t*, int))xfs_buf_item_unpin,
+	.iop_unpin	= (void(*)(xfs_log_item_t*))xfs_buf_item_unpin,
 	.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
 					xfs_buf_item_unpin_remove,
 	.iop_trylock	= (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock,
...
@@ -733,10 +727,7 @@ xfs_buf_item_init(
 	bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
 						    KM_SLEEP);
-	bip->bli_item.li_type = XFS_LI_BUF;
-	bip->bli_item.li_ops = &xfs_buf_item_ops;
-	bip->bli_item.li_mountp = mp;
-	bip->bli_item.li_ailp = mp->m_ail;
+	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
 	bip->bli_buf = bp;
 	xfs_buf_hold(bp);
 	bip->bli_format.blf_type = XFS_LI_BUF;
...
fs/xfs/xfs_buf_item.h
...
@@ -26,7 +26,7 @@ extern kmem_zone_t	*xfs_buf_item_zone;
 * have been logged.
 * For 6.2 and beyond, this is XFS_LI_BUF. We use this to log everything.
 */
-typedef struct xfs_buf_log_format_t {
+typedef struct xfs_buf_log_format {
 	unsigned short	blf_type;	/* buf log item type indicator */
 	unsigned short	blf_size;	/* size of this item */
 	ushort		blf_flags;	/* misc state */
...
fs/xfs/xfs_error.c
...
@@ -186,18 +186,18 @@ xfs_cmn_err(int panic_tag, int level, xfs_mount_t *mp, char *fmt, ...)
 
 void
 xfs_error_report(
-	char		*tag,
-	int		level,
-	xfs_mount_t	*mp,
-	char		*fname,
-	int		linenum,
-	inst_t		*ra)
+	const char		*tag,
+	int			level,
+	struct xfs_mount	*mp,
+	const char		*filename,
+	int			linenum,
+	inst_t			*ra)
 {
 	if (level <= xfs_error_level) {
 		xfs_cmn_err(XFS_PTAG_ERROR_REPORT,
 			    CE_ALERT, mp,
 		"XFS internal error %s at line %d of file %s.  Caller 0x%p\n",
-			    tag, linenum, fname, ra);
+			    tag, linenum, filename, ra);
 
 		xfs_stack_trace();
 	}
...
@@ -205,15 +205,15 @@ xfs_error_report(
 
 void
 xfs_corruption_error(
-	char		*tag,
-	int		level,
-	xfs_mount_t	*mp,
-	void		*p,
-	char		*fname,
-	int		linenum,
-	inst_t		*ra)
+	const char		*tag,
+	int			level,
+	struct xfs_mount	*mp,
+	void			*p,
+	const char		*filename,
+	int			linenum,
+	inst_t			*ra)
 {
 	if (level <= xfs_error_level)
 		xfs_hex_dump(p, 16);
-	xfs_error_report(tag, level, mp, fname, linenum, ra);
+	xfs_error_report(tag, level, mp, filename, linenum, ra);
 }
fs/xfs/xfs_error.h
...
@@ -29,10 +29,11 @@ extern int xfs_error_trap(int);
 
 struct xfs_mount;
 
-extern void xfs_error_report(char *tag, int level, struct xfs_mount *mp,
-				char *fname, int linenum, inst_t *ra);
-extern void xfs_corruption_error(char *tag, int level, struct xfs_mount *mp,
-				void *p, char *fname, int linenum,
-				inst_t *ra);
+extern void xfs_error_report(const char *tag, int level,
+			struct xfs_mount *mp, const char *filename,
+			int linenum, inst_t *ra);
+extern void xfs_corruption_error(const char *tag, int level,
+			struct xfs_mount *mp, void *p, const char *filename,
+			int linenum, inst_t *ra);
 
 #define	XFS_ERROR_REPORT(e, lvl, mp)	\
 	xfs_error_report(e, lvl, mp, __FILE__, __LINE__, __return_address)
...
fs/xfs/xfs_extfree_item.c
...
@@ -106,7 +106,7 @@ xfs_efi_item_pin(xfs_efi_log_item_t *efip)
 */
/*ARGSUSED*/
 STATIC void
-xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale)
+xfs_efi_item_unpin(xfs_efi_log_item_t *efip)
 {
 	struct xfs_ail		*ailp = efip->efi_item.li_ailp;
...
@@ -224,7 +224,7 @@ static struct xfs_item_ops xfs_efi_item_ops = {
 	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
 					xfs_efi_item_format,
 	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_efi_item_pin,
-	.iop_unpin	= (void(*)(xfs_log_item_t*, int))xfs_efi_item_unpin,
+	.iop_unpin	= (void(*)(xfs_log_item_t*))xfs_efi_item_unpin,
 	.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
 					xfs_efi_item_unpin_remove,
 	.iop_trylock	= (uint(*)(xfs_log_item_t*))xfs_efi_item_trylock,
...
@@ -259,10 +259,7 @@ xfs_efi_init(xfs_mount_t *mp,
 					KM_SLEEP);
 	}
 
-	efip->efi_item.li_type = XFS_LI_EFI;
-	efip->efi_item.li_ops = &xfs_efi_item_ops;
-	efip->efi_item.li_mountp = mp;
-	efip->efi_item.li_ailp = mp->m_ail;
+	xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
 	efip->efi_format.efi_nextents = nextents;
 	efip->efi_format.efi_id = (__psint_t)(void*)efip;
...
@@ -428,7 +425,7 @@ xfs_efd_item_pin(xfs_efd_log_item_t *efdp)
 */
/*ARGSUSED*/
 STATIC void
-xfs_efd_item_unpin(xfs_efd_log_item_t *efdp, int stale)
+xfs_efd_item_unpin(xfs_efd_log_item_t *efdp)
 {
 	return;
 }
...
@@ -518,7 +515,7 @@ static struct xfs_item_ops xfs_efd_item_ops = {
 	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
 					xfs_efd_item_format,
 	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_efd_item_pin,
-	.iop_unpin	= (void(*)(xfs_log_item_t*, int))xfs_efd_item_unpin,
+	.iop_unpin	= (void(*)(xfs_log_item_t*))xfs_efd_item_unpin,
 	.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
 					xfs_efd_item_unpin_remove,
 	.iop_trylock	= (uint(*)(xfs_log_item_t*))xfs_efd_item_trylock,
...
@@ -554,10 +551,7 @@ xfs_efd_init(xfs_mount_t *mp,
 					KM_SLEEP);
 	}
 
-	efdp->efd_item.li_type = XFS_LI_EFD;
-	efdp->efd_item.li_ops = &xfs_efd_item_ops;
-	efdp->efd_item.li_mountp = mp;
-	efdp->efd_item.li_ailp = mp->m_ail;
+	xfs_log_item_init(mp, &efdp->efd_item, XFS_LI_EFD, &xfs_efd_item_ops);
 	efdp->efd_efip = efip;
 	efdp->efd_format.efd_nextents = nextents;
 	efdp->efd_format.efd_efi_id = efip->efi_format.efi_id;
...
fs/xfs/xfs_inode.c
...
@@ -2449,6 +2449,8 @@ xfs_iunpin_nowait(
 {
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
 
+	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
+
 	/* Give the log a push to start the unpinning I/O */
 	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
...
fs/xfs/xfs_inode_item.c
...
@@ -543,6 +543,7 @@ xfs_inode_item_pin(
 {
 	ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
 
+	trace_xfs_inode_pin(iip->ili_inode, _RET_IP_);
 	atomic_inc(&iip->ili_inode->i_pincount);
 }
...
@@ -556,11 +557,11 @@ xfs_inode_item_pin(
/* ARGSUSED */
 STATIC void
 xfs_inode_item_unpin(
-	xfs_inode_log_item_t	*iip,
-	int			stale)
+	xfs_inode_log_item_t	*iip)
 {
 	struct xfs_inode	*ip = iip->ili_inode;
 
+	trace_xfs_inode_unpin(ip, _RET_IP_);
 	ASSERT(atomic_read(&ip->i_pincount) > 0);
 	if (atomic_dec_and_test(&ip->i_pincount))
 		wake_up(&ip->i_ipin_wait);
...
@@ -572,7 +573,7 @@ xfs_inode_item_unpin_remove(
 	xfs_inode_log_item_t	*iip,
 	xfs_trans_t		*tp)
 {
-	xfs_inode_item_unpin(iip, 0);
+	xfs_inode_item_unpin(iip);
 }
 
 /*
...
@@ -838,7 +839,7 @@ static struct xfs_item_ops xfs_inode_item_ops = {
 	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
 					xfs_inode_item_format,
 	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_inode_item_pin,
-	.iop_unpin	= (void(*)(xfs_log_item_t*, int))xfs_inode_item_unpin,
+	.iop_unpin	= (void(*)(xfs_log_item_t*))xfs_inode_item_unpin,
 	.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
 					xfs_inode_item_unpin_remove,
 	.iop_trylock	= (uint(*)(xfs_log_item_t*))xfs_inode_item_trylock,
...
@@ -865,17 +866,9 @@ xfs_inode_item_init(
 	ASSERT(ip->i_itemp == NULL);
 	iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP);
 
-	iip->ili_item.li_type = XFS_LI_INODE;
-	iip->ili_item.li_ops = &xfs_inode_item_ops;
-	iip->ili_item.li_mountp = mp;
-	iip->ili_item.li_ailp = mp->m_ail;
 	iip->ili_inode = ip;
-
-	/*
-	   We have zeroed memory. No need ...
-	   iip->ili_extents_buf = NULL;
-	 */
-
+	xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
+						&xfs_inode_item_ops);
 	iip->ili_format.ilf_type = XFS_LI_INODE;
 	iip->ili_format.ilf_ino = ip->i_ino;
 	iip->ili_format.ilf_blkno = ip->i_imap.im_blkno;
...
fs/xfs/xfs_iomap.c
...
@@ -55,71 +55,33 @@
 #define XFS_STRAT_WRITE_IMAPS	2
 #define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP
 
-STATIC int
-xfs_imap_to_bmap(
-	xfs_inode_t	*ip,
-	xfs_off_t	offset,
-	xfs_bmbt_irec_t *imap,
-	xfs_iomap_t	*iomapp,
-	int		imaps,			/* Number of imap entries */
-	int		iomaps,			/* Number of iomap entries */
-	int		flags)
-{
-	xfs_mount_t	*mp = ip->i_mount;
-	int		pbm;
-	xfs_fsblock_t	start_block;
-
-	for (pbm = 0; imaps && pbm < iomaps; imaps--, iomapp++, imap++, pbm++) {
-		iomapp->iomap_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
-		iomapp->iomap_delta = offset - iomapp->iomap_offset;
-		iomapp->iomap_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount);
-		iomapp->iomap_flags = flags;
-
-		if (XFS_IS_REALTIME_INODE(ip)) {
-			iomapp->iomap_flags |= IOMAP_REALTIME;
-			iomapp->iomap_target = mp->m_rtdev_targp;
-		} else {
-			iomapp->iomap_target = mp->m_ddev_targp;
-		}
-		start_block = imap->br_startblock;
-		if (start_block == HOLESTARTBLOCK) {
-			iomapp->iomap_bn = IOMAP_DADDR_NULL;
-			iomapp->iomap_flags |= IOMAP_HOLE;
-		} else if (start_block == DELAYSTARTBLOCK) {
-			iomapp->iomap_bn = IOMAP_DADDR_NULL;
-			iomapp->iomap_flags |= IOMAP_DELAY;
-		} else {
-			iomapp->iomap_bn = xfs_fsb_to_db(ip, start_block);
-			if (ISUNWRITTEN(imap))
-				iomapp->iomap_flags |= IOMAP_UNWRITTEN;
-		}
-
-		offset += iomapp->iomap_bsize - iomapp->iomap_delta;
-	}
-	return pbm;	/* Return the number filled */
-}
+STATIC int
+xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t, int,
+		       struct xfs_bmbt_irec *, int *);
+STATIC int
+xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, int,
+		      struct xfs_bmbt_irec *, int *);
+STATIC int
+xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
+			 struct xfs_bmbt_irec *, int *);
 
 int
 xfs_iomap(
-	xfs_inode_t	*ip,
-	xfs_off_t	offset,
-	ssize_t		count,
-	int		flags,
-	xfs_iomap_t	*iomapp,
-	int		*niomaps)
+	struct xfs_inode	*ip,
+	xfs_off_t		offset,
+	ssize_t			count,
+	int			flags,
+	struct xfs_bmbt_irec	*imap,
+	int			*nimaps,
+	int			*new)
 {
-	xfs_mount_t	*mp = ip->i_mount;
-	xfs_fileoff_t	offset_fsb, end_fsb;
-	int		error = 0;
-	int		lockmode = 0;
-	xfs_bmbt_irec_t	imap;
-	int		nimaps = 1;
-	int		bmapi_flags = 0;
-	int		iomap_flags = 0;
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	int			error = 0;
+	int			lockmode = 0;
+	int			bmapi_flags = 0;
 
 	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
 
+	*new = 0;
+
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
...
@@ -160,8 +122,8 @@ xfs_iomap(
 	error = xfs_bmapi(NULL, ip, offset_fsb,
 			(xfs_filblks_t)(end_fsb - offset_fsb),
-			bmapi_flags,  NULL, 0, &imap,
-			&nimaps, NULL, NULL);
+			bmapi_flags,  NULL, 0, imap,
+			nimaps, NULL, NULL);
 
 	if (error)
 		goto out;
...
@@ -169,46 +131,41 @@ xfs_iomap(
 	switch (flags & (BMAPI_WRITE | BMAPI_ALLOCATE)) {
 	case BMAPI_WRITE:
 		/* If we found an extent, return it */
-		if (nimaps &&
-		    (imap.br_startblock != HOLESTARTBLOCK) &&
-		    (imap.br_startblock != DELAYSTARTBLOCK)) {
-			trace_xfs_iomap_found(ip, offset, count, flags, &imap);
+		if (*nimaps &&
+		    (imap->br_startblock != HOLESTARTBLOCK) &&
+		    (imap->br_startblock != DELAYSTARTBLOCK)) {
+			trace_xfs_iomap_found(ip, offset, count, flags, imap);
 			break;
 		}
 
 		if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) {
 			error = xfs_iomap_write_direct(ip, offset, count, flags,
-						       &imap, &nimaps, nimaps);
+						       imap, nimaps);
 		} else {
 			error = xfs_iomap_write_delay(ip, offset, count, flags,
-						      &imap, &nimaps);
+						      imap, nimaps);
 		}
 		if (!error) {
-			trace_xfs_iomap_alloc(ip, offset, count, flags, &imap);
+			trace_xfs_iomap_alloc(ip, offset, count, flags, imap);
 		}
-		iomap_flags = IOMAP_NEW;
+		*new = 1;
 		break;
 	case BMAPI_ALLOCATE:
 		/* If we found an extent, return it */
 		xfs_iunlock(ip, lockmode);
 		lockmode = 0;
 
-		if (nimaps && !isnullstartblock(imap.br_startblock)) {
-			trace_xfs_iomap_found(ip, offset, count, flags, &imap);
+		if (*nimaps && !isnullstartblock(imap->br_startblock)) {
+			trace_xfs_iomap_found(ip, offset, count, flags, imap);
 			break;
 		}
 
 		error = xfs_iomap_write_allocate(ip, offset, count,
-						 &imap, &nimaps);
+						 imap, nimaps);
 		break;
 	}
 
-	if (nimaps) {
-		*niomaps = xfs_imap_to_bmap(ip, offset, &imap,
-					    iomapp, nimaps, *niomaps, iomap_flags);
-	} else if (niomaps) {
-		*niomaps = 0;
-	}
+	ASSERT(*nimaps <= 1);
 
 out:
 	if (lockmode)
...
@@ -216,7 +173,6 @@ xfs_iomap(
 	return XFS_ERROR(error);
 }
 
-
 STATIC int
 xfs_iomap_eof_align_last_fsb(
 	xfs_mount_t	*mp,
...
@@ -285,15 +241,14 @@ xfs_cmn_err_fsblock_zero(
 	return EFSCORRUPTED;
 }
 
-int
+STATIC int
 xfs_iomap_write_direct(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
 	size_t		count,
 	int		flags,
 	xfs_bmbt_irec_t *ret_imap,
-	int		*nmaps,
-	int		found)
+	int		*nmaps)
 {
 	xfs_mount_t	*mp = ip->i_mount;
 	xfs_fileoff_t	offset_fsb;
...
@@ -330,7 +285,7 @@ xfs_iomap_write_direct(
 		if (error)
 			goto error_out;
 	} else {
-		if (found && (ret_imap->br_startblock == HOLESTARTBLOCK))
+		if (*nmaps && (ret_imap->br_startblock == HOLESTARTBLOCK))
 			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
 					ret_imap->br_blockcount +
 					ret_imap->br_startoff);
...
@@ -485,7 +440,7 @@ xfs_iomap_eof_want_preallocate(
 	return 0;
 }
 
-int
+STATIC int
 xfs_iomap_write_delay(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
...
@@ -588,7 +543,7 @@ xfs_iomap_write_delay(
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
-int
+STATIC int
 xfs_iomap_write_allocate(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
...
fs/xfs/xfs_iomap.h
...
@@ -18,19 +18,6 @@
 #ifndef __XFS_IOMAP_H__
 #define __XFS_IOMAP_H__
 
-#define IOMAP_DADDR_NULL ((xfs_daddr_t) (-1LL))
-
-typedef enum {				/* iomap_flags values */
-	IOMAP_READ =		0,	/* mapping for a read */
-	IOMAP_HOLE =		0x02,	/* mapping covers a hole  */
-	IOMAP_DELAY =		0x04,	/* mapping covers delalloc region  */
-	IOMAP_REALTIME =	0x10,	/* mapping on the realtime device  */
-	IOMAP_UNWRITTEN =	0x20,	/* mapping covers allocated */
-					/* but uninitialized file data  */
-	IOMAP_NEW =		0x40	/* just allocate */
-} iomap_flags_t;
-
 typedef enum {
 	/* base extent manipulation calls */
 	BMAPI_READ = (1 << 0),		/* read extents */
...
@@ -52,43 +39,11 @@ typedef enum {
 	{ BMAPI_MMAP,	"MMAP" }, \
 	{ BMAPI_TRYLOCK, "TRYLOCK" }
 
-/*
- * xfs_iomap_t:  File system I/O map
- *
- * The iomap_bn field is expressed in 512-byte blocks, and is where the
- * mapping starts on disk.
- *
- * The iomap_offset, iomap_bsize and iomap_delta fields are in bytes.
- * iomap_offset is the offset of the mapping in the file itself.
- * iomap_bsize is the size of the mapping,  iomap_delta is the
- * desired data's offset into the mapping, given the offset supplied
- * to the file I/O map routine.
- *
- * When a request is made to read beyond the logical end of the object,
- * iomap_size may be set to 0, but iomap_offset and iomap_length should be set
- * to the actual amount of underlying storage that has been allocated, if any.
- */
-typedef struct xfs_iomap {
-	xfs_daddr_t		iomap_bn;	/* first 512B blk of mapping */
-	xfs_buftarg_t		*iomap_target;
-	xfs_off_t		iomap_offset;	/* offset of mapping, bytes */
-	xfs_off_t		iomap_bsize;	/* size of mapping, bytes */
-	xfs_off_t		iomap_delta;	/* offset into mapping, bytes */
-	iomap_flags_t		iomap_flags;
-} xfs_iomap_t;
-
 struct xfs_inode;
 struct xfs_bmbt_irec;
 
-extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int,
-		     struct xfs_iomap *, int *);
-extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
-				  int, struct xfs_bmbt_irec *, int *, int);
-extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, int,
-				 struct xfs_bmbt_irec *, int *);
-extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
-				struct xfs_bmbt_irec *, int *);
+extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int,
+		     struct xfs_bmbt_irec *, int *, int *);
 extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
 
#endif /* __XFS_IOMAP_H__*/
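With xfs_iomap_t gone, callers of xfs_iomap() receive the raw struct xfs_bmbt_irec (a file-offset/start-block/length extent record) plus a separate "new" flag, and perform any byte-level conversion themselves. A rough sketch of what such a caller-side conversion could look like; the field names mirror the record above, but the helper and block-size assumption are illustrative, not the kernel's API:

#include <stdint.h>

/* Simplified extent record in filesystem blocks, like xfs_bmbt_irec. */
struct extent_rec {
	uint64_t	br_startoff;	/* file offset, in fs blocks */
	uint64_t	br_startblock;	/* disk block, or hole/delay codes */
	uint64_t	br_blockcount;	/* length, in fs blocks */
};

#define FSB_SHIFT	12		/* assume 4KiB filesystem blocks */

/* Convert one extent record to a byte-based mapping at the caller. */
static void rec_to_bytes(const struct extent_rec *rec, uint64_t offset,
			 uint64_t *map_off, uint64_t *map_len,
			 uint64_t *delta)
{
	*map_off = rec->br_startoff << FSB_SHIFT;
	*map_len = rec->br_blockcount << FSB_SHIFT;
	*delta   = offset - *map_off;	/* where 'offset' sits in the map */
}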
fs/xfs/xfs_log.c
(diff collapsed; not shown here)
fs/xfs/xfs_log.h
...
@@ -110,6 +110,12 @@ typedef struct xfs_log_iovec {
 	uint		i_type;		/* type of region */
 } xfs_log_iovec_t;
 
+struct xfs_log_vec {
+	struct xfs_log_vec	*lv_next;	/* next lv in build list */
+	int			lv_niovecs;	/* number of iovecs in lv */
+	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
+};
+
 /*
 * Structure used to pass callback function and the function's argument
 * to the log manager.
...
@@ -126,6 +132,13 @@ typedef struct xfs_log_callback {
 struct xfs_mount;
 struct xlog_in_core;
 struct xlog_ticket;
+struct xfs_log_item;
+struct xfs_item_ops;
+
+void	xfs_log_item_init(struct xfs_mount	*mp,
+			struct xfs_log_item	*item,
+			int			type,
+			struct xfs_item_ops	*ops);
 
 xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
 		       struct xlog_ticket *ticket,
...
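The new struct xfs_log_vec chains per-item iovec arrays together while a transaction is being formatted. A small user-space sketch of building and walking such a chain (payloads and the sizing loop are illustrative):

#include <stdio.h>

struct log_iovec { const void *i_addr; int i_len; };

struct log_vec {
	struct log_vec		*lv_next;	/* next lv in build list */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct log_iovec	*lv_iovecp;	/* iovec array */
};

int main(void)
{
	struct log_iovec iov_a[1] = { { "item A", 6 } };
	struct log_iovec iov_b[1] = { { "item B", 6 } };
	struct log_vec b = { NULL, 1, iov_b };
	struct log_vec a = { &b, 1, iov_a };
	struct log_vec *lv;
	int total = 0;

	/* Walk the chain the way a log writer would to size the record. */
	for (lv = &a; lv != NULL; lv = lv->lv_next)
		for (int i = 0; i < lv->lv_niovecs; i++)
			total += lv->lv_iovecp[i].i_len;

	printf("total payload: %d bytes\n", total);
	return 0;
}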
fs/xfs/xfs_log_priv.h
...
@@ -396,9 +396,7 @@ typedef struct log {
 	struct xfs_buf_cancel	**l_buf_cancel_table;
 	int			l_iclog_hsize;  /* size of iclog header */
 	int			l_iclog_heads;	/* # of iclog header sectors */
-	uint			l_sectbb_log;   /* log2 of sector size in BBs */
-	uint			l_sectbb_mask;  /* sector size (in BBs)
-						 * alignment mask */
+	uint			l_sectBBsize;   /* sector size in BBs (2^n) */
 	int			l_iclog_size;	/* size of log in bytes */
 	int			l_iclog_size_log; /* log power size of log */
 	int			l_iclog_bufs;	/* number of iclog buffers */
...
@@ -449,6 +447,14 @@ extern void	 xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
 
 extern kmem_zone_t *xfs_log_ticket_zone;
 
+static inline void
+xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
+{
+	*ptr += bytes;
+	*len -= bytes;
+	*off += bytes;
+}
+
 /*
 * Unmount record type is used as a pseudo transaction type for the ticket.
 * It's value must be outside the range of XFS_TRANS_* values.
...
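xlog_write_adv_cnt() bundles the three updates every copy into a log record must make: advance the destination pointer, shrink the remaining length, grow the record offset. A minimal usage sketch (the kernel version relies on GCC's void-pointer arithmetic; this portable sketch spells out the cast):

#include <stdio.h>
#include <string.h>

static inline void
xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
{
	*ptr = (char *)*ptr + bytes;	/* void* arithmetic, spelled out */
	*len -= bytes;
	*off += bytes;
}

int main(void)
{
	char record[64];
	void *ptr = record;
	int len = sizeof(record), off = 0;

	memcpy(ptr, "hdr", 3);
	xlog_write_adv_cnt(&ptr, &len, &off, 3);
	memcpy(ptr, "body", 4);
	xlog_write_adv_cnt(&ptr, &len, &off, 4);

	printf("wrote %d bytes, %d left\n", off, len);
	return 0;
}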
fs/xfs/xfs_log_recover.c
(diff collapsed; not shown here)
fs/xfs/xfs_mount.c
...
@@ -1405,13 +1405,6 @@ xfs_mountfs(
 		xfs_qm_mount_quotas(mp);
 	}
 
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
-	if (XFS_IS_QUOTA_ON(mp))
-		xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on");
-	else
-		xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on");
-#endif
-
 	/*
 	 * Now we are mounted, reserve a small amount of unused space for
 	 * privileged transactions. This is needed so that transaction
...
fs/xfs/xfs_quota.h
...
@@ -201,9 +201,6 @@ typedef struct xfs_qoff_logformat {
 #define XFS_QMOPT_FORCE_RES	0x0000010 /* ignore quota limits */
 #define XFS_QMOPT_DQSUSER	0x0000020 /* don't cache super users dquot */
 #define XFS_QMOPT_SBVERSION	0x0000040 /* change superblock version num */
-#define XFS_QMOPT_QUOTAOFF	0x0000080 /* quotas are being turned off */
-#define XFS_QMOPT_UMOUNTING	0x0000100 /* filesys is being unmounted */
-#define XFS_QMOPT_DOLOG		0x0000200 /* log buf changes (in quotacheck) */
 #define XFS_QMOPT_DOWARN	0x0000400 /* increase warning cnt if needed */
 #define XFS_QMOPT_DQREPAIR	0x0001000 /* repair dquot if damaged */
 #define XFS_QMOPT_GQUOTA	0x0002000 /* group dquot requested */
...
fs/xfs/xfs_trans.c
(diff collapsed; not shown here)
fs/xfs/xfs_trans.h
...
@@ -49,6 +49,15 @@ typedef struct xfs_trans_header {
 #define	XFS_LI_DQUOT		0x123d
 #define	XFS_LI_QUOTAOFF		0x123e
 
+#define XFS_LI_TYPE_DESC \
+	{ XFS_LI_EFI,		"XFS_LI_EFI" }, \
+	{ XFS_LI_EFD,		"XFS_LI_EFD" }, \
+	{ XFS_LI_IUNLINK,	"XFS_LI_IUNLINK" }, \
+	{ XFS_LI_INODE,		"XFS_LI_INODE" }, \
+	{ XFS_LI_BUF,		"XFS_LI_BUF" }, \
+	{ XFS_LI_DQUOT,		"XFS_LI_DQUOT" }, \
+	{ XFS_LI_QUOTAOFF,	"XFS_LI_QUOTAOFF" }
+
 /*
 * Transaction types.  Used to distinguish types of buffers.
 */
...
@@ -159,7 +168,6 @@ typedef struct xfs_log_item_desc {
 #define XFS_LID_DIRTY		0x1
 #define XFS_LID_PINNED		0x2
-#define XFS_LID_BUF_STALE	0x8
 
 /*
 * This structure is used to maintain a chunk list of log_item_desc
...
@@ -833,7 +841,7 @@ typedef struct xfs_item_ops {
 	uint (*iop_size)(xfs_log_item_t *);
 	void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
 	void (*iop_pin)(xfs_log_item_t *);
-	void (*iop_unpin)(xfs_log_item_t *, int);
+	void (*iop_unpin)(xfs_log_item_t *);
 	void (*iop_unpin_remove)(xfs_log_item_t *, struct xfs_trans *);
 	uint (*iop_trylock)(xfs_log_item_t *);
 	void (*iop_unlock)(xfs_log_item_t *);
...
@@ -846,7 +854,7 @@ typedef struct xfs_item_ops {
 #define IOP_SIZE(ip)		(*(ip)->li_ops->iop_size)(ip)
 #define IOP_FORMAT(ip,vp)	(*(ip)->li_ops->iop_format)(ip, vp)
 #define IOP_PIN(ip)		(*(ip)->li_ops->iop_pin)(ip)
-#define IOP_UNPIN(ip, flags)	(*(ip)->li_ops->iop_unpin)(ip, flags)
+#define IOP_UNPIN(ip)		(*(ip)->li_ops->iop_unpin)(ip)
 #define IOP_UNPIN_REMOVE(ip,tp) (*(ip)->li_ops->iop_unpin_remove)(ip, tp)
 #define IOP_TRYLOCK(ip)		(*(ip)->li_ops->iop_trylock)(ip)
 #define IOP_UNLOCK(ip)		(*(ip)->li_ops->iop_unlock)(ip)
...
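Tables like XFS_LI_TYPE_DESC pair a numeric tag with its name so tracing can print symbolic log item types. Outside the kernel's __print_symbolic() machinery, the same lookup can be sketched as below; the two values shown in the hunk above (0x123d, 0x123e) are from the source, the others here are illustrative:

#include <stdio.h>

struct type_desc { int value; const char *name; };

#define LI_TYPE_DESC \
	{ 0x123b, "XFS_LI_INODE" }, \
	{ 0x123c, "XFS_LI_BUF" }, \
	{ 0x123d, "XFS_LI_DQUOT" }, \
	{ 0x123e, "XFS_LI_QUOTAOFF" }

static const struct type_desc li_types[] = { LI_TYPE_DESC };

static const char *li_type_name(int type)
{
	for (size_t i = 0; i < sizeof(li_types) / sizeof(li_types[0]); i++)
		if (li_types[i].value == type)
			return li_types[i].name;
	return "?";
}

int main(void)
{
	printf("%s\n", li_type_name(0x123b));	/* prints XFS_LI_INODE */
	return 0;
}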
fs/xfs/xfs_trans_buf.c
...
@@ -40,11 +40,51 @@
 #include "xfs_rw.h"
 #include "xfs_trace.h"
 
+/*
+ * Check to see if a buffer matching the given parameters is already
+ * a part of the given transaction.
+ */
+STATIC struct xfs_buf *
+xfs_trans_buf_item_match(
+	struct xfs_trans	*tp,
+	struct xfs_buftarg	*target,
+	xfs_daddr_t		blkno,
+	int			len)
+{
+	xfs_log_item_chunk_t	*licp;
+	xfs_log_item_desc_t	*lidp;
+	xfs_buf_log_item_t	*blip;
+	int			i;
 
-STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
-		xfs_daddr_t, int);
-STATIC xfs_buf_t *xfs_trans_buf_item_match_all(xfs_trans_t *, xfs_buftarg_t *,
-		xfs_daddr_t, int);
+	len = BBTOB(len);
+	for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
+		if (xfs_lic_are_all_free(licp)) {
+			ASSERT(licp == &tp->t_items);
+			ASSERT(licp->lic_next == NULL);
+			return NULL;
+		}
+
+		for (i = 0; i < licp->lic_unused; i++) {
+			/*
+			 * Skip unoccupied slots.
+			 */
+			if (xfs_lic_isfree(licp, i))
+				continue;
+
+			lidp = xfs_lic_slot(licp, i);
+			blip = (xfs_buf_log_item_t *)lidp->lid_item;
+			if (blip->bli_item.li_type != XFS_LI_BUF)
+				continue;
+
+			if (XFS_BUF_TARGET(blip->bli_buf) == target &&
+			    XFS_BUF_ADDR(blip->bli_buf) == blkno &&
+			    XFS_BUF_COUNT(blip->bli_buf) == len)
+				return blip->bli_buf;
+		}
+	}
+
+	return NULL;
+}
 
 /*
 * Add the locked buffer to the transaction.
...
@@ -112,14 +152,6 @@ xfs_trans_bjoin(
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
- * Use the fast path function xfs_trans_buf_item_match() or the buffer
- * cache routine incore_match() to find the buffer
- * if it is already owned by this transaction.
- *
- * If we don't already own the buffer, use get_buf() to get it.
- * If it doesn't yet have an associated xfs_buf_log_item structure,
- * then allocate one and add the item to this transaction.
- *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
...
@@ -149,11 +181,7 @@ xfs_trans_get_buf(xfs_trans_t	*tp,
 	 * have it locked.  In this case we just increment the lock
 	 * recursion count and return the buffer to the caller.
 	 */
-	if (tp->t_items.lic_next == NULL) {
-		bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
-	} else {
-		bp = xfs_trans_buf_item_match_all(tp, target_dev, blkno, len);
-	}
+	bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
 	if (bp != NULL) {
 		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
 		if (XFS_FORCED_SHUTDOWN(tp->t_mountp))
...
@@ -259,14 +287,6 @@ int	xfs_error_mod = 33;
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
- * Use the fast path function xfs_trans_buf_item_match() or the buffer
- * cache routine incore_match() to find the buffer
- * if it is already owned by this transaction.
- *
- * If we don't already own the buffer, use read_buf() to get it.
- * If it doesn't yet have an associated xfs_buf_log_item structure,
- * then allocate one and add the item to this transaction.
- *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
...
@@ -328,11 +348,7 @@ xfs_trans_read_buf(
 	 * If the buffer is not yet read in, then we read it in, increment
 	 * the lock recursion count, and return it to the caller.
 	 */
-	if (tp->t_items.lic_next == NULL) {
-		bp = xfs_trans_buf_item_match(tp, target, blkno, len);
-	} else {
-		bp = xfs_trans_buf_item_match_all(tp, target, blkno, len);
-	}
+	bp = xfs_trans_buf_item_match(tp, target, blkno, len);
 	if (bp != NULL) {
 		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
 		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
...
@@ -696,7 +712,6 @@ xfs_trans_log_buf(xfs_trans_t	*tp,
 
 	tp->t_flags |= XFS_TRANS_DIRTY;
 	lidp->lid_flags |= XFS_LID_DIRTY;
-	lidp->lid_flags &= ~XFS_LID_BUF_STALE;
 	bip->bli_flags |= XFS_BLI_LOGGED;
 	xfs_buf_item_log(bip, first, last);
 }
...
@@ -782,7 +797,7 @@ xfs_trans_binval(
 	bip->bli_format.blf_flags |= XFS_BLI_CANCEL;
 	memset((char *)(bip->bli_format.blf_data_map), 0,
 	      (bip->bli_format.blf_map_size * sizeof(uint)));
-	lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
+	lidp->lid_flags |= XFS_LID_DIRTY;
 	tp->t_flags |= XFS_TRANS_DIRTY;
 }
...
@@ -902,111 +917,3 @@ xfs_trans_dquot_buf(
 	bip->bli_format.blf_flags |= type;
 }
-
-/*
- * Check to see if a buffer matching the given parameters is already
- * a part of the given transaction.  Only check the first, embedded
- * chunk, since we don't want to spend all day scanning large transactions.
- */
-STATIC xfs_buf_t *
-xfs_trans_buf_item_match(
-	xfs_trans_t	*tp,
-	xfs_buftarg_t	*target,
-	xfs_daddr_t	blkno,
-	int		len)
-{
-	xfs_log_item_chunk_t	*licp;
-	xfs_log_item_desc_t	*lidp;
-	xfs_buf_log_item_t	*blip;
-	xfs_buf_t		*bp;
-	int			i;
-
-	bp = NULL;
-	len = BBTOB(len);
-	licp = &tp->t_items;
-	if (!xfs_lic_are_all_free(licp)) {
-		for (i = 0; i < licp->lic_unused; i++) {
-			/*
-			 * Skip unoccupied slots.
-			 */
-			if (xfs_lic_isfree(licp, i)) {
-				continue;
-			}
-
-			lidp = xfs_lic_slot(licp, i);
-			blip = (xfs_buf_log_item_t *)lidp->lid_item;
-			if (blip->bli_item.li_type != XFS_LI_BUF) {
-				continue;
-			}
-
-			bp = blip->bli_buf;
-			if ((XFS_BUF_TARGET(bp) == target) &&
-			    (XFS_BUF_ADDR(bp) == blkno) &&
-			    (XFS_BUF_COUNT(bp) == len)) {
-				/*
-				 * We found it.  Break out and
-				 * return the pointer to the buffer.
-				 */
-				break;
-			} else {
-				bp = NULL;
-			}
-		}
-	}
-	return bp;
-}
-
-/*
- * Check to see if a buffer matching the given parameters is already
- * a part of the given transaction.  Check all the chunks, we
- * want to be thorough.
- */
-STATIC xfs_buf_t *
-xfs_trans_buf_item_match_all(
-	xfs_trans_t	*tp,
-	xfs_buftarg_t	*target,
-	xfs_daddr_t	blkno,
-	int		len)
-{
-	xfs_log_item_chunk_t	*licp;
-	xfs_log_item_desc_t	*lidp;
-	xfs_buf_log_item_t	*blip;
-	xfs_buf_t		*bp;
-	int			i;
-
-	bp = NULL;
-	len = BBTOB(len);
-	for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
-		if (xfs_lic_are_all_free(licp)) {
-			ASSERT(licp == &tp->t_items);
-			ASSERT(licp->lic_next == NULL);
-			return NULL;
-		}
-		for (i = 0; i < licp->lic_unused; i++) {
-			/*
-			 * Skip unoccupied slots.
-			 */
-			if (xfs_lic_isfree(licp, i)) {
-				continue;
-			}
-			lidp = xfs_lic_slot(licp, i);
-			blip = (xfs_buf_log_item_t *)lidp->lid_item;
-			if (blip->bli_item.li_type != XFS_LI_BUF) {
-				continue;
-			}
-
-			bp = blip->bli_buf;
-			if ((XFS_BUF_TARGET(bp) == target) &&
-			    (XFS_BUF_ADDR(bp) == blkno) &&
-			    (XFS_BUF_COUNT(bp) == len)) {
-				/*
-				 * We found it.  Break out and
-				 * return the pointer to the buffer.
-				 */
-				return bp;
-			}
-		}
-	}
-	return NULL;
-}
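The two nearly identical scanners removed above collapse into the single linear search now defined at the top of the file, so callers no longer have to choose between a first-chunk-only fast path and a full scan. The shape of the consolidation, in miniature with hypothetical types:

struct chunk { struct chunk *next; int nused; };

static int match_in_chunk(struct chunk *c, int key)
{
	(void)c;
	return key == 0;	/* placeholder match test */
}

/* One search that always walks every chunk; no truncated fast path. */
static struct chunk *find(struct chunk *head, int key)
{
	struct chunk *c;

	for (c = head; c != NULL; c = c->next)
		if (match_in_chunk(c, key))
			return c;
	return NULL;
}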