Commit dcd7b4e5 authored by Niv Sardi

Merge branch 'master' of git://oss.sgi.com:8090/xfs/linux-2.6

@@ -229,10 +229,6 @@ The following sysctls are available for the XFS filesystem:
 	ISGID bit is cleared if the irix_sgid_inherit compatibility sysctl
 	is set.
 
-  fs.xfs.restrict_chown		(Min: 0  Default: 1  Max: 1)
-	Controls whether unprivileged users can use chown to "give away"
-	a file to another user.
-
   fs.xfs.inherit_sync		(Min: 0  Default: 1  Max: 1)
 	Setting this to "1" will cause the "sync" flag set
 	by the xfs_io(8) chattr command on a directory to be
......
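As a quick illustration of how these tunables are consumed (this example is mine, not part of the patch): each entry above is exposed as a file under /proc/sys/fs/xfs/, so a value such as inherit_sync can be inspected from userspace like this, assuming procfs is mounted:

#include <stdio.h>

int main(void)
{
	/* read one XFS sysctl; the path follows the docs above */
	FILE *f = fopen("/proc/sys/fs/xfs/inherit_sync", "r");
	int val;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("fs.xfs.inherit_sync = %d\n", val);
	fclose(f);
	return 0;
}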
@@ -108,84 +108,100 @@ static void wake_up_inode(struct inode *inode)
 	wake_up_bit(&inode->i_state, __I_LOCK);
 }
 
-static struct inode *alloc_inode(struct super_block *sb)
+/**
+ * inode_init_always - perform inode structure initialisation
+ * @sb: superblock inode belongs to
+ * @inode: inode to initialise
+ *
+ * These are initializations that need to be done on every inode
+ * allocation as the fields are not initialised by slab allocation.
+ */
+struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
 {
 	static const struct address_space_operations empty_aops;
 	static struct inode_operations empty_iops;
 	static const struct file_operations empty_fops;
-	struct inode *inode;
-
-	if (sb->s_op->alloc_inode)
-		inode = sb->s_op->alloc_inode(sb);
-	else
-		inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
-
-	if (inode) {
-		struct address_space * const mapping = &inode->i_data;
+	struct address_space * const mapping = &inode->i_data;
 
 	inode->i_sb = sb;
 	inode->i_blkbits = sb->s_blocksize_bits;
 	inode->i_flags = 0;
 	atomic_set(&inode->i_count, 1);
 	inode->i_op = &empty_iops;
 	inode->i_fop = &empty_fops;
 	inode->i_nlink = 1;
 	atomic_set(&inode->i_writecount, 0);
 	inode->i_size = 0;
 	inode->i_blocks = 0;
 	inode->i_bytes = 0;
 	inode->i_generation = 0;
 #ifdef CONFIG_QUOTA
 	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
 #endif
 	inode->i_pipe = NULL;
 	inode->i_bdev = NULL;
 	inode->i_cdev = NULL;
 	inode->i_rdev = 0;
 	inode->dirtied_when = 0;
 	if (security_inode_alloc(inode)) {
 		if (inode->i_sb->s_op->destroy_inode)
 			inode->i_sb->s_op->destroy_inode(inode);
 		else
 			kmem_cache_free(inode_cachep, (inode));
 		return NULL;
 	}
 	spin_lock_init(&inode->i_lock);
 	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
 	mutex_init(&inode->i_mutex);
 	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
 	init_rwsem(&inode->i_alloc_sem);
 	lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
 	mapping->a_ops = &empty_aops;
 	mapping->host = inode;
 	mapping->flags = 0;
 	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
 	mapping->assoc_mapping = NULL;
 	mapping->backing_dev_info = &default_backing_dev_info;
 	mapping->writeback_index = 0;
 
 	/*
 	 * If the block_device provides a backing_dev_info for client
 	 * inodes then use that.  Otherwise the inode shares the bdev's
 	 * backing_dev_info.
 	 */
 	if (sb->s_bdev) {
 		struct backing_dev_info *bdi;
 
 		bdi = sb->s_bdev->bd_inode_backing_dev_info;
 		if (!bdi)
 			bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
 		mapping->backing_dev_info = bdi;
 	}
-	}
 	inode->i_private = NULL;
 	inode->i_mapping = mapping;
 
 	return inode;
 }
+EXPORT_SYMBOL(inode_init_always);
+
+static struct inode *alloc_inode(struct super_block *sb)
+{
+	struct inode *inode;
+
+	if (sb->s_op->alloc_inode)
+		inode = sb->s_op->alloc_inode(sb);
+	else
+		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
+
+	if (inode)
+		return inode_init_always(sb, inode);
+	return NULL;
+}
 
 void destroy_inode(struct inode *inode)
 {
@@ -196,6 +212,7 @@ void destroy_inode(struct inode *inode)
 	else
 		kmem_cache_free(inode_cachep, (inode));
 }
+EXPORT_SYMBOL(destroy_inode);
 
 /*
@@ -534,6 +551,49 @@ static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head
 	return node ? inode : NULL;
 }
+static unsigned long hash(struct super_block *sb, unsigned long hashval)
+{
+	unsigned long tmp;
+
+	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+			L1_CACHE_BYTES;
+	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
+	return tmp & I_HASHMASK;
+}
+
+static inline void
+__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
+			struct inode *inode)
+{
+	inodes_stat.nr_inodes++;
+	list_add(&inode->i_list, &inode_in_use);
+	list_add(&inode->i_sb_list, &sb->s_inodes);
+	if (head)
+		hlist_add_head(&inode->i_hash, head);
+}
+
+/**
+ * inode_add_to_lists - add a new inode to relevant lists
+ * @sb: superblock inode belongs to
+ * @inode: inode to mark in use
+ *
+ * When an inode is allocated it needs to be accounted for, added to the in use
+ * list, the owning superblock and the inode hash. This needs to be done under
+ * the inode_lock, so export a function to do this rather than the inode_lock
+ * itself. We calculate the hash list to add to here so it is all internal to
+ * this function, which requires the caller to have already set up the inode
+ * number in the inode to add.
+ */
+void inode_add_to_lists(struct super_block *sb, struct inode *inode)
+{
+	struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);
+
+	spin_lock(&inode_lock);
+	__inode_add_to_lists(sb, head, inode);
+	spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL_GPL(inode_add_to_lists);
 /**
  * new_inode - obtain an inode
  * @sb: superblock
@@ -561,9 +621,7 @@ struct inode *new_inode(struct super_block *sb)
 	inode = alloc_inode(sb);
 	if (inode) {
 		spin_lock(&inode_lock);
-		inodes_stat.nr_inodes++;
-		list_add(&inode->i_list, &inode_in_use);
-		list_add(&inode->i_sb_list, &sb->s_inodes);
+		__inode_add_to_lists(sb, NULL, inode);
 		inode->i_ino = ++last_ino;
 		inode->i_state = 0;
 		spin_unlock(&inode_lock);
@@ -622,10 +680,7 @@ static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *h
 			if (set(inode, data))
 				goto set_failed;
 
-			inodes_stat.nr_inodes++;
-			list_add(&inode->i_list, &inode_in_use);
-			list_add(&inode->i_sb_list, &sb->s_inodes);
-			hlist_add_head(&inode->i_hash, head);
+			__inode_add_to_lists(sb, head, inode);
 			inode->i_state = I_LOCK|I_NEW;
 			spin_unlock(&inode_lock);
@@ -671,10 +726,7 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
 		old = find_inode_fast(sb, head, ino);
 		if (!old) {
 			inode->i_ino = ino;
-			inodes_stat.nr_inodes++;
-			list_add(&inode->i_list, &inode_in_use);
-			list_add(&inode->i_sb_list, &sb->s_inodes);
-			hlist_add_head(&inode->i_hash, head);
+			__inode_add_to_lists(sb, head, inode);
 			inode->i_state = I_LOCK|I_NEW;
 			spin_unlock(&inode_lock);
@@ -698,16 +750,6 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
 	return inode;
 }
 
-static unsigned long hash(struct super_block *sb, unsigned long hashval)
-{
-	unsigned long tmp;
-
-	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
-			L1_CACHE_BYTES;
-	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
-	return tmp & I_HASHMASK;
-}
-
 /**
  * iunique - get a unique inode number
  * @sb: superblock
......
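To see why inode_init_always() and inode_add_to_lists() are exported, here is a minimal sketch (mine, not from the commit) of how a filesystem that allocates inodes outside the normal iget path, as XFS does, might use them; myfs_inode and myfs_inode_cache are hypothetical names:

struct myfs_inode {
	unsigned long	fs_ino;		/* fs-private fields ... */
	struct inode	vfs_inode;	/* embedded VFS inode */
};

static struct kmem_cache *myfs_inode_cache;

static struct inode *
myfs_inode_alloc(struct super_block *sb, unsigned long ino)
{
	struct myfs_inode *mi;

	mi = kmem_cache_alloc(myfs_inode_cache, GFP_KERNEL);
	if (!mi)
		return NULL;

	/* initialise the embedded VFS inode just as alloc_inode() would */
	if (!inode_init_always(sb, &mi->vfs_inode)) {
		/*
		 * On failure inode_init_always() has already disposed of
		 * the inode via ->destroy_inode, so do not free it again.
		 */
		return NULL;
	}

	/* the inode number must be set before hashing */
	mi->vfs_inode.i_ino = ino;

	/* account the inode and hash it, all under inode_lock */
	inode_add_to_lists(sb, &mi->vfs_inode);
	return &mi->vfs_inode;
}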
@@ -91,7 +91,8 @@ xfs-y				+= xfs_alloc.o \
 				   xfs_dmops.o \
 				   xfs_qmops.o
 
-xfs-$(CONFIG_XFS_TRACE)		+= xfs_dir2_trace.o
+xfs-$(CONFIG_XFS_TRACE)		+= xfs_btree_trace.o \
+				   xfs_dir2_trace.o
 
 # Objects in linux/
 xfs-y				+= $(addprefix $(XFS_LINUX)/, \
@@ -106,6 +107,7 @@ xfs-y				+= $(addprefix $(XFS_LINUX)/, \
 				   xfs_iops.o \
 				   xfs_lrw.o \
 				   xfs_super.o \
+				   xfs_sync.o \
 				   xfs_vnode.o \
 				   xfs_xattr.o)
......
@@ -191,7 +191,7 @@ xfs_setfilesize(
 		ip->i_d.di_size = isize;
 		ip->i_update_core = 1;
 		ip->i_update_size = 1;
-		mark_inode_dirty_sync(ioend->io_inode);
+		xfs_mark_inode_dirty_sync(ip);
 	}
 
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
......
@@ -24,15 +24,7 @@
  * Credentials
  */
 typedef struct cred {
 	/* EMPTY */
 } cred_t;
 
-extern struct cred *sys_cred;
-
-/* this is a hack.. (assumes sys_cred is the only cred_t in the system) */
-static inline int capable_cred(cred_t *cr, int cid)
-{
-	return (cr == sys_cred) ? 1 : capable(cid);
-}
-
 #endif	/* __XFS_CRED_H__ */
@@ -26,7 +26,6 @@
  */
 xfs_param_t xfs_params = {
 			  /* MIN	DFLT	MAX */
-	.restrict_chown	= {	0,	1,	1 },
 	.sgid_inherit	= {	0,	0,	1 },
 	.symlink_mode	= {	0,	0,	1 },
 	.panic_mask	= {	0,	0,	255 },
@@ -43,10 +42,3 @@ xfs_param_t xfs_params = {
 	.inherit_nodfrg	= {	0,	1,	1 },
 	.fstrm_timer	= {	1,	30*100,	3600*100 },
 };
-
-/*
- * Global system credential structure.
- */
-static cred_t sys_cred_val;
-cred_t *sys_cred = &sys_cred_val;
@@ -19,6 +19,5 @@
 #define __XFS_GLOBALS_H__
 
 extern uint64_t xfs_panic_mask;		/* set to cause more panics */
-extern struct cred *sys_cred;
 
 #endif	/* __XFS_GLOBALS_H__ */
@@ -691,8 +691,7 @@ xfs_ioc_space(
 	if (ioflags & IO_INVIS)
 		attr_flags |= XFS_ATTR_DMI;
 
-	error = xfs_change_file_space(ip, cmd, &bf, filp->f_pos,
-					      NULL, attr_flags);
+	error = xfs_change_file_space(ip, cmd, &bf, filp->f_pos, attr_flags);
 	return -error;
 }
 
@@ -1007,7 +1006,7 @@ xfs_ioctl_setattr(
 	 * to the file owner ID, except in cases where the
 	 * CAP_FSETID capability is applicable.
 	 */
-	if (current->fsuid != ip->i_d.di_uid && !capable(CAP_FOWNER)) {
+	if (current_fsuid() != ip->i_d.di_uid && !capable(CAP_FOWNER)) {
 		code = XFS_ERROR(EPERM);
 		goto error_return;
 	}
@@ -1104,10 +1103,6 @@ xfs_ioctl_setattr(
 
 	/*
 	 * Change file ownership.  Must be the owner or privileged.
-	 * If the system was configured with the "restricted_chown"
-	 * option, the owner is not permitted to give away the file,
-	 * and can change the group id only to a group of which he
-	 * or she is a member.
 	 */
 	if (mask & FSX_PROJID) {
 		/*
......
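The ownership test above follows the standard owner-or-capability pattern; distilled into a sketch of mine (the helper name is hypothetical, not in the patch):

static int
may_change_xfs_attr(struct xfs_inode *ip)
{
	/* allow the file owner, or a task holding CAP_FOWNER */
	if (current_fsuid() != ip->i_d.di_uid && !capable(CAP_FOWNER))
		return XFS_ERROR(EPERM);
	return 0;
}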
@@ -64,14 +64,14 @@ xfs_synchronize_atime(
 {
 	struct inode		*inode = VFS_I(ip);
 
-	if (inode) {
+	if (!(inode->i_state & I_CLEAR)) {
 		ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
 		ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
 	}
 }
 
 /*
- * If the linux inode exists, mark it dirty.
+ * If the linux inode is valid, mark it dirty.
  * Used when committing a dirty inode into a transaction so that
  * the inode will get written back by the linux code
  */
@@ -81,7 +81,7 @@ xfs_mark_inode_dirty_sync(
 {
 	struct inode		*inode = VFS_I(ip);
 
-	if (inode)
+	if (!(inode->i_state & (I_WILL_FREE|I_FREEING|I_CLEAR)))
 		mark_inode_dirty_sync(inode);
 }
 
@@ -128,7 +128,7 @@ xfs_ichgtime(
 	if (sync_it) {
 		SYNCHRONIZE();
 		ip->i_update_core = 1;
-		mark_inode_dirty_sync(inode);
+		xfs_mark_inode_dirty_sync(ip);
 	}
 }
 
@@ -601,7 +601,7 @@ xfs_vn_setattr(
 	struct dentry	*dentry,
 	struct iattr	*iattr)
 {
-	return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0, NULL);
+	return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0);
 }
 
 /*
@@ -642,7 +642,7 @@ xfs_vn_fallocate(
 	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 	error = xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
-				      0, NULL, XFS_ATTR_NOLOCK);
+				      0, XFS_ATTR_NOLOCK);
 	if (!error && !(mode & FALLOC_FL_KEEP_SIZE) &&
 	    offset + len > i_size_read(inode))
 		new_size = offset + len;
@@ -653,7 +653,7 @@ xfs_vn_fallocate(
 		iattr.ia_valid = ATTR_SIZE;
 		iattr.ia_size = new_size;
-		error = xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK, NULL);
+		error = xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
 	}
 
 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
@@ -766,12 +766,21 @@ xfs_diflags_to_iflags(
  * When reading existing inodes from disk this is called directly
  * from xfs_iget, when creating a new inode it is called from
  * xfs_ialloc after setting up the inode.
+ *
+ * We are always called with an uninitialised linux inode here.
+ * We need to initialise the necessary fields and take a reference
+ * on it.
  */
 void
 xfs_setup_inode(
 	struct xfs_inode	*ip)
 {
-	struct inode		*inode = ip->i_vnode;
+	struct inode		*inode = &ip->i_vnode;
+
+	inode->i_ino = ip->i_ino;
+	inode->i_state = I_NEW|I_LOCK;
+	inode_add_to_lists(ip->i_mount->m_super, inode);
+	ASSERT(atomic_read(&inode->i_count) == 1);
 
 	inode->i_mode	= ip->i_d.di_mode;
 	inode->i_nlink	= ip->i_d.di_nlink;
......
@@ -77,6 +77,7 @@
 #include <linux/spinlock.h>
 #include <linux/random.h>
 #include <linux/ctype.h>
+#include <linux/writeback.h>
 
 #include <asm/page.h>
 #include <asm/div64.h>
@@ -107,7 +108,6 @@
 #undef  HAVE_PERCPU_SB	/* per cpu superblock counters are a 2.6 feature */
 #endif
 
-#define restricted_chown	xfs_params.restrict_chown.val
 #define irix_sgid_inherit	xfs_params.sgid_inherit.val
 #define irix_symlink_mode	xfs_params.symlink_mode.val
 #define xfs_panic_mask		xfs_params.panic_mask.val
......
@@ -53,6 +53,10 @@ xfs_read_xfsstats(
 		{ "icluster",		XFSSTAT_END_INODE_CLUSTER	},
 		{ "vnodes",		XFSSTAT_END_VNODE_OPS		},
 		{ "buf",		XFSSTAT_END_BUF			},
+		{ "abtb2",		XFSSTAT_END_ABTB_V2		},
+		{ "abtc2",		XFSSTAT_END_ABTC_V2		},
+		{ "bmbt2",		XFSSTAT_END_BMBT_V2		},
+		{ "ibt2",		XFSSTAT_END_IBT_V2		},
 	};
 
 	/* Loop over all stats groups */
......
@@ -118,6 +118,71 @@ struct xfsstats {
 	__uint32_t		xb_page_retries;
 	__uint32_t		xb_page_found;
 	__uint32_t		xb_get_read;
+/* Version 2 btree counters */
+#define XFSSTAT_END_ABTB_V2	(XFSSTAT_END_BUF+15)
+	__uint32_t		xs_abtb_2_lookup;
+	__uint32_t		xs_abtb_2_compare;
+	__uint32_t		xs_abtb_2_insrec;
+	__uint32_t		xs_abtb_2_delrec;
+	__uint32_t		xs_abtb_2_newroot;
+	__uint32_t		xs_abtb_2_killroot;
+	__uint32_t		xs_abtb_2_increment;
+	__uint32_t		xs_abtb_2_decrement;
+	__uint32_t		xs_abtb_2_lshift;
+	__uint32_t		xs_abtb_2_rshift;
+	__uint32_t		xs_abtb_2_split;
+	__uint32_t		xs_abtb_2_join;
+	__uint32_t		xs_abtb_2_alloc;
+	__uint32_t		xs_abtb_2_free;
+	__uint32_t		xs_abtb_2_moves;
+#define XFSSTAT_END_ABTC_V2	(XFSSTAT_END_ABTB_V2+15)
+	__uint32_t		xs_abtc_2_lookup;
+	__uint32_t		xs_abtc_2_compare;
+	__uint32_t		xs_abtc_2_insrec;
+	__uint32_t		xs_abtc_2_delrec;
+	__uint32_t		xs_abtc_2_newroot;
+	__uint32_t		xs_abtc_2_killroot;
+	__uint32_t		xs_abtc_2_increment;
+	__uint32_t		xs_abtc_2_decrement;
+	__uint32_t		xs_abtc_2_lshift;
+	__uint32_t		xs_abtc_2_rshift;
+	__uint32_t		xs_abtc_2_split;
+	__uint32_t		xs_abtc_2_join;
+	__uint32_t		xs_abtc_2_alloc;
+	__uint32_t		xs_abtc_2_free;
+	__uint32_t		xs_abtc_2_moves;
+#define XFSSTAT_END_BMBT_V2	(XFSSTAT_END_ABTC_V2+15)
+	__uint32_t		xs_bmbt_2_lookup;
+	__uint32_t		xs_bmbt_2_compare;
+	__uint32_t		xs_bmbt_2_insrec;
+	__uint32_t		xs_bmbt_2_delrec;
+	__uint32_t		xs_bmbt_2_newroot;
+	__uint32_t		xs_bmbt_2_killroot;
+	__uint32_t		xs_bmbt_2_increment;
+	__uint32_t		xs_bmbt_2_decrement;
+	__uint32_t		xs_bmbt_2_lshift;
+	__uint32_t		xs_bmbt_2_rshift;
+	__uint32_t		xs_bmbt_2_split;
+	__uint32_t		xs_bmbt_2_join;
+	__uint32_t		xs_bmbt_2_alloc;
+	__uint32_t		xs_bmbt_2_free;
+	__uint32_t		xs_bmbt_2_moves;
+#define XFSSTAT_END_IBT_V2	(XFSSTAT_END_BMBT_V2+15)
+	__uint32_t		xs_ibt_2_lookup;
+	__uint32_t		xs_ibt_2_compare;
+	__uint32_t		xs_ibt_2_insrec;
+	__uint32_t		xs_ibt_2_delrec;
+	__uint32_t		xs_ibt_2_newroot;
+	__uint32_t		xs_ibt_2_killroot;
+	__uint32_t		xs_ibt_2_increment;
+	__uint32_t		xs_ibt_2_decrement;
+	__uint32_t		xs_ibt_2_lshift;
+	__uint32_t		xs_ibt_2_rshift;
+	__uint32_t		xs_ibt_2_split;
+	__uint32_t		xs_ibt_2_join;
+	__uint32_t		xs_ibt_2_alloc;
+	__uint32_t		xs_ibt_2_free;
+	__uint32_t		xs_ibt_2_moves;
 /* Extra precision counters */
 	__uint64_t		xs_xstrat_bytes;
 	__uint64_t		xs_write_bytes;
......
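The new counters land at the end of the stats vector printed by xfs_read_xfsstats(), so they appear as extra lines in /proc/fs/xfs/stat. A small userspace sketch of mine (not part of the patch) that prints just the v2 btree lines:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/fs/xfs/stat", "r");
	char line[512];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* "abtb2", "abtc2", "bmbt2", "ibt2" per xfs_read_xfsstats() */
		if (!strncmp(line, "abtb2", 5) || !strncmp(line, "abtc2", 5) ||
		    !strncmp(line, "bmbt2", 5) || !strncmp(line, "ibt2", 4))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}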
This diff is collapsed.
@@ -101,9 +101,6 @@ struct block_device;
 
 extern __uint64_t xfs_max_file_offset(unsigned int);
 
-extern void xfs_flush_inode(struct xfs_inode *);
-extern void xfs_flush_device(struct xfs_inode *);
-
 extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
 
 extern const struct export_operations xfs_export_operations;
......
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"
#include <linux/kthread.h>
#include <linux/freezer.h>
/*
* Sync all the inodes in the given AG according to the
* direction given by the flags.
*/
STATIC int
xfs_sync_inodes_ag(
xfs_mount_t *mp,
int ag,
int flags)
{
xfs_perag_t *pag = &mp->m_perag[ag];
int nr_found;
uint32_t first_index = 0;
int error = 0;
int last_error = 0;
int fflag = XFS_B_ASYNC;
if (flags & SYNC_DELWRI)
fflag = XFS_B_DELWRI;
if (flags & SYNC_WAIT)
fflag = 0; /* synchronous overrides all */
do {
struct inode *inode;
xfs_inode_t *ip = NULL;
int lock_flags = XFS_ILOCK_SHARED;
/*
* use a gang lookup to find the next inode in the tree
* as the tree is sparse and a gang lookup walks to find
* the number of objects requested.
*/
read_lock(&pag->pag_ici_lock);
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
(void**)&ip, first_index, 1);
if (!nr_found) {
read_unlock(&pag->pag_ici_lock);
break;
}
/*
* Update the index for the next lookup. Catch overflows
* into the next AG range which can occur if we have inodes
* in the last block of the AG and we are currently
* pointing to the last inode.
*/
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
read_unlock(&pag->pag_ici_lock);
break;
}
/* nothing to sync during shutdown */
if (XFS_FORCED_SHUTDOWN(mp)) {
read_unlock(&pag->pag_ici_lock);
return 0;
}
/*
* If we can't get a reference on the inode, it must be
* in reclaim. Leave it for the reclaim code to flush.
*/
inode = VFS_I(ip);
if (!igrab(inode)) {
read_unlock(&pag->pag_ici_lock);
continue;
}
read_unlock(&pag->pag_ici_lock);
/* bad inodes are dealt with elsewhere */
if (is_bad_inode(inode)) {
IRELE(ip);
continue;
}
/*
* If we have to flush data or wait for I/O completion
* we need to hold the iolock.
*/
if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) {
xfs_ilock(ip, XFS_IOLOCK_SHARED);
lock_flags |= XFS_IOLOCK_SHARED;
error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
if (flags & SYNC_IOWAIT)
vn_iowait(ip);
}
xfs_ilock(ip, XFS_ILOCK_SHARED);
if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) {
if (flags & SYNC_WAIT) {
xfs_iflock(ip);
if (!xfs_inode_clean(ip))
error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
else
xfs_ifunlock(ip);
} else if (xfs_iflock_nowait(ip)) {
if (!xfs_inode_clean(ip))
error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
else
xfs_ifunlock(ip);
}
}
xfs_iput(ip, lock_flags);
if (error)
last_error = error;
/*
* bail out if the filesystem is corrupted.
*/
if (error == EFSCORRUPTED)
return XFS_ERROR(error);
} while (nr_found);
return last_error;
}
int
xfs_sync_inodes(
xfs_mount_t *mp,
int flags)
{
int error;
int last_error;
int i;
int lflags = XFS_LOG_FORCE;
if (mp->m_flags & XFS_MOUNT_RDONLY)
return 0;
error = 0;
last_error = 0;
if (flags & SYNC_WAIT)
lflags |= XFS_LOG_SYNC;
for (i = 0; i < mp->m_sb.sb_agcount; i++) {
if (!mp->m_perag[i].pag_ici_init)
continue;
error = xfs_sync_inodes_ag(mp, i, flags);
if (error)
last_error = error;
if (error == EFSCORRUPTED)
break;
}
if (flags & SYNC_DELWRI)
xfs_log_force(mp, 0, lflags);
return XFS_ERROR(last_error);
}
STATIC int
xfs_commit_dummy_trans(
struct xfs_mount *mp,
uint log_flags)
{
struct xfs_inode *ip = mp->m_rootip;
struct xfs_trans *tp;
int error;
/*
* Put a dummy transaction in the log to tell recovery
* that all others are OK.
*/
tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
if (error) {
xfs_trans_cancel(tp, 0);
return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
/* XXX(hch): ignoring the error here.. */
error = xfs_trans_commit(tp, 0);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_log_force(mp, 0, log_flags);
return 0;
}
int
xfs_sync_fsdata(
struct xfs_mount *mp,
int flags)
{
struct xfs_buf *bp;
struct xfs_buf_log_item *bip;
int error = 0;
/*
* If this is xfssyncd() then only sync the superblock if we can
* lock it without sleeping and it is not pinned.
*/
if (flags & SYNC_BDFLUSH) {
ASSERT(!(flags & SYNC_WAIT));
bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
if (!bp)
goto out;
bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
goto out_brelse;
} else {
bp = xfs_getsb(mp, 0);
/*
* If the buffer is pinned then push on the log so we won't
* get stuck waiting in the write for someone, maybe
* ourselves, to flush the log.
*
* Even though we just pushed the log above, we did not have
* the superblock buffer locked at that point so it can
* become pinned in between there and here.
*/
if (XFS_BUF_ISPINNED(bp))
xfs_log_force(mp, 0, XFS_LOG_FORCE);
}
if (flags & SYNC_WAIT)
XFS_BUF_UNASYNC(bp);
else
XFS_BUF_ASYNC(bp);
return xfs_bwrite(mp, bp);
out_brelse:
xfs_buf_relse(bp);
out:
return error;
}
/*
* When remounting a filesystem read-only or freezing the filesystem, we have
* two phases to execute. This first phase is syncing the data before we
* quiesce the filesystem, and the second is flushing all the inodes out after
* we've waited for all the transactions created by the first phase to
* complete. The second phase ensures that the inodes are written to their
* location on disk rather than just existing in transactions in the log. This
* means after a quiesce there is no log replay required to write the inodes to
* disk (this is the main difference between a sync and a quiesce).
*/
/*
* First stage of freeze - no writers will make progress now we are here,
* so we flush delwri and delalloc buffers here, then wait for all I/O to
* complete. Data is frozen at that point. Metadata is not frozen,
* transactions can still occur here so don't bother flushing the buftarg
* because it'll just get dirty again.
*/
int
xfs_quiesce_data(
struct xfs_mount *mp)
{
int error;
/* push non-blocking */
xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_BDFLUSH);
XFS_QM_DQSYNC(mp, SYNC_BDFLUSH);
xfs_filestream_flush(mp);
/* push and block */
xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT);
XFS_QM_DQSYNC(mp, SYNC_WAIT);
/* write superblock and hoover up shutdown errors */
error = xfs_sync_fsdata(mp, 0);
/* flush data-only devices */
if (mp->m_rtdev_targp)
XFS_bflush(mp->m_rtdev_targp);
return error;
}
STATIC void
xfs_quiesce_fs(
struct xfs_mount *mp)
{
int count = 0, pincount;
xfs_flush_buftarg(mp->m_ddev_targp, 0);
xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
/*
* This loop must run at least twice. The first instance of the loop
* will flush most metadata, but that will generate more metadata
* (typically directory updates), which then must be flushed and
* logged before we can write the unmount record.
*/
do {
xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT);
pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
if (!pincount) {
delay(50);
count++;
}
} while (count < 2);
}
/*
* Second stage of a quiesce. The data is already synced, now we have to take
* care of the metadata. New transactions are already blocked, so we need to
* wait for any remaining transactions to drain out before proceeding.
*/
void
xfs_quiesce_attr(
struct xfs_mount *mp)
{
int error = 0;
/* wait for all modifications to complete */
while (atomic_read(&mp->m_active_trans) > 0)
delay(100);
/* flush inodes and push all remaining buffers out to disk */
xfs_quiesce_fs(mp);
ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0);
/* Push the superblock and write an unmount record */
error = xfs_log_sbcount(mp, 1);
if (error)
xfs_fs_cmn_err(CE_WARN, mp,
"xfs_attr_quiesce: failed to log sb changes. "
"Frozen image may not be consistent.");
xfs_log_unmount_write(mp);
xfs_unmountfs_writesb(mp);
}
/*
* Enqueue a work item to be picked up by the vfs xfssyncd thread.
* Doing this has two advantages:
* - It saves on stack space, which is tight in certain situations
* - It can be used (with care) as a mechanism to avoid deadlocks.
* Flushing while allocating in a full filesystem requires both.
*/
STATIC void
xfs_syncd_queue_work(
struct xfs_mount *mp,
void *data,
void (*syncer)(struct xfs_mount *, void *))
{
struct bhv_vfs_sync_work *work;
work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
INIT_LIST_HEAD(&work->w_list);
work->w_syncer = syncer;
work->w_data = data;
work->w_mount = mp;
spin_lock(&mp->m_sync_lock);
list_add_tail(&work->w_list, &mp->m_sync_list);
spin_unlock(&mp->m_sync_lock);
wake_up_process(mp->m_sync_task);
}
/*
* Flush delayed allocate data, attempting to free up reserved space
* from existing allocations. At this point a new allocation attempt
* has failed with ENOSPC and we are in the process of scratching our
* heads, looking about for more room...
*/
STATIC void
xfs_flush_inode_work(
struct xfs_mount *mp,
void *arg)
{
struct inode *inode = arg;
filemap_flush(inode->i_mapping);
iput(inode);
}
void
xfs_flush_inode(
xfs_inode_t *ip)
{
struct inode *inode = VFS_I(ip);
igrab(inode);
xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
delay(msecs_to_jiffies(500));
}
/*
* This is the "bigger hammer" version of xfs_flush_inode_work...
* (IOW, "If at first you don't succeed, use a Bigger Hammer").
*/
STATIC void
xfs_flush_device_work(
struct xfs_mount *mp,
void *arg)
{
struct inode *inode = arg;
sync_blockdev(mp->m_super->s_bdev);
iput(inode);
}
void
xfs_flush_device(
xfs_inode_t *ip)
{
struct inode *inode = VFS_I(ip);
igrab(inode);
xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
delay(msecs_to_jiffies(500));
xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
/*
* Every sync period we need to unpin all items, reclaim inodes, sync
* quota and write out the superblock. We might need to cover the log
* to indicate it is idle.
*/
STATIC void
xfs_sync_worker(
struct xfs_mount *mp,
void *unused)
{
int error;
if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
/* dgc: errors ignored here */
error = XFS_QM_DQSYNC(mp, SYNC_BDFLUSH);
error = xfs_sync_fsdata(mp, SYNC_BDFLUSH);
if (xfs_log_need_covered(mp))
error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE);
}
mp->m_sync_seq++;
wake_up(&mp->m_wait_single_sync_task);
}
STATIC int
xfssyncd(
void *arg)
{
struct xfs_mount *mp = arg;
long timeleft;
bhv_vfs_sync_work_t *work, *n;
LIST_HEAD (tmp);
set_freezable();
timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
for (;;) {
timeleft = schedule_timeout_interruptible(timeleft);
/* swsusp */
try_to_freeze();
if (kthread_should_stop() && list_empty(&mp->m_sync_list))
break;
spin_lock(&mp->m_sync_lock);
/*
* We can get woken by laptop mode, to do a sync -
* that's the (only!) case where the list would be
* empty with time remaining.
*/
if (!timeleft || list_empty(&mp->m_sync_list)) {
if (!timeleft)
timeleft = xfs_syncd_centisecs *
msecs_to_jiffies(10);
INIT_LIST_HEAD(&mp->m_sync_work.w_list);
list_add_tail(&mp->m_sync_work.w_list,
&mp->m_sync_list);
}
list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
list_move(&work->w_list, &tmp);
spin_unlock(&mp->m_sync_lock);
list_for_each_entry_safe(work, n, &tmp, w_list) {
(*work->w_syncer)(mp, work->w_data);
list_del(&work->w_list);
if (work == &mp->m_sync_work)
continue;
kmem_free(work);
}
}
return 0;
}
int
xfs_syncd_init(
struct xfs_mount *mp)
{
mp->m_sync_work.w_syncer = xfs_sync_worker;
mp->m_sync_work.w_mount = mp;
mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
if (IS_ERR(mp->m_sync_task))
return -PTR_ERR(mp->m_sync_task);
return 0;
}
void
xfs_syncd_stop(
struct xfs_mount *mp)
{
kthread_stop(mp->m_sync_task);
}
int
xfs_reclaim_inode(
xfs_inode_t *ip,
int locked,
int sync_mode)
{
xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino);
/* The hash lock here protects a thread in xfs_iget_core from
* racing with us on linking the inode back with a vnode.
* Once we have the XFS_IRECLAIM flag set it will not touch
* us.
*/
write_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
!__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
spin_unlock(&ip->i_flags_lock);
write_unlock(&pag->pag_ici_lock);
if (locked) {
xfs_ifunlock(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
return 1;
}
__xfs_iflags_set(ip, XFS_IRECLAIM);
spin_unlock(&ip->i_flags_lock);
write_unlock(&pag->pag_ici_lock);
xfs_put_perag(ip->i_mount, pag);
/*
* If the inode is still dirty, then flush it out. If the inode
* is not in the AIL, then it will be OK to flush it delwri as
* long as xfs_iflush() does not keep any references to the inode.
* We leave that decision up to xfs_iflush() since it has the
* knowledge of whether it's OK to simply do a delwri flush of
* the inode or whether we need to wait until the inode is
* pulled from the AIL.
* We get the flush lock regardless, though, just to make sure
* we don't free it while it is being flushed.
*/
if (!locked) {
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_iflock(ip);
}
/*
* In the case of a forced shutdown we rely on xfs_iflush() to
* wait for the inode to be unpinned before returning an error.
*/
if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
/* synchronize with xfs_iflush_done */
xfs_iflock(ip);
xfs_ifunlock(ip);
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_ireclaim(ip);
return 0;
}
/*
* We set the inode flag atomically with the radix tree tag.
* Once we get tag lookups on the radix tree, this inode flag
* can go away.
*/
void
xfs_inode_set_reclaim_tag(
xfs_inode_t *ip)
{
xfs_mount_t *mp = ip->i_mount;
xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
read_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
radix_tree_tag_set(&pag->pag_ici_root,
XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
spin_unlock(&ip->i_flags_lock);
read_unlock(&pag->pag_ici_lock);
xfs_put_perag(mp, pag);
}
void
__xfs_inode_clear_reclaim_tag(
xfs_mount_t *mp,
xfs_perag_t *pag,
xfs_inode_t *ip)
{
radix_tree_tag_clear(&pag->pag_ici_root,
XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}
void
xfs_inode_clear_reclaim_tag(
xfs_inode_t *ip)
{
xfs_mount_t *mp = ip->i_mount;
xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
read_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
__xfs_inode_clear_reclaim_tag(mp, pag, ip);
spin_unlock(&ip->i_flags_lock);
read_unlock(&pag->pag_ici_lock);
xfs_put_perag(mp, pag);
}
STATIC void
xfs_reclaim_inodes_ag(
xfs_mount_t *mp,
int ag,
int noblock,
int mode)
{
xfs_inode_t *ip = NULL;
xfs_perag_t *pag = &mp->m_perag[ag];
int nr_found;
uint32_t first_index;
int skipped;
restart:
first_index = 0;
skipped = 0;
do {
/*
* use a gang lookup to find the next inode in the tree
* as the tree is sparse and a gang lookup walks to find
* the number of objects requested.
*/
read_lock(&pag->pag_ici_lock);
nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
(void**)&ip, first_index, 1,
XFS_ICI_RECLAIM_TAG);
if (!nr_found) {
read_unlock(&pag->pag_ici_lock);
break;
}
/*
* Update the index for the next lookup. Catch overflows
* into the next AG range which can occur if we have inodes
* in the last block of the AG and we are currently
* pointing to the last inode.
*/
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
read_unlock(&pag->pag_ici_lock);
break;
}
ASSERT(xfs_iflags_test(ip, (XFS_IRECLAIMABLE|XFS_IRECLAIM)));
/* ignore if already under reclaim */
if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
read_unlock(&pag->pag_ici_lock);
continue;
}
if (noblock) {
if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
read_unlock(&pag->pag_ici_lock);
continue;
}
if (xfs_ipincount(ip) ||
!xfs_iflock_nowait(ip)) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
read_unlock(&pag->pag_ici_lock);
continue;
}
}
read_unlock(&pag->pag_ici_lock);
/*
* hmmm - this is an inode already in reclaim. Do
* we even bother catching it here?
*/
if (xfs_reclaim_inode(ip, noblock, mode))
skipped++;
} while (nr_found);
if (skipped) {
delay(1);
goto restart;
}
return;
}
int
xfs_reclaim_inodes(
xfs_mount_t *mp,
int noblock,
int mode)
{
int i;
for (i = 0; i < mp->m_sb.sb_agcount; i++) {
if (!mp->m_perag[i].pag_ici_init)
continue;
xfs_reclaim_inodes_ag(mp, i, noblock, mode);
}
return 0;
}
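Both walkers in this file (sync and reclaim) are built on the radix-tree gang-lookup idiom the comments describe: look up one object at a time, bump the index past it, and guard against wrapping into the next AG. Distilled into a sketch of mine (the names example_ag_walk and visit are hypothetical):

STATIC int
example_ag_walk(xfs_mount_t *mp, xfs_perag_t *pag, int (*visit)(xfs_inode_t *))
{
	uint32_t	first_index = 0;
	int		nr_found;

	do {
		xfs_inode_t	*ip = NULL;

		read_lock(&pag->pag_ici_lock);
		/* the tree is sparse: ask for the next single inode >= index */
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void **)&ip, first_index, 1);
		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* advance the index; stop if it wrapped into the next AG */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}
		read_unlock(&pag->pag_ici_lock);

		visit(ip);	/* the sync or reclaim work on this inode */
	} while (nr_found);
	return 0;
}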
/*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef XFS_SYNC_H
#define XFS_SYNC_H 1
struct xfs_mount;
typedef struct bhv_vfs_sync_work {
struct list_head w_list;
struct xfs_mount *w_mount;
void *w_data; /* syncer routine argument */
void (*w_syncer)(struct xfs_mount *, void *);
} bhv_vfs_sync_work_t;
#define SYNC_ATTR 0x0001 /* sync attributes */
#define SYNC_DELWRI 0x0002 /* look at delayed writes */
#define SYNC_WAIT 0x0004 /* wait for i/o to complete */
#define SYNC_BDFLUSH 0x0008 /* BDFLUSH is calling -- don't block */
#define SYNC_IOWAIT 0x0010 /* wait for all I/O to complete */
int xfs_syncd_init(struct xfs_mount *mp);
void xfs_syncd_stop(struct xfs_mount *mp);
int xfs_sync_inodes(struct xfs_mount *mp, int flags);
int xfs_sync_fsdata(struct xfs_mount *mp, int flags);
int xfs_quiesce_data(struct xfs_mount *mp);
void xfs_quiesce_attr(struct xfs_mount *mp);
void xfs_flush_inode(struct xfs_inode *ip);
void xfs_flush_device(struct xfs_inode *ip);
int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
int xfs_reclaim_inodes(struct xfs_mount *mp, int noblock, int mode);
void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
void xfs_inode_clear_reclaim_tag(struct xfs_inode *ip);
void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
struct xfs_inode *ip);
#endif
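As a usage note (mine, not part of the header): callers compose these flags, typically one pass that starts I/O without blocking and a second that waits, mirroring xfs_quiesce_data() in xfs_sync.c:

/* sketch: two-pass data sync as done by the quiesce code */
static int example_two_pass_sync(struct xfs_mount *mp)
{
	/* pass 1: kick off delayed writes, don't block */
	xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_BDFLUSH);

	/* pass 2: push again and wait for data and I/O completion */
	return xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_WAIT | SYNC_IOWAIT);
}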
@@ -55,17 +55,6 @@ xfs_stats_clear_proc_handler(
 #endif /* CONFIG_PROC_FS */
 
 static ctl_table xfs_table[] = {
-	{
-		.ctl_name	= XFS_RESTRICT_CHOWN,
-		.procname	= "restrict_chown",
-		.data		= &xfs_params.restrict_chown.val,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &xfs_params.restrict_chown.min,
-		.extra2		= &xfs_params.restrict_chown.max
-	},
 	{
 		.ctl_name	= XFS_SGID_INHERIT,
 		.procname	= "irix_sgid_inherit",
......
@@ -31,7 +31,6 @@ typedef struct xfs_sysctl_val {
 } xfs_sysctl_val_t;
 
 typedef struct xfs_param {
-	xfs_sysctl_val_t restrict_chown;/* Root/non-root can give away files.*/
 	xfs_sysctl_val_t sgid_inherit;	/* Inherit S_ISGID if process' GID is
 					 * not a member of parent dir GID. */
 	xfs_sysctl_val_t symlink_mode;	/* Link creat mode affected by umask */
@@ -68,7 +67,7 @@ typedef struct xfs_param {
 enum {
 	/* XFS_REFCACHE_SIZE = 1 */
 	/* XFS_REFCACHE_PURGE = 2 */
-	XFS_RESTRICT_CHOWN = 3,
+	/* XFS_RESTRICT_CHOWN = 3 */
 	XFS_SGID_INHERIT = 4,
 	XFS_SYMLINK_MODE = 5,
 	XFS_PANIC_MASK = 6,
......
@@ -33,37 +33,6 @@ struct xfs_mount_args;
 
 typedef struct kstatfs	bhv_statvfs_t;
 
-typedef struct bhv_vfs_sync_work {
-	struct list_head	w_list;
-	struct xfs_mount	*w_mount;
-	void			*w_data;	/* syncer routine argument */
-	void			(*w_syncer)(struct xfs_mount *, void *);
-} bhv_vfs_sync_work_t;
-
-#define SYNC_ATTR		0x0001	/* sync attributes */
-#define SYNC_CLOSE		0x0002	/* close file system down */
-#define SYNC_DELWRI		0x0004	/* look at delayed writes */
-#define SYNC_WAIT		0x0008	/* wait for i/o to complete */
-#define SYNC_BDFLUSH		0x0010	/* BDFLUSH is calling -- don't block */
-#define SYNC_FSDATA		0x0020	/* flush fs data (e.g. superblocks) */
-#define SYNC_REFCACHE		0x0040	/* prune some of the nfs ref cache */
-#define SYNC_REMOUNT		0x0080	/* remount readonly, no dummy LRs */
-#define SYNC_IOWAIT		0x0100	/* wait for all I/O to complete */
-
-/*
- * When remounting a filesystem read-only or freezing the filesystem,
- * we have two phases to execute. This first phase is syncing the data
- * before we quiesce the fielsystem, and the second is flushing all the
- * inodes out after we've waited for all the transactions created by
- * the first phase to complete. The second phase uses SYNC_INODE_QUIESCE
- * to ensure that the inodes are written to their location on disk
- * rather than just existing in transactions in the log. This means
- * after a quiesce there is no log replay required to write the inodes
- * to disk (this is the main difference between a sync and a quiesce).
- */
-#define SYNC_DATA_QUIESCE	(SYNC_DELWRI|SYNC_FSDATA|SYNC_WAIT|SYNC_IOWAIT)
-#define SYNC_INODE_QUIESCE	(SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT)
-
 #define SHUTDOWN_META_IO_ERROR	0x0001	/* write attempt to metadata failed */
 #define SHUTDOWN_LOG_IO_ERROR	0x0002	/* write attempt to the log failed */
 #define SHUTDOWN_FORCE_UMOUNT	0x0004	/* shutdown from a forced unmount */
......
@@ -84,25 +84,12 @@ vn_ioerror(
 
 #ifdef	XFS_INODE_TRACE
 
-/*
- * Reference count of Linux inode if present, -1 if the xfs_inode
- * has no associated Linux inode.
- */
-static inline int xfs_icount(struct xfs_inode *ip)
-{
-	struct inode *vp = VFS_I(ip);
-
-	if (vp)
-		return vn_count(vp);
-	return -1;
-}
-
 #define KTRACE_ENTER(ip, vk, s, line, ra)			\
 	ktrace_enter(	(ip)->i_trace,				\
 /* 0 */			(void *)(__psint_t)(vk),		\
 /* 1 */			(void *)(s),				\
 /* 2 */			(void *)(__psint_t) line,		\
-/* 3 */			(void *)(__psint_t)xfs_icount(ip),	\
+/* 3 */			(void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
 /* 4 */			(void *)(ra),				\
 /* 5 */			NULL,					\
 /* 6 */			(void *)(__psint_t)current_cpu(),	\
......
@@ -80,11 +80,6 @@ do { \
 	iput(VFS_I(ip)); \
 } while (0)
 
-static inline struct inode *vn_grab(struct inode *vp)
-{
-	return igrab(vp);
-}
-
 /*
  * Dealing with bad inodes
  */
......
@@ -101,7 +101,7 @@ xfs_qm_dqinit(
 	if (brandnewdquot) {
 		dqp->dq_flnext = dqp->dq_flprev = dqp;
 		mutex_init(&dqp->q_qlock);
-		sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");
+		init_waitqueue_head(&dqp->q_pinwait);
 
 		/*
 		 * Because we want to use a counting completion, complete
@@ -131,7 +131,7 @@ xfs_qm_dqinit(
 		dqp->q_res_bcount = 0;
 		dqp->q_res_icount = 0;
 		dqp->q_res_rtbcount = 0;
-		dqp->q_pincount = 0;
+		atomic_set(&dqp->q_pincount, 0);
 		dqp->q_hash = NULL;
 		ASSERT(dqp->dq_flnext == dqp->dq_flprev);
@@ -1221,16 +1221,14 @@ xfs_qm_dqflush(
 	xfs_dqtrace_entry(dqp, "DQFLUSH");
 
 	/*
-	 * If not dirty, nada.
+	 * If not dirty, or it's pinned and we are not supposed to
+	 * block, nada.
 	 */
-	if (!XFS_DQ_IS_DIRTY(dqp)) {
+	if (!XFS_DQ_IS_DIRTY(dqp) ||
+	    (!(flags & XFS_QMOPT_SYNC) && atomic_read(&dqp->q_pincount) > 0)) {
 		xfs_dqfunlock(dqp);
-		return (0);
+		return 0;
 	}
 
-	/*
-	 * Cant flush a pinned dquot. Wait for it.
-	 */
 	xfs_qm_dqunpin_wait(dqp);
 
 	/*
@@ -1274,10 +1272,8 @@ xfs_qm_dqflush(
 	dqp->dq_flags &= ~(XFS_DQ_DIRTY);
 	mp = dqp->q_mount;
 
-	/* lsn is 64 bits */
-	spin_lock(&mp->m_ail_lock);
-	dqp->q_logitem.qli_flush_lsn = dqp->q_logitem.qli_item.li_lsn;
-	spin_unlock(&mp->m_ail_lock);
+	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
+					&dqp->q_logitem.qli_item.li_lsn);
 
 	/*
 	 * Attach an iodone routine so that we can remove this dquot from the
@@ -1323,8 +1319,10 @@ xfs_qm_dqflush_done(
 	xfs_dq_logitem_t	*qip)
 {
 	xfs_dquot_t		*dqp;
+	struct xfs_ail		*ailp;
 
 	dqp = qip->qli_dquot;
+	ailp = qip->qli_item.li_ailp;
 
 	/*
 	 * We only want to pull the item from the AIL if its
@@ -1337,15 +1335,12 @@ xfs_qm_dqflush_done(
 	if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
 	    qip->qli_item.li_lsn == qip->qli_flush_lsn) {
 
-		spin_lock(&dqp->q_mount->m_ail_lock);
-		/*
-		 * xfs_trans_delete_ail() drops the AIL lock.
-		 */
+		/* xfs_trans_ail_delete() drops the AIL lock. */
+		spin_lock(&ailp->xa_lock);
 		if (qip->qli_item.li_lsn == qip->qli_flush_lsn)
-			xfs_trans_delete_ail(dqp->q_mount,
-					     (xfs_log_item_t*)qip);
+			xfs_trans_ail_delete(ailp, (xfs_log_item_t*)qip);
 		else
-			spin_unlock(&dqp->q_mount->m_ail_lock);
+			spin_unlock(&ailp->xa_lock);
 	}
 
 	/*
@@ -1375,7 +1370,7 @@ xfs_dqunlock(
 	mutex_unlock(&(dqp->q_qlock));
 	if (dqp->q_logitem.qli_dquot == dqp) {
 		/* Once was dqp->q_mount, but might just have been cleared */
-		xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_mountp,
+		xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp,
 					(xfs_log_item_t*)&(dqp->q_logitem));
 	}
 }
@@ -1489,7 +1484,7 @@ xfs_qm_dqpurge(
 			"xfs_qm_dqpurge: dquot %p flush failed", dqp);
 		xfs_dqflock(dqp);
 	}
-	ASSERT(dqp->q_pincount == 0);
+	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
 	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
......
@@ -83,8 +83,8 @@ typedef struct xfs_dquot {
 	xfs_qcnt_t	 q_res_rtbcount;/* total realtime blks used+reserved */
 	mutex_t		 q_qlock;	/* quota lock */
 	struct completion q_flush;	/* flush completion queue */
-	uint		 q_pincount;	/* pin count for this dquot */
-	sv_t		 q_pinwait;	/* sync var for pinning */
+	atomic_t	 q_pincount;	/* dquot pin count */
+	wait_queue_head_t q_pinwait;	/* dquot pinning wait queue */
 #ifdef XFS_DQUOT_TRACE
 	struct ktrace	*q_trace;	/* trace header structure */
 #endif
......
@@ -88,25 +88,22 @@ xfs_qm_dquot_logitem_format(
 
 /*
  * Increment the pin count of the given dquot.
- * This value is protected by pinlock spinlock in the xQM structure.
  */
 STATIC void
 xfs_qm_dquot_logitem_pin(
 	xfs_dq_logitem_t *logitem)
 {
-	xfs_dquot_t *dqp;
+	xfs_dquot_t *dqp = logitem->qli_dquot;
 
-	dqp = logitem->qli_dquot;
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
-	spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
-	dqp->q_pincount++;
-	spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
+	atomic_inc(&dqp->q_pincount);
 }
 
 /*
  * Decrement the pin count of the given dquot, and wake up
  * anyone in xfs_dqwait_unpin() if the count goes to 0. The
- * dquot must have been previously pinned with a call to xfs_dqpin().
+ * dquot must have been previously pinned with a call to
+ * xfs_qm_dquot_logitem_pin().
  */
 /* ARGSUSED */
 STATIC void
@@ -114,16 +111,11 @@ xfs_qm_dquot_logitem_unpin(
 	xfs_dq_logitem_t *logitem,
 	int		  stale)
 {
-	xfs_dquot_t *dqp;
+	xfs_dquot_t *dqp = logitem->qli_dquot;
 
-	dqp = logitem->qli_dquot;
-	ASSERT(dqp->q_pincount > 0);
-	spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
-	dqp->q_pincount--;
-	if (dqp->q_pincount == 0) {
-		sv_broadcast(&dqp->q_pinwait);
-	}
-	spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
+	ASSERT(atomic_read(&dqp->q_pincount) > 0);
+	if (atomic_dec_and_test(&dqp->q_pincount))
+		wake_up(&dqp->q_pinwait);
 }
 
 /* ARGSUSED */
@@ -193,21 +185,14 @@ xfs_qm_dqunpin_wait(
 	xfs_dquot_t *dqp)
 {
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
-	if (dqp->q_pincount == 0) {
+	if (atomic_read(&dqp->q_pincount) == 0)
 		return;
-	}
 
 	/*
 	 * Give the log a push so we don't wait here too long.
 	 */
 	xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE);
-	spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
-	if (dqp->q_pincount == 0) {
-		spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
-		return;
-	}
-	sv_wait(&(dqp->q_pinwait), PINOD,
-		&(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s);
+	wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
 }
 
 /*
@@ -310,7 +295,7 @@ xfs_qm_dquot_logitem_trylock(
 	uint			retval;
 
 	dqp = qip->qli_dquot;
-	if (dqp->q_pincount > 0)
+	if (atomic_read(&dqp->q_pincount) > 0)
 		return (XFS_ITEM_PINNED);
 
 	if (! xfs_qm_dqlock_nowait(dqp))
@@ -568,14 +553,16 @@ xfs_qm_qoffend_logitem_committed(
 	xfs_lsn_t lsn)
 {
 	xfs_qoff_logitem_t	*qfs;
+	struct xfs_ail		*ailp;
 
 	qfs = qfe->qql_start_lip;
-	spin_lock(&qfs->qql_item.li_mountp->m_ail_lock);
+	ailp = qfs->qql_item.li_ailp;
+	spin_lock(&ailp->xa_lock);
 	/*
 	 * Delete the qoff-start logitem from the AIL.
-	 * xfs_trans_delete_ail() drops the AIL lock.
+	 * xfs_trans_ail_delete() drops the AIL lock.
 	 */
-	xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs);
+	xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs);
 	kmem_free(qfs);
 	kmem_free(qfe);
 	return (xfs_lsn_t)-1;
......
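The dquot pinning conversion in the hunks above replaces a spinlock-protected counter and SGI-style sync variable with the standard Linux pairing of an atomic counter and a wait queue. The pattern in isolation (a sketch of mine, with a hypothetical object type):

struct pinned_object {
	atomic_t		pincount;
	wait_queue_head_t	pinwait;	/* init with init_waitqueue_head() */
};

static void obj_pin(struct pinned_object *obj)
{
	atomic_inc(&obj->pincount);
}

static void obj_unpin(struct pinned_object *obj)
{
	/* wake waiters only when the last pin is dropped */
	if (atomic_dec_and_test(&obj->pincount))
		wake_up(&obj->pinwait);
}

static void obj_wait_unpinned(struct pinned_object *obj)
{
	/* wait_event() re-checks the condition around each wakeup */
	wait_event(obj->pinwait, atomic_read(&obj->pincount) == 0);
}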
@@ -20,7 +20,6 @@
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_clnt.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
@@ -987,14 +986,10 @@ xfs_qm_dqdetach(
 }
 
 /*
- * This is called by VFS_SYNC and flags arg determines the caller,
- * and its motives, as done in xfs_sync.
- *
- * vfs_sync:	 SYNC_FSDATA|SYNC_ATTR|SYNC_BDFLUSH 0x31
- * syscall sync: SYNC_FSDATA|SYNC_ATTR|SYNC_DELWRI  0x25
- * umountroot:   SYNC_WAIT | SYNC_CLOSE | SYNC_ATTR | SYNC_FSDATA
+ * This is called to sync quotas. We can be told to use non-blocking
+ * semantics by either the SYNC_BDFLUSH flag or the absence of the
+ * SYNC_WAIT flag.
 */
 int
 xfs_qm_sync(
 	xfs_mount_t	*mp,
@@ -1137,7 +1132,6 @@ xfs_qm_init_quotainfo(
 		return error;
 	}
 
-	spin_lock_init(&qinf->qi_pinlock);
 	xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
 	qinf->qi_dqreclaims = 0;
 
@@ -1234,7 +1228,6 @@ xfs_qm_destroy_quotainfo(
 	 */
 	xfs_qm_rele_quotafs_ref(mp);
 
-	spinlock_destroy(&qi->qi_pinlock);
 	xfs_qm_list_destroy(&qi->qi_dqlist);
 
 	if (qi->qi_uquotaip) {
......
@@ -106,7 +106,6 @@ typedef struct xfs_qm {
 typedef struct xfs_quotainfo {
 	xfs_inode_t	*qi_uquotaip;	 /* user quota inode */
 	xfs_inode_t	*qi_gquotaip;	 /* group quota inode */
-	spinlock_t	 qi_pinlock;	 /* dquot pinning lock */
 	xfs_dqlist_t	 qi_dqlist;	 /* all dquots in filesys */
 	int		 qi_dqreclaims;	 /* a change here indicates
					    a removal in the dqlist */
......
@@ -20,7 +20,6 @@
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_clnt.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
......
@@ -127,7 +127,7 @@ xfs_qm_quotactl(
 		break;
 
 	case Q_XQUOTASYNC:
-		return (xfs_sync_inodes(mp, SYNC_DELWRI, NULL));
+		return xfs_sync_inodes(mp, SYNC_DELWRI);
 
 	default:
 		break;
@@ -1022,101 +1022,92 @@ xfs_qm_export_flags(
 /*
- * Go thru all the inodes in the file system, releasing their dquots.
- * Note that the mount structure gets modified to indicate that quotas are off
- * AFTER this, in the case of quotaoff. This also gets called from
- * xfs_rootumount.
+ * Release all the dquots on the inodes in an AG.
  */
-void
-xfs_qm_dqrele_all_inodes(
-	struct xfs_mount *mp,
-	uint		 flags)
+STATIC void
+xfs_qm_dqrele_inodes_ag(
+	xfs_mount_t	*mp,
+	int		ag,
+	uint		flags)
 {
-	xfs_inode_t	*ip, *topino;
-	uint		ireclaims;
-	struct inode	*vp;
-	boolean_t	vnode_refd;
-
-	ASSERT(mp->m_quotainfo);
-	XFS_MOUNT_ILOCK(mp);
-again:
-	ip = mp->m_inodes;
-	if (ip == NULL) {
-		XFS_MOUNT_IUNLOCK(mp);
-		return;
-	}
+	xfs_inode_t	*ip = NULL;
+	xfs_perag_t	*pag = &mp->m_perag[ag];
+	int		first_index = 0;
+	int		nr_found;
+
 	do {
-		/* Skip markers inserted by xfs_sync */
-		if (ip->i_mount == NULL) {
-			ip = ip->i_mnext;
-			continue;
-		}
-		/* Root inode, rbmip and rsumip have associated blocks */
-		if (ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) {
-			ASSERT(ip->i_udquot == NULL);
-			ASSERT(ip->i_gdquot == NULL);
-			ip = ip->i_mnext;
-			continue;
-		}
-		vp = VFS_I(ip);
-		if (!vp) {
+		boolean_t	inode_refed;
+		struct inode	*inode;
+
+		/*
+		 * use a gang lookup to find the next inode in the tree
+		 * as the tree is sparse and a gang lookup walks to find
+		 * the number of objects requested.
+		 */
+		read_lock(&pag->pag_ici_lock);
+		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
+				(void**)&ip, first_index, 1);
+		if (!nr_found) {
+			read_unlock(&pag->pag_ici_lock);
+			break;
+		}
+
+		/* update the index for the next lookup */
+		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
+
+		/* skip quota inodes and those in reclaim */
+		inode = VFS_I(ip);
+		if (!inode || ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) {
 			ASSERT(ip->i_udquot == NULL);
 			ASSERT(ip->i_gdquot == NULL);
-			ip = ip->i_mnext;
+			read_unlock(&pag->pag_ici_lock);
 			continue;
 		}
-		vnode_refd = B_FALSE;
 		if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) {
-			ireclaims = mp->m_ireclaims;
-			topino = mp->m_inodes;
-			vp = vn_grab(vp);
-			if (!vp)
-				goto again;
-
-			XFS_MOUNT_IUNLOCK(mp);
-			/* XXX restart limit ? */
+			inode = igrab(inode);
+			read_unlock(&pag->pag_ici_lock);
+			if (!inode)
+				continue;
+			inode_refed = B_TRUE;
 			xfs_ilock(ip, XFS_ILOCK_EXCL);
-			vnode_refd = B_TRUE;
 		} else {
-			ireclaims = mp->m_ireclaims;
-			topino = mp->m_inodes;
-			XFS_MOUNT_IUNLOCK(mp);
+			read_unlock(&pag->pag_ici_lock);
 		}
-		/*
-		 * We don't keep the mountlock across the dqrele() call,
-		 * since it can take a while..
-		 */
 		if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
 			xfs_qm_dqrele(ip->i_udquot);
 			ip->i_udquot = NULL;
 		}
-		if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
+		if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) &&
+		    ip->i_gdquot) {
 			xfs_qm_dqrele(ip->i_gdquot);
 			ip->i_gdquot = NULL;
 		}
 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-		/*
-		 * Wait until we've dropped the ilock and mountlock to
-		 * do the vn_rele. Or be condemned to an eternity in the
-		 * inactive code in hell.
-		 */
-		if (vnode_refd)
+		if (inode_refed)
 			IRELE(ip);
-		XFS_MOUNT_ILOCK(mp);
-		/*
-		 * If an inode was inserted or removed, we gotta
-		 * start over again.
-		 */
-		if (topino != mp->m_inodes || mp->m_ireclaims != ireclaims) {
-			/* XXX use a sentinel */
-			goto again;
-		}
-		ip = ip->i_mnext;
-	} while (ip != mp->m_inodes);
+	} while (nr_found);
+}
 
-	XFS_MOUNT_IUNLOCK(mp);
+/*
+ * Go thru all the inodes in the file system, releasing their dquots.
+ * Note that the mount structure gets modified to indicate that quotas are off
+ * AFTER this, in the case of quotaoff. This also gets called from
+ * xfs_rootumount.
+ */
+void
+xfs_qm_dqrele_all_inodes(
+	struct xfs_mount *mp,
+	uint		 flags)
+{
+	int		i;
+
+	ASSERT(mp->m_quotainfo);
+	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
+		if (!mp->m_perag[i].pag_ici_init)
+			continue;
+		xfs_qm_dqrele_inodes_ag(mp, i, flags);
+	}
 }
 
 /*------------------------------------------------------------------------*/
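The gang lookup in xfs_qm_dqrele_inodes_ag() is the standard idiom for walking a sparse radix tree one object at a time; stripped of the quota-specific work, it reduces to this skeleton (a sketch, not code from the commit):

	int		first_index = 0;
	int		nr_found;

	do {
		xfs_inode_t	*ip;

		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
						  (void **)&ip, first_index, 1);
		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;			/* tree exhausted */
		}
		/* advance the index so the next lookup makes progress */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		read_unlock(&pag->pag_ici_lock);
		/* ... per-inode work goes here ... */
	} while (nr_found);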
......
@@ -84,5 +84,5 @@ assfail(char *expr, char *file, int line)
 void
 xfs_hex_dump(void *p, int length)
 {
-	print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
+	print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1);
 }
@@ -30,7 +30,7 @@
 #define XFS_ATTR_TRACE 1
 #define XFS_BLI_TRACE 1
 #define XFS_BMAP_TRACE 1
-#define XFS_BMBT_TRACE 1
+#define XFS_BTREE_TRACE 1
 #define XFS_DIR2_TRACE 1
 #define XFS_DQUOT_TRACE 1
 #define XFS_ILOCK_TRACE 1
......
@@ -366,7 +366,7 @@ xfs_acl_allow_set(
 		return ENOTDIR;
 	if (vp->i_sb->s_flags & MS_RDONLY)
 		return EROFS;
-	if (XFS_I(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER))
+	if (XFS_I(vp)->i_d.di_uid != current_fsuid() && !capable(CAP_FOWNER))
 		return EPERM;
 	return 0;
 }
@@ -413,13 +413,13 @@ xfs_acl_access(
 		switch (fap->acl_entry[i].ae_tag) {
 		case ACL_USER_OBJ:
 			seen_userobj = 1;
-			if (fuid != current->fsuid)
+			if (fuid != current_fsuid())
 				continue;
 			matched.ae_tag = ACL_USER_OBJ;
 			matched.ae_perm = allows;
 			break;
 		case ACL_USER:
-			if (fap->acl_entry[i].ae_id != current->fsuid)
+			if (fap->acl_entry[i].ae_id != current_fsuid())
 				continue;
 			matched.ae_tag = ACL_USER;
 			matched.ae_perm = allows;
@@ -758,7 +758,7 @@ xfs_acl_setmode(
 	if (gap && nomask)
 		iattr.ia_mode |= gap->ae_perm << 3;
 
-	return xfs_setattr(XFS_I(vp), &iattr, 0, sys_cred);
+	return xfs_setattr(XFS_I(vp), &iattr, 0);
 }
 
 /*
......
@@ -192,17 +192,23 @@ typedef struct xfs_perag
 	xfs_agino_t	pagi_freecount;	/* number of free inodes */
 	xfs_agino_t	pagi_count;	/* number of allocated inodes */
 	int		pagb_count;	/* pagb slots in use */
-	xfs_perag_busy_t *pagb_list;	/* unstable blocks */
 #ifdef __KERNEL__
 	spinlock_t	pagb_lock;	/* lock for pagb_list */
+#endif
+	xfs_perag_busy_t *pagb_list;	/* unstable blocks */
 
 	atomic_t        pagf_fstrms;    /* # of filestreams active in this AG */
 
 	int		pag_ici_init;	/* incore inode cache initialised */
 	rwlock_t	pag_ici_lock;	/* incore inode lock */
 	struct radix_tree_root pag_ici_root;	/* incore inode cache root */
-#endif
 } xfs_perag_t;
 
+/*
+ * tags for inode radix tree
+ */
+#define XFS_ICI_RECLAIM_TAG	0	/* inode is to be reclaimed */
+
 #define	XFS_AG_MAXLEVELS(mp)		((mp)->m_ag_maxlevels)
 #define	XFS_MIN_FREELIST_RAW(bl,cl,mp)	\
 	(MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + MIN(cl + 1, XFS_AG_MAXLEVELS(mp)))
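XFS_ICI_RECLAIM_TAG is an ordinary radix-tree tag on pag_ici_root, so reclaim candidates can be found without visiting untagged slots. A hedged sketch using the generic radix-tree tag API (agino and the batch size here are illustrative, not taken from this hunk):

	void		*batch[16];
	unsigned int	nr;

	/* mark one inode, then harvest tagged inodes in a batch */
	radix_tree_tag_set(&pag->pag_ici_root, agino, XFS_ICI_RECLAIM_TAG);
	nr = radix_tree_gang_lookup_tag(&pag->pag_ici_root, batch,
					0, 16, XFS_ICI_RECLAIM_TAG);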
......
@@ -89,6 +89,92 @@ STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
  * Internal functions.
  */
/*
* Lookup the record equal to [bno, len] in the btree given by cur.
*/
STATIC int /* error */
xfs_alloc_lookup_eq(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
/*
* Lookup the first record greater than or equal to [bno, len]
* in the btree given by cur.
*/
STATIC int /* error */
xfs_alloc_lookup_ge(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
/*
* Lookup the first record less than or equal to [bno, len]
* in the btree given by cur.
*/
STATIC int /* error */
xfs_alloc_lookup_le(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}
/*
* Update the record referred to by cur to the value given
* by [bno, len].
* This either works (return 0) or gets an EFSCORRUPTED error.
*/
STATIC int /* error */
xfs_alloc_update(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
xfs_extlen_t len) /* length of extent */
{
union xfs_btree_rec rec;
rec.alloc.ar_startblock = cpu_to_be32(bno);
rec.alloc.ar_blockcount = cpu_to_be32(len);
return xfs_btree_update(cur, &rec);
}
/*
* Get the data from the pointed-to record.
*/
STATIC int /* error */
xfs_alloc_get_rec(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t *bno, /* output: starting block of extent */
xfs_extlen_t *len, /* output: length of extent */
int *stat) /* output: success/failure */
{
union xfs_btree_rec *rec;
int error;
error = xfs_btree_get_rec(cur, &rec, stat);
if (!error && *stat == 1) {
*bno = be32_to_cpu(rec->alloc.ar_startblock);
*len = be32_to_cpu(rec->alloc.ar_blockcount);
}
return error;
}
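Each wrapper above just seeds cur->bc_rec and defers to the generic xfs_btree_lookup()/xfs_btree_update() code. A typical caller pairs a lookup with a record fetch; a sketch following the calling conventions above (cur, bno and the error handling are assumed context):

	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	int		i;	/* set to 1 if a record was found */

	/* closest free extent starting at or before bno */
	error = xfs_alloc_lookup_le(cur, bno, 0, &i);
	if (!error && i)
		error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);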
 /*
  * Compute aligned version of the found extent.
  * Takes alignment and min length into account.
@@ -294,21 +380,20 @@ xfs_alloc_fixup_trees(
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 1);
 	}
+
 #ifdef DEBUG
-	{
-		xfs_alloc_block_t	*bnoblock;
-		xfs_alloc_block_t	*cntblock;
-
-		if (bno_cur->bc_nlevels == 1 &&
-		    cnt_cur->bc_nlevels == 1) {
-			bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]);
-			cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]);
-			XFS_WANT_CORRUPTED_RETURN(
-				be16_to_cpu(bnoblock->bb_numrecs) ==
-				be16_to_cpu(cntblock->bb_numrecs));
-		}
+	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
+		struct xfs_btree_block	*bnoblock;
+		struct xfs_btree_block	*cntblock;
+
+		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
+		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
+
+		XFS_WANT_CORRUPTED_RETURN(
+			bnoblock->bb_numrecs == cntblock->bb_numrecs);
 	}
 #endif
 /*
  * Deal with all four cases: the allocated record is contained
  * within the freespace record, so we can have new freespace
@@ -333,7 +418,7 @@ xfs_alloc_fixup_trees(
 		/*
 		 * Delete the entry from the by-size btree.
 		 */
-		if ((error = xfs_alloc_delete(cnt_cur, &i)))
+		if ((error = xfs_btree_delete(cnt_cur, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 1);
 		/*
@@ -343,7 +428,7 @@ xfs_alloc_fixup_trees(
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 0);
-		if ((error = xfs_alloc_insert(cnt_cur, &i)))
+		if ((error = xfs_btree_insert(cnt_cur, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 1);
 	}
@@ -351,7 +436,7 @@ xfs_alloc_fixup_trees(
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 0);
-		if ((error = xfs_alloc_insert(cnt_cur, &i)))
+		if ((error = xfs_btree_insert(cnt_cur, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 1);
 	}
@@ -362,7 +447,7 @@ xfs_alloc_fixup_trees(
 		/*
 		 * No remaining freespace, just delete the by-block tree entry.
 		 */
-		if ((error = xfs_alloc_delete(bno_cur, &i)))
+		if ((error = xfs_btree_delete(bno_cur, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 1);
 	} else {
@@ -379,7 +464,7 @@ xfs_alloc_fixup_trees(
 		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 0);
-		if ((error = xfs_alloc_insert(bno_cur, &i)))
+		if ((error = xfs_btree_insert(bno_cur, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 1);
 	}
@@ -640,8 +725,8 @@ xfs_alloc_ag_vextent_exact(
 	/*
 	 * Allocate/initialize a cursor for the by-number freespace btree.
 	 */
-	bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_BNO, NULL, 0);
+	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_BNO);
 	/*
 	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
 	 * Look for the closest free block <= bno, it must contain bno
@@ -696,8 +781,8 @@ xfs_alloc_ag_vextent_exact(
 	 * We are allocating agbno for rlen [agbno .. end]
 	 * Allocate/initialize a cursor for the by-size btree.
 	 */
-	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_CNT, NULL, 0);
+	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_CNT);
 	ASSERT(args->agbno + args->len <=
 	       be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
@@ -759,8 +844,8 @@ xfs_alloc_ag_vextent_near(
 	/*
 	 * Get a cursor for the by-size btree.
 	 */
-	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_CNT, NULL, 0);
+	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_CNT);
 	ltlen = 0;
 	bno_cur_lt = bno_cur_gt = NULL;
 	/*
@@ -818,7 +903,7 @@ xfs_alloc_ag_vextent_near(
 			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 			if (ltlen >= args->minlen)
 				break;
-			if ((error = xfs_alloc_increment(cnt_cur, 0, &i)))
+			if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
 				goto error0;
 		} while (i);
 		ASSERT(ltlen >= args->minlen);
@@ -828,7 +913,7 @@ xfs_alloc_ag_vextent_near(
 		i = cnt_cur->bc_ptrs[0];
 		for (j = 1, blen = 0, bdiff = 0;
 		     !error && j && (blen < args->maxlen || bdiff > 0);
-		     error = xfs_alloc_increment(cnt_cur, 0, &j)) {
+		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
 			/*
 			 * For each entry, decide if it's better than
 			 * the previous best entry.
@@ -886,8 +971,8 @@ xfs_alloc_ag_vextent_near(
 		/*
 		 * Set up a cursor for the by-bno tree.
 		 */
-		bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp,
-			args->agbp, args->agno, XFS_BTNUM_BNO, NULL, 0);
+		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
+			args->agbp, args->agno, XFS_BTNUM_BNO);
 		/*
 		 * Fix up the btree entries.
 		 */
@@ -914,8 +999,8 @@ xfs_alloc_ag_vextent_near(
 	/*
 	 * Allocate and initialize the cursor for the leftward search.
 	 */
-	bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_BNO, NULL, 0);
+	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_BNO);
 	/*
 	 * Lookup <= bno to find the leftward search's starting point.
 	 */
@@ -938,7 +1023,7 @@ xfs_alloc_ag_vextent_near(
 	 * Increment the cursor, so we will point at the entry just right
 	 * of the leftward entry if any, or to the leftmost entry.
 	 */
-	if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i)))
+	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
 		goto error0;
 	if (!i) {
 		/*
@@ -961,7 +1046,7 @@ xfs_alloc_ag_vextent_near(
 				args->minlen, &ltbnoa, &ltlena);
 			if (ltlena >= args->minlen)
 				break;
-			if ((error = xfs_alloc_decrement(bno_cur_lt, 0, &i)))
+			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
 				goto error0;
 			if (!i) {
 				xfs_btree_del_cursor(bno_cur_lt,
@@ -977,7 +1062,7 @@ xfs_alloc_ag_vextent_near(
 				args->minlen, &gtbnoa, &gtlena);
 			if (gtlena >= args->minlen)
 				break;
-			if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i)))
+			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
 				goto error0;
 			if (!i) {
 				xfs_btree_del_cursor(bno_cur_gt,
@@ -1066,7 +1151,7 @@ xfs_alloc_ag_vextent_near(
 				/*
 				 * Fell off the right end.
 				 */
-				if ((error = xfs_alloc_increment(
+				if ((error = xfs_btree_increment(
 						bno_cur_gt, 0, &i)))
 					goto error0;
 				if (!i) {
@@ -1162,7 +1247,7 @@ xfs_alloc_ag_vextent_near(
 				/*
 				 * Fell off the left end.
 				 */
-				if ((error = xfs_alloc_decrement(
+				if ((error = xfs_btree_decrement(
 						bno_cur_lt, 0, &i)))
 					goto error0;
 				if (!i) {
@@ -1267,8 +1352,8 @@ xfs_alloc_ag_vextent_size(
 	/*
 	 * Allocate and initialize a cursor for the by-size btree.
 	 */
-	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_CNT, NULL, 0);
+	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_CNT);
 	bno_cur = NULL;
 	/*
 	 * Look for an entry >= maxlen+alignment-1 blocks.
@@ -1321,7 +1406,7 @@ xfs_alloc_ag_vextent_size(
 		bestflen = flen;
 		bestfbno = fbno;
 		for (;;) {
-			if ((error = xfs_alloc_decrement(cnt_cur, 0, &i)))
+			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
 				goto error0;
 			if (i == 0)
 				break;
@@ -1372,8 +1457,8 @@ xfs_alloc_ag_vextent_size(
 	/*
 	 * Allocate and initialize a cursor for the by-block tree.
 	 */
-	bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_BNO, NULL, 0);
+	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_BNO);
 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
 			rbno, rlen, XFSA_FIXUP_CNT_OK)))
 		goto error0;
@@ -1416,7 +1501,7 @@ xfs_alloc_ag_vextent_small(
 	xfs_extlen_t	flen;
 	int		i;
 
-	if ((error = xfs_alloc_decrement(ccur, 0, &i)))
+	if ((error = xfs_btree_decrement(ccur, 0, &i)))
 		goto error0;
 	if (i) {
 		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
@@ -1515,8 +1600,7 @@ xfs_free_ag_extent(
 	/*
 	 * Allocate and initialize a cursor for the by-block btree.
 	 */
-	bno_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO, NULL,
-		0);
+	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
 	cnt_cur = NULL;
 	/*
 	 * Look for a neighboring block on the left (lower block numbers)
@@ -1549,7 +1633,7 @@ xfs_free_ag_extent(
 	 * Look for a neighboring block on the right (higher block numbers)
 	 * that is contiguous with this space.
 	 */
-	if ((error = xfs_alloc_increment(bno_cur, 0, &haveright)))
+	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
 		goto error0;
 	if (haveright) {
 		/*
@@ -1575,8 +1659,7 @@ xfs_free_ag_extent(
 	/*
 	 * Now allocate and initialize a cursor for the by-size tree.
 	 */
-	cnt_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT, NULL,
-		0);
+	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
 	/*
 	 * Have both left and right contiguous neighbors.
 	 * Merge all three into a single free block.
@@ -1588,7 +1671,7 @@ xfs_free_ag_extent(
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-		if ((error = xfs_alloc_delete(cnt_cur, &i)))
+		if ((error = xfs_btree_delete(cnt_cur, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 		/*
@@ -1597,19 +1680,19 @@ xfs_free_ag_extent(
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-		if ((error = xfs_alloc_delete(cnt_cur, &i)))
+		if ((error = xfs_btree_delete(cnt_cur, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 		/*
 		 * Delete the old by-block entry for the right block.
 		 */
-		if ((error = xfs_alloc_delete(bno_cur, &i)))
+		if ((error = xfs_btree_delete(bno_cur, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 		/*
 		 * Move the by-block cursor back to the left neighbor.
 		 */
-		if ((error = xfs_alloc_decrement(bno_cur, 0, &i)))
+		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 #ifdef DEBUG
@@ -1648,14 +1731,14 @@ xfs_free_ag_extent(
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-		if ((error = xfs_alloc_delete(cnt_cur, &i)))
+		if ((error = xfs_btree_delete(cnt_cur, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 		/*
 		 * Back up the by-block cursor to the left neighbor, and
 		 * update its length.
 		 */
-		if ((error = xfs_alloc_decrement(bno_cur, 0, &i)))
+		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 		nbno = ltbno;
@@ -1674,7 +1757,7 @@ xfs_free_ag_extent(
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-		if ((error = xfs_alloc_delete(cnt_cur, &i)))
+		if ((error = xfs_btree_delete(cnt_cur, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 		/*
@@ -1693,7 +1776,7 @@ xfs_free_ag_extent(
 	else {
 		nbno = bno;
 		nlen = len;
-		if ((error = xfs_alloc_insert(bno_cur, &i)))
+		if ((error = xfs_btree_insert(bno_cur, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 	}
@@ -1705,7 +1788,7 @@ xfs_free_ag_extent(
 	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
 		goto error0;
 	XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
-	if ((error = xfs_alloc_insert(cnt_cur, &i)))
+	if ((error = xfs_btree_insert(cnt_cur, &i)))
 		goto error0;
 	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
@@ -2188,6 +2271,9 @@ xfs_alloc_read_agf(
 		be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
 		be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
 		be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp);
+	if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+		agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <=
+						be32_to_cpu(agf->agf_length);
 	if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
 			XFS_RANDOM_ALLOC_READ_AGF))) {
 		XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
#ifdef DEBUG #ifdef DEBUG
else if (!XFS_FORCED_SHUTDOWN(mp)) { else if (!XFS_FORCED_SHUTDOWN(mp)) {
ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks)); ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount)); ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest)); ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] == ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
......
@@ -121,6 +121,19 @@ extern ktrace_t *xfs_alloc_trace_buf;
 #define	XFS_ALLOC_KTRACE_BUSYSEARCH	6
 #endif
 
+void
+xfs_alloc_mark_busy(xfs_trans_t *tp,
+		xfs_agnumber_t agno,
+		xfs_agblock_t bno,
+		xfs_extlen_t len);
+
+void
+xfs_alloc_clear_busy(xfs_trans_t *tp,
+		xfs_agnumber_t ag,
+		int idx);
+
+#endif	/* __KERNEL__ */
+
 /*
  * Compute and fill in value of m_ag_maxlevels.
  */
@@ -196,18 +209,4 @@ xfs_free_extent(
 	xfs_fsblock_t	bno,	/* starting block number of extent */
 	xfs_extlen_t	len);	/* length of extent */
 
-void
-xfs_alloc_mark_busy(xfs_trans_t *tp,
-		xfs_agnumber_t agno,
-		xfs_agblock_t bno,
-		xfs_extlen_t len);
-
-void
-xfs_alloc_clear_busy(xfs_trans_t *tp,
-		xfs_agnumber_t ag,
-		int idx);
-
-#endif	/* __KERNEL__ */
-
 #endif	/* __XFS_ALLOC_H__ */
@@ -24,7 +24,6 @@
 
 struct xfs_buf;
 struct xfs_btree_cur;
-struct xfs_btree_sblock;
 struct xfs_mount;
 
 /*
@@ -50,16 +49,6 @@ typedef struct xfs_alloc_rec_incore {
 /* btree pointer type */
 typedef __be32 xfs_alloc_ptr_t;
 
-/* btree block header type */
-typedef struct xfs_btree_sblock xfs_alloc_block_t;
-
-#define	XFS_BUF_TO_ALLOC_BLOCK(bp)	((xfs_alloc_block_t *)XFS_BUF_PTR(bp))
-
-/*
- * Real block structures have a size equal to the disk block size.
- */
-#define	XFS_ALLOC_BLOCK_MAXRECS(lev,cur) ((cur)->bc_mp->m_alloc_mxr[lev != 0])
-#define	XFS_ALLOC_BLOCK_MINRECS(lev,cur) ((cur)->bc_mp->m_alloc_mnr[lev != 0])
-
 /*
  * Minimum and maximum blocksize and sectorsize.
@@ -83,73 +72,39 @@ typedef struct xfs_btree_sblock xfs_alloc_block_t;
 #define	XFS_CNT_BLOCK(mp)	((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1))
 
 /*
- * Record, key, and pointer address macros for btree blocks.
- */
-#define	XFS_ALLOC_REC_ADDR(bb,i,cur)	\
-	XFS_BTREE_REC_ADDR(xfs_alloc, bb, i)
-
-#define	XFS_ALLOC_KEY_ADDR(bb,i,cur)	\
-	XFS_BTREE_KEY_ADDR(xfs_alloc, bb, i)
-
-#define	XFS_ALLOC_PTR_ADDR(bb,i,cur)	\
-	XFS_BTREE_PTR_ADDR(xfs_alloc, bb, i, XFS_ALLOC_BLOCK_MAXRECS(1, cur))
-
-/*
- * Decrement cursor by one record at the level.
- * For nonzero levels the leaf-ward information is untouched.
- */
-extern int xfs_alloc_decrement(struct xfs_btree_cur *cur, int level, int *stat);
-
-/*
- * Delete the record pointed to by cur.
- * The cursor refers to the place where the record was (could be inserted)
- * when the operation returns.
- */
-extern int xfs_alloc_delete(struct xfs_btree_cur *cur, int *stat);
-
-/*
- * Get the data from the pointed-to record.
- */
-extern int xfs_alloc_get_rec(struct xfs_btree_cur *cur, xfs_agblock_t *bno,
-				xfs_extlen_t *len, int *stat);
-
-/*
- * Increment cursor by one record at the level.
- * For nonzero levels the leaf-ward information is untouched.
- */
-extern int xfs_alloc_increment(struct xfs_btree_cur *cur, int level, int *stat);
-
-/*
- * Insert the current record at the point referenced by cur.
- * The cursor may be inconsistent on return if splits have been done.
- */
-extern int xfs_alloc_insert(struct xfs_btree_cur *cur, int *stat);
-
-/*
- * Lookup the record equal to [bno, len] in the btree given by cur.
- */
-extern int xfs_alloc_lookup_eq(struct xfs_btree_cur *cur, xfs_agblock_t bno,
-				xfs_extlen_t len, int *stat);
-
-/*
- * Lookup the first record greater than or equal to [bno, len]
- * in the btree given by cur.
- */
-extern int xfs_alloc_lookup_ge(struct xfs_btree_cur *cur, xfs_agblock_t bno,
-				xfs_extlen_t len, int *stat);
-
-/*
- * Lookup the first record less than or equal to [bno, len]
- * in the btree given by cur.
+ * Btree block header size depends on a superblock flag.
+ *
+ * (not quite yet, but soon)
  */
-extern int xfs_alloc_lookup_le(struct xfs_btree_cur *cur, xfs_agblock_t bno,
-				xfs_extlen_t len, int *stat);
+#define XFS_ALLOC_BLOCK_LEN(mp)	XFS_BTREE_SBLOCK_LEN
 
 /*
- * Update the record referred to by cur, to the value given by [bno, len].
- * This either works (return 0) or gets an EFSCORRUPTED error.
+ * Record, key, and pointer address macros for btree blocks.
+ *
+ * (note that some of these may appear unused, but they are used in userspace)
  */
-extern int xfs_alloc_update(struct xfs_btree_cur *cur, xfs_agblock_t bno,
-				xfs_extlen_t len);
+#define XFS_ALLOC_REC_ADDR(mp, block, index) \
+	((xfs_alloc_rec_t *) \
+		((char *)(block) + \
+		 XFS_ALLOC_BLOCK_LEN(mp) + \
+		 (((index) - 1) * sizeof(xfs_alloc_rec_t))))
+
+#define XFS_ALLOC_KEY_ADDR(mp, block, index) \
+	((xfs_alloc_key_t *) \
+		((char *)(block) + \
+		 XFS_ALLOC_BLOCK_LEN(mp) + \
+		 ((index) - 1) * sizeof(xfs_alloc_key_t)))
+
+#define XFS_ALLOC_PTR_ADDR(mp, block, index, maxrecs) \
+	((xfs_alloc_ptr_t *) \
+		((char *)(block) + \
+		 XFS_ALLOC_BLOCK_LEN(mp) + \
+		 (maxrecs) * sizeof(xfs_alloc_key_t) + \
+		 ((index) - 1) * sizeof(xfs_alloc_ptr_t)))
+
+extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *,
+		struct xfs_trans *, struct xfs_buf *,
+		xfs_agnumber_t, xfs_btnum_t);
+extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
 
 #endif	/* __XFS_ALLOC_BTREE_H__ */
@@ -41,21 +41,36 @@
 #endif
 
 #ifdef XFS_NATIVE_HOST
-#define cpu_to_be16(val)	((__be16)(val))
-#define cpu_to_be32(val)	((__be32)(val))
-#define cpu_to_be64(val)	((__be64)(val))
-#define be16_to_cpu(val)	((__uint16_t)(val))
-#define be32_to_cpu(val)	((__uint32_t)(val))
-#define be64_to_cpu(val)	((__uint64_t)(val))
+#define cpu_to_be16(val)	((__force __be16)(__u16)(val))
+#define cpu_to_be32(val)	((__force __be32)(__u32)(val))
+#define cpu_to_be64(val)	((__force __be64)(__u64)(val))
+#define be16_to_cpu(val)	((__force __u16)(__be16)(val))
+#define be32_to_cpu(val)	((__force __u32)(__be32)(val))
+#define be64_to_cpu(val)	((__force __u64)(__be64)(val))
 #else
-#define cpu_to_be16(val)	(__swab16((__uint16_t)(val)))
-#define cpu_to_be32(val)	(__swab32((__uint32_t)(val)))
-#define cpu_to_be64(val)	(__swab64((__uint64_t)(val)))
-#define be16_to_cpu(val)	(__swab16((__be16)(val)))
-#define be32_to_cpu(val)	(__swab32((__be32)(val)))
-#define be64_to_cpu(val)	(__swab64((__be64)(val)))
+#define cpu_to_be16(val)	((__force __be16)__swab16((__u16)(val)))
+#define cpu_to_be32(val)	((__force __be32)__swab32((__u32)(val)))
+#define cpu_to_be64(val)	((__force __be64)__swab64((__u64)(val)))
+#define be16_to_cpu(val)	(__swab16((__force __u16)(__be16)(val)))
+#define be32_to_cpu(val)	(__swab32((__force __u32)(__be32)(val)))
+#define be64_to_cpu(val)	(__swab64((__force __u64)(__be64)(val)))
 #endif
 
+static inline void be16_add_cpu(__be16 *a, __s16 b)
+{
+	*a = cpu_to_be16(be16_to_cpu(*a) + b);
+}
+
+static inline void be32_add_cpu(__be32 *a, __s32 b)
+{
+	*a = cpu_to_be32(be32_to_cpu(*a) + b);
+}
+
+static inline void be64_add_cpu(__be64 *a, __s64 b)
+{
+	*a = cpu_to_be64(be64_to_cpu(*a) + b);
+}
+
 #endif	/* __KERNEL__ */
 
 /* do we need conversion? */
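The new be*_add_cpu() helpers fold the usual convert, add, convert-back dance on an on-disk big-endian field into one call; a minimal usage sketch:

	__be32	counter = cpu_to_be32(41);

	be32_add_cpu(&counter, 1);	/* read BE, add in CPU order, store BE */
	/* be32_to_cpu(counter) == 42 */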
......
@@ -61,8 +61,7 @@ static inline int xfs_highbit64(__uint64_t v)
 /* Get low bit set out of 32-bit argument, -1 if none set */
 static inline int xfs_lowbit32(__uint32_t v)
 {
-	unsigned long	t = v;
-	return (v) ? find_first_bit(&t, 32) : -1;
+	return ffs(v) - 1;
 }
 
 /* Get low bit set out of 64-bit argument, -1 if none set */
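The rewrite relies on the kernel's ffs() being 1-based with ffs(0) == 0, so a single subtraction also yields the documented -1 for a zero argument:

	/* ffs(0x8) == 4  ->  xfs_lowbit32(0x8) == 3  */
	/* ffs(0)   == 0  ->  xfs_lowbit32(0)   == -1 */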
......
@@ -137,9 +137,7 @@ typedef struct xfs_bmalloca {
 	char			conv;	/* overwriting unwritten extents */
 } xfs_bmalloca_t;
 
-#ifdef __KERNEL__
-
-#if defined(XFS_BMAP_TRACE)
+#if defined(__KERNEL__) && defined(XFS_BMAP_TRACE)
 /*
  * Trace operations for bmap extent tracing
  */
@@ -163,9 +161,12 @@ xfs_bmap_trace_exlist(
 	int			whichfork);	/* data or attr fork */
 #define	XFS_BMAP_TRACE_EXLIST(ip,c,w)	\
 	xfs_bmap_trace_exlist(__func__,ip,c,w)
-#else
+
+#else	/* __KERNEL__ && XFS_BMAP_TRACE */
+
 #define	XFS_BMAP_TRACE_EXLIST(ip,c,w)
-#endif
+
+#endif	/* __KERNEL__ && XFS_BMAP_TRACE */
 /*
  * Convert inode from non-attributed to attributed.
@@ -205,20 +206,6 @@ xfs_bmap_compute_maxlevels(
 	struct xfs_mount	*mp,	/* file system mount structure */
 	int			whichfork);	/* data or attr fork */
 
-/*
- * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
- * caller.  Frees all the extents that need freeing, which must be done
- * last due to locking considerations.
- *
- * Return 1 if the given transaction was committed and a new one allocated,
- * and 0 otherwise.
- */
-int						/* error */
-xfs_bmap_finish(
-	struct xfs_trans	**tp,		/* transaction pointer addr */
-	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
-	int			*committed);	/* xact committed or not */
-
 /*
  * Returns the file-relative block number of the first unused block in the file.
  * This is the lowest-address hole if the file has holes, else the first block
@@ -343,6 +330,32 @@ xfs_bunmapi(
 		extents */
 	int			*done);		/* set if not done yet */
/*
* Check an extent list, which has just been read, for
* any bit in the extent flag field.
*/
int
xfs_check_nostate_extents(
struct xfs_ifork *ifp,
xfs_extnum_t idx,
xfs_extnum_t num);
#ifdef __KERNEL__
/*
* Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
* caller. Frees all the extents that need freeing, which must be done
* last due to locking considerations.
*
* Return 1 if the given transaction was committed and a new one allocated,
* and 0 otherwise.
*/
int /* error */
xfs_bmap_finish(
struct xfs_trans **tp, /* transaction pointer addr */
xfs_bmap_free_t *flist, /* i/o: list extents to free */
int *committed); /* xact committed or not */
 /*
  * Fcntl interface to xfs_bmapi.
  */
@@ -374,16 +387,6 @@ xfs_bmap_count_blocks(
 	int			whichfork,
 	int			*count);
 
-/*
- * Check an extent list, which has just been read, for
- * any bit in the extent flag field.
- */
-int
-xfs_check_nostate_extents(
-	struct xfs_ifork	*ifp,
-	xfs_extnum_t		idx,
-	xfs_extnum_t		num);
-
 /*
  * Search the extent records for the entry containing block bno.
  * If bno lies in a hole, point to the next entry.  If bno lies
......
@@ -21,9 +21,10 @@
 #define XFS_BMAP_MAGIC	0x424d4150	/* 'BMAP' */
 
 struct xfs_btree_cur;
-struct xfs_btree_lblock;
+struct xfs_btree_block;
 struct xfs_mount;
 struct xfs_inode;
+struct xfs_trans;
 
 /*
  * Bmap root header, on-disk form only.
@@ -145,71 +146,60 @@ typedef struct xfs_bmbt_key {
 /* btree pointer type */
 typedef	__be64	xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
-/* btree block header type */
-typedef struct xfs_btree_lblock xfs_bmbt_block_t;
-
-#define XFS_BUF_TO_BMBT_BLOCK(bp)	((xfs_bmbt_block_t *)XFS_BUF_PTR(bp))
-
-#define XFS_BMAP_RBLOCK_DSIZE(lev,cur)	((cur)->bc_private.b.forksize)
-#define XFS_BMAP_RBLOCK_ISIZE(lev,cur)	\
-	((int)XFS_IFORK_PTR((cur)->bc_private.b.ip, \
-		    (cur)->bc_private.b.whichfork)->if_broot_bytes)
-
-#define XFS_BMAP_BLOCK_DMAXRECS(lev,cur) \
-	(((lev) == (cur)->bc_nlevels - 1 ? \
-		XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur), \
-			xfs_bmdr, (lev) == 0) : \
-		((cur)->bc_mp->m_bmap_dmxr[(lev) != 0])))
-#define XFS_BMAP_BLOCK_IMAXRECS(lev,cur) \
-	(((lev) == (cur)->bc_nlevels - 1 ? \
-		XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur),\
-			xfs_bmbt, (lev) == 0) : \
-		((cur)->bc_mp->m_bmap_dmxr[(lev) != 0])))
-
-#define XFS_BMAP_BLOCK_DMINRECS(lev,cur) \
-	(((lev) == (cur)->bc_nlevels - 1 ? \
-		XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur),\
-			xfs_bmdr, (lev) == 0) : \
-		((cur)->bc_mp->m_bmap_dmnr[(lev) != 0])))
-#define XFS_BMAP_BLOCK_IMINRECS(lev,cur) \
-	(((lev) == (cur)->bc_nlevels - 1 ? \
-		XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur),\
-			xfs_bmbt, (lev) == 0) : \
-		((cur)->bc_mp->m_bmap_dmnr[(lev) != 0])))
-
-#define XFS_BMAP_REC_DADDR(bb,i,cur)	(XFS_BTREE_REC_ADDR(xfs_bmbt, bb, i))
-
-#define XFS_BMAP_REC_IADDR(bb,i,cur)	(XFS_BTREE_REC_ADDR(xfs_bmbt, bb, i))
-
-#define XFS_BMAP_KEY_DADDR(bb,i,cur)	\
-	(XFS_BTREE_KEY_ADDR(xfs_bmbt, bb, i))
-
-#define XFS_BMAP_KEY_IADDR(bb,i,cur)	\
-	(XFS_BTREE_KEY_ADDR(xfs_bmbt, bb, i))
-
-#define XFS_BMAP_PTR_DADDR(bb,i,cur)	\
-	(XFS_BTREE_PTR_ADDR(xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \
-				be16_to_cpu((bb)->bb_level), cur)))
-#define XFS_BMAP_PTR_IADDR(bb,i,cur)	\
-	(XFS_BTREE_PTR_ADDR(xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \
-				be16_to_cpu((bb)->bb_level), cur)))
+/*
+ * Btree block header size depends on a superblock flag.
+ *
+ * (not quite yet, but soon)
+ */
+#define XFS_BMBT_BLOCK_LEN(mp)	XFS_BTREE_LBLOCK_LEN
+
+#define XFS_BMBT_REC_ADDR(mp, block, index) \
+	((xfs_bmbt_rec_t *) \
+		((char *)(block) + \
+		 XFS_BMBT_BLOCK_LEN(mp) + \
+		 ((index) - 1) * sizeof(xfs_bmbt_rec_t)))
+
+#define XFS_BMBT_KEY_ADDR(mp, block, index) \
+	((xfs_bmbt_key_t *) \
+		((char *)(block) + \
+		 XFS_BMBT_BLOCK_LEN(mp) + \
+		 ((index) - 1) * sizeof(xfs_bmbt_key_t)))
+
+#define XFS_BMBT_PTR_ADDR(mp, block, index, maxrecs) \
+	((xfs_bmbt_ptr_t *) \
+		((char *)(block) + \
+		 XFS_BMBT_BLOCK_LEN(mp) + \
+		 (maxrecs) * sizeof(xfs_bmbt_key_t) + \
+		 ((index) - 1) * sizeof(xfs_bmbt_ptr_t)))
+
+#define XFS_BMDR_REC_ADDR(block, index) \
+	((xfs_bmdr_rec_t *) \
+		((char *)(block) + \
+		 sizeof(struct xfs_bmdr_block) + \
+		 ((index) - 1) * sizeof(xfs_bmdr_rec_t)))
+
+#define XFS_BMDR_KEY_ADDR(block, index) \
+	((xfs_bmdr_key_t *) \
+		((char *)(block) + \
+		 sizeof(struct xfs_bmdr_block) + \
+		 ((index) - 1) * sizeof(xfs_bmdr_key_t)))
+
+#define XFS_BMDR_PTR_ADDR(block, index, maxrecs) \
+	((xfs_bmdr_ptr_t *) \
+		((char *)(block) + \
+		 sizeof(struct xfs_bmdr_block) + \
+		 (maxrecs) * sizeof(xfs_bmdr_key_t) + \
+		 ((index) - 1) * sizeof(xfs_bmdr_ptr_t)))
 
 /*
  * These are to be used when we know the size of the block and
  * we don't have a cursor.
  */
-#define XFS_BMAP_BROOT_REC_ADDR(bb,i,sz) \
-	(XFS_BTREE_REC_ADDR(xfs_bmbt,bb,i))
-#define XFS_BMAP_BROOT_KEY_ADDR(bb,i,sz) \
-	(XFS_BTREE_KEY_ADDR(xfs_bmbt,bb,i))
-#define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) \
-	(XFS_BTREE_PTR_ADDR(xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)))
-
-#define XFS_BMAP_BROOT_NUMRECS(bb)	be16_to_cpu((bb)->bb_numrecs)
-#define XFS_BMAP_BROOT_MAXRECS(sz)	XFS_BTREE_BLOCK_MAXRECS(sz,xfs_bmbt,0)
+#define XFS_BMAP_BROOT_PTR_ADDR(mp, bb, i, sz) \
+	XFS_BMBT_PTR_ADDR(mp, bb, i, xfs_bmbt_maxrecs(mp, sz, 0))
 
 #define XFS_BMAP_BROOT_SPACE_CALC(nrecs) \
-	(int)(sizeof(xfs_bmbt_block_t) + \
+	(int)(XFS_BTREE_LBLOCK_LEN + \
 	       ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t))))
 
 #define XFS_BMAP_BROOT_SPACE(bb) \
@@ -223,42 +213,12 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t;
  */
 #define XFS_BM_MAXLEVELS(mp,w)		((mp)->m_bm_maxlevels[(w)])
 
-#define XFS_BMAP_SANITY_CHECK(mp,bb,level) \
-	(be32_to_cpu((bb)->bb_magic) == XFS_BMAP_MAGIC && \
-	 be16_to_cpu((bb)->bb_level) == level && \
-	 be16_to_cpu((bb)->bb_numrecs) > 0 && \
-	 be16_to_cpu((bb)->bb_numrecs) <= (mp)->m_bmap_dmxr[(level) != 0])
-
-#ifdef __KERNEL__
-
-#if defined(XFS_BMBT_TRACE)
-/*
- * Trace buffer entry types.
- */
-#define XFS_BMBT_KTRACE_ARGBI	1
-#define XFS_BMBT_KTRACE_ARGBII	2
-#define XFS_BMBT_KTRACE_ARGFFFI	3
-#define XFS_BMBT_KTRACE_ARGI	4
-#define XFS_BMBT_KTRACE_ARGIFK	5
-#define XFS_BMBT_KTRACE_ARGIFR	6
-#define XFS_BMBT_KTRACE_ARGIK	7
-#define XFS_BMBT_KTRACE_CUR	8
-
-#define XFS_BMBT_TRACE_SIZE	4096	/* size of global trace buffer */
-#define XFS_BMBT_KTRACE_SIZE	32	/* size of per-inode trace buffer */
-
-extern ktrace_t	*xfs_bmbt_trace_buf;
-#endif
-
 /*
  * Prototypes for xfs_bmap.c to call.
  */
-extern void xfs_bmdr_to_bmbt(xfs_bmdr_block_t *, int, xfs_bmbt_block_t *, int);
-extern int xfs_bmbt_decrement(struct xfs_btree_cur *, int, int *);
-extern int xfs_bmbt_delete(struct xfs_btree_cur *, int *);
+extern void xfs_bmdr_to_bmbt(struct xfs_mount *, xfs_bmdr_block_t *, int,
+			struct xfs_btree_block *, int);
 extern void xfs_bmbt_get_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s);
-extern xfs_bmbt_block_t *xfs_bmbt_get_block(struct xfs_btree_cur *cur,
-						int, struct xfs_buf **bpp);
 extern xfs_filblks_t xfs_bmbt_get_blockcount(xfs_bmbt_rec_host_t *r);
 extern xfs_fsblock_t xfs_bmbt_get_startblock(xfs_bmbt_rec_host_t *r);
 extern xfs_fileoff_t xfs_bmbt_get_startoff(xfs_bmbt_rec_host_t *r);
@@ -268,22 +228,6 @@ extern void xfs_bmbt_disk_get_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s);
 extern xfs_filblks_t xfs_bmbt_disk_get_blockcount(xfs_bmbt_rec_t *r);
 extern xfs_fileoff_t xfs_bmbt_disk_get_startoff(xfs_bmbt_rec_t *r);
 
-extern int xfs_bmbt_increment(struct xfs_btree_cur *, int, int *);
-extern int xfs_bmbt_insert(struct xfs_btree_cur *, int *);
-extern void xfs_bmbt_log_block(struct xfs_btree_cur *, struct xfs_buf *, int);
-extern void xfs_bmbt_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int,
-				int);
-extern int xfs_bmbt_lookup_eq(struct xfs_btree_cur *, xfs_fileoff_t,
-				xfs_fsblock_t, xfs_filblks_t, int *);
-extern int xfs_bmbt_lookup_ge(struct xfs_btree_cur *, xfs_fileoff_t,
-				xfs_fsblock_t, xfs_filblks_t, int *);
-
-/*
- * Give the bmap btree a new root block. Copy the old broot contents
- * down into a real block and make the broot point to it.
- */
-extern int xfs_bmbt_newroot(struct xfs_btree_cur *cur, int *lflags, int *stat);
-
 extern void xfs_bmbt_set_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s);
 extern void xfs_bmbt_set_allf(xfs_bmbt_rec_host_t *r, xfs_fileoff_t o,
 			xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v);
@@ -296,10 +240,15 @@ extern void xfs_bmbt_disk_set_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s);
 extern void xfs_bmbt_disk_set_allf(xfs_bmbt_rec_t *r, xfs_fileoff_t o,
 			xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v);
 
-extern void xfs_bmbt_to_bmdr(xfs_bmbt_block_t *, int, xfs_bmdr_block_t *, int);
-extern int xfs_bmbt_update(struct xfs_btree_cur *, xfs_fileoff_t,
-				xfs_fsblock_t, xfs_filblks_t, xfs_exntst_t);
+extern void xfs_bmbt_to_bmdr(struct xfs_mount *, struct xfs_btree_block *, int,
+			xfs_bmdr_block_t *, int);
+
+extern int xfs_bmbt_get_maxrecs(struct xfs_btree_cur *, int level);
+extern int xfs_bmdr_maxrecs(struct xfs_mount *, int blocklen, int leaf);
+extern int xfs_bmbt_maxrecs(struct xfs_mount *, int blocklen, int leaf);
+
+extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
+		struct xfs_trans *, struct xfs_inode *, int);
 
 #endif	/* __KERNEL__ */
 
 #endif	/* __XFS_BMAP_BTREE_H__ */
@@ -1566,11 +1566,14 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
 	int nmap, error, w, count, c, got, i, mapi;
 	xfs_trans_t *tp;
 	xfs_mount_t *mp;
+	xfs_drfsbno_t	nblks;
 
 	dp = args->dp;
 	mp = dp->i_mount;
 	w = args->whichfork;
 	tp = args->trans;
+	nblks = dp->i_d.di_nblocks;
+
 	/*
 	 * For new directories adjust the file offset and block count.
 	 */
@@ -1647,6 +1650,8 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
 	}
 	if (mapp != &map)
 		kmem_free(mapp);
+	/* account for newly allocated blocks in reserved blocks total */
+	args->total -= dp->i_d.di_nblocks - nblks;
 	*new_blkno = (xfs_dablk_t)bno;
 	return 0;
 }
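The new accounting charges whatever the grow actually allocated against the caller's remaining block reservation; a worked example with assumed numbers:

	/* before: nblks == di_nblocks == 100, args->total == 10	*/
	/* the grow allocates 3 blocks: di_nblocks becomes 103		*/
	/* after:  args->total == 10 - (103 - 100) == 7			*/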
......
@@ -78,8 +78,7 @@ typedef struct xfs_dinode
 	xfs_dinode_core_t	di_core;
 	/*
 	 * In adding anything between the core and the union, be
-	 * sure to update the macros like XFS_LITINO below and
-	 * XFS_BMAP_RBLOCK_DSIZE in xfs_bmap_btree.h.
+	 * sure to update the macros like XFS_LITINO below.
 	 */
 	__be32			di_next_unlinked;/* agi unlinked list ptr */
 	union {
@@ -166,7 +165,7 @@ typedef enum xfs_dinode_fmt
  */
 #define	XFS_LITINO(mp)	((mp)->m_litino)
 #define	XFS_BROOT_SIZE_ADJ	\
-	(sizeof(xfs_bmbt_block_t) - sizeof(xfs_bmdr_block_t))
+	(XFS_BTREE_LBLOCK_LEN - sizeof(xfs_bmdr_block_t))
 
 /*
  * Inode data & attribute fork sizes, per inode.
......
@@ -30,11 +30,9 @@ typedef struct xfs_imap {
 	ushort		im_boffset;	/* inode offset in block in bytes */
 } xfs_imap_t;
 
-#ifdef __KERNEL__
 struct xfs_mount;
 struct xfs_trans;
 int	xfs_imap(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
 		 xfs_imap_t *, uint);
-#endif
 
 #endif	/* __XFS_IMAP_H__ */