Commit 6a94cb73 authored by Linus Torvalds

Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs: (184 commits)
  [XFS] Fix race in xfs_write() between direct and buffered I/O with DMAPI
  [XFS] handle unaligned data in xfs_bmbt_disk_get_all
  [XFS] avoid memory allocations in xfs_fs_vcmn_err
  [XFS] Fix speculative allocation beyond eof
  [XFS] Remove XFS_BUF_SHUT() and friends
  [XFS] Use the incore inode size in xfs_file_readdir()
  [XFS] set b_error from bio error in xfs_buf_bio_end_io
  [XFS] use inode_change_ok for setattr permission checking
  [XFS] add a FMODE flag to make XFS invisible I/O less hacky
  [XFS] resync headers with libxfs
  [XFS] simplify projid check in xfs_rename
  [XFS] replace b_fspriv with b_mount
  [XFS] Remove unused tracing code
  [XFS] Remove unnecessary assertion
  [XFS] Remove unused variable in ktrace_free()
  [XFS] Check return value of xfs_buf_get_noaddr()
  [XFS] Fix hang after disallowed rename across directory quota domains
  [XFS] Fix compile with CONFIG_COMPAT enabled
  move inode tracing out of xfs_vnode.
  move vn_iowait / vn_iowake into xfs_aops.c
  ...
@@ -229,10 +229,6 @@ The following sysctls are available for the XFS filesystem:
ISGID bit is cleared if the irix_sgid_inherit compatibility sysctl
is set.
fs.xfs.restrict_chown (Min: 0 Default: 1 Max: 1)
Controls whether unprivileged users can use chown to "give away"
a file to another user.
fs.xfs.inherit_sync (Min: 0 Default: 1 Max: 1)
Setting this to "1" will cause the "sync" flag set
by the xfs_io(8) chattr command on a directory to be
......
@@ -108,19 +108,20 @@ static void wake_up_inode(struct inode *inode)
wake_up_bit(&inode->i_state, __I_LOCK);
}
static struct inode *alloc_inode(struct super_block *sb)
/**
* inode_init_always - perform inode structure intialisation
* @sb - superblock inode belongs to.
* @inode - inode to initialise
*
* These are initializations that need to be done on every inode
* allocation as the fields are not initialised by slab allocation.
*/
struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
{
static const struct address_space_operations empty_aops;
static struct inode_operations empty_iops;
static const struct file_operations empty_fops;
struct inode *inode;
if (sb->s_op->alloc_inode)
inode = sb->s_op->alloc_inode(sb);
else
inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
if (inode) {
struct address_space * const mapping = &inode->i_data;
inode->i_sb = sb;
@@ -183,9 +184,24 @@ static struct inode *alloc_inode(struct super_block *sb)
}
inode->i_private = NULL;
inode->i_mapping = mapping;
}
return inode;
}
EXPORT_SYMBOL(inode_init_always);
static struct inode *alloc_inode(struct super_block *sb)
{
struct inode *inode;
if (sb->s_op->alloc_inode)
inode = sb->s_op->alloc_inode(sb);
else
inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
if (inode)
return inode_init_always(sb, inode);
return NULL;
}
void destroy_inode(struct inode *inode)
{
@@ -196,6 +212,7 @@ void destroy_inode(struct inode *inode)
else
kmem_cache_free(inode_cachep, (inode));
}
EXPORT_SYMBOL(destroy_inode);
/*
@@ -534,6 +551,49 @@ static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head
return node ? inode : NULL;
}
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
unsigned long tmp;
tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
L1_CACHE_BYTES;
tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
return tmp & I_HASHMASK;
}
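The hash() helper moved up here folds the superblock pointer into the inode number before masking down to a chain, so the same inode number on two different filesystems lands on different hash chains. A rough, self-contained userspace sketch of that mixing follows; it is only an illustration, and DEMO_GOLDEN_RATIO_PRIME, DEMO_HASHBITS, DEMO_HASHMASK and DEMO_L1_CACHE_BYTES are made-up stand-ins for the kernel's GOLDEN_RATIO_PRIME, I_HASHBITS, I_HASHMASK and L1_CACHE_BYTES.

#include <stdio.h>

/* Illustrative stand-ins; the kernel derives these from the hash table size. */
#define DEMO_GOLDEN_RATIO_PRIME	0x9e370001UL
#define DEMO_HASHBITS		14
#define DEMO_HASHMASK		((1UL << DEMO_HASHBITS) - 1)
#define DEMO_L1_CACHE_BYTES	64

/* Same shape as the hash() added above: fold the superblock address into
 * the per-filesystem inode number, then mix and mask down to a bucket. */
static unsigned long demo_hash(const void *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^
	      (DEMO_GOLDEN_RATIO_PRIME + hashval) / DEMO_L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ DEMO_GOLDEN_RATIO_PRIME) >> DEMO_HASHBITS);
	return tmp & DEMO_HASHMASK;
}

int main(void)
{
	int sb1, sb2;	/* two distinct addresses standing in for superblocks */

	/* The same inode number hashes to different buckets per "superblock". */
	printf("ino 42 on sb1 -> bucket %lu\n", demo_hash(&sb1, 42));
	printf("ino 42 on sb2 -> bucket %lu\n", demo_hash(&sb2, 42));
	return 0;
}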
static inline void
__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
struct inode *inode)
{
inodes_stat.nr_inodes++;
list_add(&inode->i_list, &inode_in_use);
list_add(&inode->i_sb_list, &sb->s_inodes);
if (head)
hlist_add_head(&inode->i_hash, head);
}
/**
* inode_add_to_lists - add a new inode to relevant lists
* @sb - superblock inode belongs to.
* @inode - inode to mark in use
*
* When an inode is allocated it needs to be accounted for, added to the in use
* list, the owning superblock and the inode hash. This needs to be done under
* the inode_lock, so export a function to do this rather than the inode lock
* itself. We calculate the hash list to add to here so it is all internal
* which requires the caller to have already set up the inode number in the
* inode to add.
*/
void inode_add_to_lists(struct super_block *sb, struct inode *inode)
{
struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);
spin_lock(&inode_lock);
__inode_add_to_lists(sb, head, inode);
spin_unlock(&inode_lock);
}
EXPORT_SYMBOL_GPL(inode_add_to_lists);
/**
* new_inode - obtain an inode
* @sb: superblock
@@ -561,9 +621,7 @@ struct inode *new_inode(struct super_block *sb)
inode = alloc_inode(sb);
if (inode) {
spin_lock(&inode_lock);
inodes_stat.nr_inodes++;
list_add(&inode->i_list, &inode_in_use);
list_add(&inode->i_sb_list, &sb->s_inodes);
__inode_add_to_lists(sb, NULL, inode);
inode->i_ino = ++last_ino;
inode->i_state = 0;
spin_unlock(&inode_lock);
@@ -622,10 +680,7 @@ static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *h
if (set(inode, data))
goto set_failed;
inodes_stat.nr_inodes++;
list_add(&inode->i_list, &inode_in_use);
list_add(&inode->i_sb_list, &sb->s_inodes);
hlist_add_head(&inode->i_hash, head);
__inode_add_to_lists(sb, head, inode);
inode->i_state = I_LOCK|I_NEW;
spin_unlock(&inode_lock);
@@ -671,10 +726,7 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
old = find_inode_fast(sb, head, ino);
if (!old) {
inode->i_ino = ino;
inodes_stat.nr_inodes++;
list_add(&inode->i_list, &inode_in_use);
list_add(&inode->i_sb_list, &sb->s_inodes);
hlist_add_head(&inode->i_hash, head);
__inode_add_to_lists(sb, head, inode);
inode->i_state = I_LOCK|I_NEW;
spin_unlock(&inode_lock);
@@ -698,16 +750,6 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
return inode;
}
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
unsigned long tmp;
tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
L1_CACHE_BYTES;
tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
return tmp & I_HASHMASK;
}
/**
* iunique - get a unique inode number
* @sb: superblock
@@ -1292,6 +1334,7 @@ int inode_wait(void *word)
schedule();
return 0;
}
EXPORT_SYMBOL(inode_wait);
/*
* If we try to find an inode in the inode hash while it is being
......
@@ -85,13 +85,13 @@ xfs-y += xfs_alloc.o \
xfs_trans_inode.o \
xfs_trans_item.o \
xfs_utils.o \
xfs_vfsops.o \
xfs_vnodeops.o \
xfs_rw.o \
xfs_dmops.o \
xfs_qmops.o
xfs-$(CONFIG_XFS_TRACE) += xfs_dir2_trace.o
xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o \
xfs_dir2_trace.o
# Objects in linux/
xfs-y += $(addprefix $(XFS_LINUX)/, \
@@ -106,7 +106,7 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \
xfs_iops.o \
xfs_lrw.o \
xfs_super.o \
xfs_vnode.o \
xfs_sync.o \
xfs_xattr.o)
# Objects in support/
......
@@ -32,23 +32,15 @@ typedef struct sv_s {
wait_queue_head_t waiters;
} sv_t;
#define SV_FIFO 0x0 /* sv_t is FIFO type */
#define SV_LIFO 0x2 /* sv_t is LIFO type */
#define SV_PRIO 0x4 /* sv_t is PRIO type */
#define SV_KEYED 0x6 /* sv_t is KEYED type */
#define SV_DEFAULT SV_FIFO
static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state,
unsigned long timeout)
static inline void _sv_wait(sv_t *sv, spinlock_t *lock)
{
DECLARE_WAITQUEUE(wait, current);
add_wait_queue_exclusive(&sv->waiters, &wait);
__set_current_state(state);
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock(lock);
schedule_timeout(timeout);
schedule();
remove_wait_queue(&sv->waiters, &wait);
}
@@ -58,13 +50,7 @@ static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state,
#define sv_destroy(sv) \
/*NOTHING*/
#define sv_wait(sv, pri, lock, s) \
_sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
_sv_wait(sv, lock)
#define sv_wait_sig(sv, pri, lock, s) \
_sv_wait(sv, lock, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
#define sv_timedwait(sv, pri, lock, s, svf, ts, rts) \
_sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, timespec_to_jiffies(ts))
#define sv_timedwait_sig(sv, pri, lock, s, svf, ts, rts) \
_sv_wait(sv, lock, TASK_INTERRUPTIBLE, timespec_to_jiffies(ts))
#define sv_signal(sv) \
wake_up(&(sv)->waiters)
#define sv_broadcast(sv) \
......
@@ -42,6 +42,40 @@
#include <linux/pagevec.h>
#include <linux/writeback.h>
/*
* Prime number of hash buckets since address is used as the key.
*/
#define NVSYNC 37
#define to_ioend_wq(v) (&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];
void __init
xfs_ioend_init(void)
{
int i;
for (i = 0; i < NVSYNC; i++)
init_waitqueue_head(&xfs_ioend_wq[i]);
}
void
xfs_ioend_wait(
xfs_inode_t *ip)
{
wait_queue_head_t *wq = to_ioend_wq(ip);
wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}
STATIC void
xfs_ioend_wake(
xfs_inode_t *ip)
{
if (atomic_dec_and_test(&ip->i_iocount))
wake_up(to_ioend_wq(ip));
}
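The new ioend wait queues are a small fixed array of wait_queue_head_t hashed by inode address; NVSYNC is prime so addresses spread evenly, and a collision only means two inodes share a wait queue head, since each waiter rechecks its own i_iocount. A tiny userspace illustration of the address-to-bucket mapping (names invented here, not part of the patch):

#include <stdio.h>

/* Illustration of the bucket mapping used by to_ioend_wq(): a prime number
 * of buckets keyed on the object's address. Collisions only mean two inodes
 * share a wait queue head; waiters still recheck their own i_iocount. */
#define DEMO_NVSYNC 37
#define demo_bucket(p) (((unsigned long)(p)) % DEMO_NVSYNC)

int main(void)
{
	int a, b, c;	/* stand-ins for three in-memory inodes */

	printf("inode at %p -> wait queue %lu\n", (void *)&a, demo_bucket(&a));
	printf("inode at %p -> wait queue %lu\n", (void *)&b, demo_bucket(&b));
	printf("inode at %p -> wait queue %lu\n", (void *)&c, demo_bucket(&c));
	return 0;
}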
STATIC void
xfs_count_page_state(
struct page *page,
@@ -146,16 +180,25 @@ xfs_destroy_ioend(
xfs_ioend_t *ioend)
{
struct buffer_head *bh, *next;
struct xfs_inode *ip = XFS_I(ioend->io_inode);
for (bh = ioend->io_buffer_head; bh; bh = next) {
next = bh->b_private;
bh->b_end_io(bh, !ioend->io_error);
}
if (unlikely(ioend->io_error)) {
vn_ioerror(XFS_I(ioend->io_inode), ioend->io_error,
__FILE__,__LINE__);
/*
* Volume managers supporting multiple paths can send back ENODEV
* when the final path disappears. In this case continuing to fill
* the page cache with dirty data which cannot be written out is
* evil, so prevent that.
*/
if (unlikely(ioend->io_error == -ENODEV)) {
xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
__FILE__, __LINE__);
}
vn_iowake(XFS_I(ioend->io_inode));
xfs_ioend_wake(ip);
mempool_free(ioend, xfs_ioend_pool);
}
@@ -191,7 +234,7 @@ xfs_setfilesize(
ip->i_d.di_size = isize;
ip->i_update_core = 1;
ip->i_update_size = 1;
mark_inode_dirty_sync(ioend->io_inode);
xfs_mark_inode_dirty_sync(ip);
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -317,14 +360,9 @@ xfs_map_blocks(
xfs_iomap_t *mapp,
int flags)
{
xfs_inode_t *ip = XFS_I(inode);
int error, nmaps = 1;
int nmaps = 1;
error = xfs_iomap(ip, offset, count,
flags, mapp, &nmaps);
if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
xfs_iflags_set(ip, XFS_IMODIFIED);
return -error;
return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
}
STATIC_INLINE int
@@ -512,7 +550,7 @@ xfs_cancel_ioend(
unlock_buffer(bh);
} while ((bh = next_bh) != NULL);
vn_iowake(XFS_I(ioend->io_inode));
xfs_ioend_wake(XFS_I(ioend->io_inode));
mempool_free(ioend, xfs_ioend_pool);
} while ((ioend = next) != NULL);
}
......
@@ -43,4 +43,7 @@ typedef struct xfs_ioend {
extern const struct address_space_operations xfs_address_space_operations;
extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
extern void xfs_ioend_init(void);
extern void xfs_ioend_wait(struct xfs_inode *);
#endif /* __XFS_AOPS_H__ */
@@ -630,6 +630,29 @@ xfs_buf_get_flags(
return NULL;
}
STATIC int
_xfs_buf_read(
xfs_buf_t *bp,
xfs_buf_flags_t flags)
{
int status;
XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags);
ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
XBF_READ_AHEAD | _XBF_RUN_QUEUES);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
XBF_READ_AHEAD | _XBF_RUN_QUEUES);
status = xfs_buf_iorequest(bp);
if (!status && !(flags & XBF_ASYNC))
status = xfs_buf_iowait(bp);
return status;
}
xfs_buf_t *
xfs_buf_read_flags(
xfs_buftarg_t *target,
@@ -646,7 +669,7 @@ xfs_buf_read_flags(
if (!XFS_BUF_ISDONE(bp)) {
XB_TRACE(bp, "read", (unsigned long)flags);
XFS_STATS_INC(xb_get_read);
xfs_buf_iostart(bp, flags);
_xfs_buf_read(bp, flags);
} else if (flags & XBF_ASYNC) {
XB_TRACE(bp, "read_async", (unsigned long)flags);
/*
@@ -1048,50 +1071,39 @@ xfs_buf_ioerror(
XB_TRACE(bp, "ioerror", (unsigned long)error);
}
/*
* Initiate I/O on a buffer, based on the flags supplied.
* The b_iodone routine in the buffer supplied will only be called
* when all of the subsidiary I/O requests, if any, have been completed.
*/
int
xfs_buf_iostart(
xfs_buf_t *bp,
xfs_buf_flags_t flags)
{
int status = 0;
XB_TRACE(bp, "iostart", (unsigned long)flags);
if (flags & XBF_DELWRI) {
bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
xfs_buf_delwri_queue(bp, 1);
return 0;
}
bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
XBF_READ_AHEAD | _XBF_RUN_QUEUES);
bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
XBF_READ_AHEAD | _XBF_RUN_QUEUES);
BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
/* For writes allow an alternate strategy routine to precede
* the actual I/O request (which may not be issued at all in
* a shutdown situation, for example).
*/
status = (flags & XBF_WRITE) ?
xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
/* Wait for I/O if we are not an async request.
* Note: async I/O request completion will release the buffer,
* and that can already be done by this point. So using the
* buffer pointer from here on, after async I/O, is invalid.
*/
if (!status && !(flags & XBF_ASYNC))
status = xfs_buf_iowait(bp);
return status;
}
int
xfs_bawrite(
void *mp,
struct xfs_buf *bp)
{
XB_TRACE(bp, "bawrite", 0);
ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
xfs_buf_delwri_dequeue(bp);
bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD);
bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
bp->b_mount = mp;
bp->b_strat = xfs_bdstrat_cb;
return xfs_bdstrat_cb(bp);
}
void
xfs_bdwrite(
void *mp,
struct xfs_buf *bp)
{
XB_TRACE(bp, "bdwrite", 0);
bp->b_strat = xfs_bdstrat_cb;
bp->b_mount = mp;
bp->b_flags &= ~XBF_READ;
bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
xfs_buf_delwri_queue(bp, 1);
}
STATIC_INLINE void
@@ -1114,8 +1126,7 @@ xfs_buf_bio_end_io(
unsigned int blocksize = bp->b_target->bt_bsize;
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
bp->b_error = EIO;
xfs_buf_ioerror(bp, -error);
do {
struct page *page = bvec->bv_page;
......
@@ -168,7 +168,7 @@ typedef struct xfs_buf {
struct completion b_iowait; /* queue for I/O waiters */
void *b_fspriv;
void *b_fspriv2;
void *b_fspriv3;
struct xfs_mount *b_mount;
unsigned short b_error; /* error code on I/O */
unsigned int b_page_count; /* size of page array */
unsigned int b_offset; /* page offset in first page */
@@ -214,9 +214,10 @@ extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
/* Buffer Read and Write Routines */
extern int xfs_bawrite(void *mp, xfs_buf_t *bp);
extern void xfs_bdwrite(void *mp, xfs_buf_t *bp);
extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern int xfs_buf_iostart(xfs_buf_t *, xfs_buf_flags_t);
extern int xfs_buf_iorequest(xfs_buf_t *);
extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t,
@@ -311,10 +312,6 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
#define XFS_BUF_UNORDERED(bp) ((bp)->b_flags &= ~XBF_ORDERED)
#define XFS_BUF_ISORDERED(bp) ((bp)->b_flags & XBF_ORDERED)
#define XFS_BUF_SHUT(bp) do { } while (0)
#define XFS_BUF_UNSHUT(bp) do { } while (0)
#define XFS_BUF_ISSHUT(bp) (0)
#define XFS_BUF_HOLD(bp) xfs_buf_hold(bp)
#define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ)
#define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ)
@@ -334,8 +331,6 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val))
#define XFS_BUF_FSPRIVATE2(bp, type) ((type)(bp)->b_fspriv2)
#define XFS_BUF_SET_FSPRIVATE2(bp, val) ((bp)->b_fspriv2 = (void*)(val))
#define XFS_BUF_FSPRIVATE3(bp, type) ((type)(bp)->b_fspriv3)
#define XFS_BUF_SET_FSPRIVATE3(bp, val) ((bp)->b_fspriv3 = (void*)(val))
#define XFS_BUF_SET_START(bp) do { } while (0)
#define XFS_BUF_SET_BRELSE_FUNC(bp, func) ((bp)->b_relse = (func))
@@ -366,14 +361,6 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
#define XFS_BUF_TARGET(bp) ((bp)->b_target)
#define XFS_BUFTARG_NAME(target) xfs_buf_target_name(target)
static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
{
bp->b_fspriv3 = mp;
bp->b_strat = xfs_bdstrat_cb;
xfs_buf_delwri_dequeue(bp);
return xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
}
static inline void xfs_buf_relse(xfs_buf_t *bp)
{
if (!bp->b_relse)
@@ -414,17 +401,6 @@ static inline int XFS_bwrite(xfs_buf_t *bp)
return error;
}
/*
* No error can be returned from xfs_buf_iostart for delwri
* buffers as they are queued and no I/O is issued.
*/
static inline void xfs_bdwrite(void *mp, xfs_buf_t *bp)
{
bp->b_strat = xfs_bdstrat_cb;
bp->b_fspriv3 = mp;
(void)xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
}
#define XFS_bdstrat(bp) xfs_buf_iorequest(bp)
#define xfs_iowait(bp) xfs_buf_iowait(bp)
......
@@ -25,12 +25,4 @@
*/
typedef const struct cred cred_t;
extern cred_t *sys_cred;
/* this is a hack.. (assumes sys_cred is the only cred_t in the system) */
static inline int capable_cred(cred_t *cr, int cid)
{
return (cr == sys_cred) ? 1 : capable(cid);
}
#endif /* __XFS_CRED_H__ */
@@ -29,7 +29,6 @@
#include "xfs_vnodeops.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_vfsops.h"
/*
* Note that we only accept fileids which are long enough rather than allow
......
@@ -36,88 +36,53 @@
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_ioctl32.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
#include <linux/dcache.h>
#include <linux/smp_lock.h>
static struct vm_operations_struct xfs_file_vm_ops;
STATIC_INLINE ssize_t
__xfs_file_read(
STATIC ssize_t
xfs_file_aio_read(
struct kiocb *iocb,
const struct iovec *iov,
unsigned long nr_segs,
int ioflags,
loff_t pos)
{
struct file *file = iocb->ki_filp;
int ioflags = IO_ISAIO;
BUG_ON(iocb->ki_pos != pos);
if (unlikely(file->f_flags & O_DIRECT))
ioflags |= IO_ISDIRECT;
if (file->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
return xfs_read(XFS_I(file->f_path.dentry->d_inode), iocb, iov,
nr_segs, &iocb->ki_pos, ioflags);
}
STATIC ssize_t
xfs_file_aio_read(
xfs_file_aio_write(
struct kiocb *iocb,
const struct iovec *iov,
unsigned long nr_segs,
loff_t pos)
{
return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO, pos);
}
STATIC ssize_t
xfs_file_aio_read_invis(
struct kiocb *iocb,
const struct iovec *iov,
unsigned long nr_segs,
loff_t pos)
{
return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
}
STATIC_INLINE ssize_t
__xfs_file_write(
struct kiocb *iocb,
const struct iovec *iov,
unsigned long nr_segs,
int ioflags,
loff_t pos)
{
struct file *file = iocb->ki_filp;
int ioflags = IO_ISAIO;
BUG_ON(iocb->ki_pos != pos);
if (unlikely(file->f_flags & O_DIRECT))
ioflags |= IO_ISDIRECT;
if (file->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
return xfs_write(XFS_I(file->f_mapping->host), iocb, iov, nr_segs,
&iocb->ki_pos, ioflags);
}
STATIC ssize_t
xfs_file_aio_write(
struct kiocb *iocb,
const struct iovec *iov,
unsigned long nr_segs,
loff_t pos)
{
return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO, pos);
}
STATIC ssize_t
xfs_file_aio_write_invis(
struct kiocb *iocb,
const struct iovec *iov,
unsigned long nr_segs,
loff_t pos)
{
return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
}
STATIC ssize_t
xfs_file_splice_read(
struct file *infilp,
@@ -126,20 +91,13 @@ xfs_file_splice_read(
size_t len,
unsigned int flags)
{
return xfs_splice_read(XFS_I(infilp->f_path.dentry->d_inode),
infilp, ppos, pipe, len, flags, 0);
}
int ioflags = 0;
if (infilp->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
STATIC ssize_t
xfs_file_splice_read_invis(
struct file *infilp,
loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len,
unsigned int flags)
{
return xfs_splice_read(XFS_I(infilp->f_path.dentry->d_inode),
infilp, ppos, pipe, len, flags, IO_INVIS);
infilp, ppos, pipe, len, flags, ioflags);
}
STATIC ssize_t
@@ -150,30 +108,49 @@ xfs_file_splice_write(
size_t len,
unsigned int flags)
{
return xfs_splice_write(XFS_I(outfilp->f_path.dentry->d_inode),
pipe, outfilp, ppos, len, flags, 0);
}
int ioflags = 0;
if (outfilp->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
STATIC ssize_t
xfs_file_splice_write_invis(
struct pipe_inode_info *pipe,
struct file *outfilp,
loff_t *ppos,
size_t len,
unsigned int flags)
{
return xfs_splice_write(XFS_I(outfilp->f_path.dentry->d_inode),
pipe, outfilp, ppos, len, flags, IO_INVIS);
pipe, outfilp, ppos, len, flags, ioflags);
}
STATIC int
xfs_file_open(
struct inode *inode,
struct file *filp)
struct file *file)
{
if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
return -EFBIG;
return -xfs_open(XFS_I(inode));
if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
return -EIO;
return 0;
}
STATIC int
xfs_dir_open(
struct inode *inode,
struct file *file)
{
struct xfs_inode *ip = XFS_I(inode);
int mode;
int error;
error = xfs_file_open(inode, file);
if (error)
return error;
/*
* If there are any blocks, read-ahead block 0 as we're almost
* certain to have the next operation be a read there.
*/
mode = xfs_ilock_map_shared(ip);
if (ip->i_d.di_nextents > 0)
xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
xfs_iunlock(ip, mode);
return 0;
} }
STATIC int
@@ -227,7 +204,7 @@ xfs_file_readdir(
* point we can change the ->readdir prototype to include the
* buffer size.
*/
bufsize = (size_t)min_t(loff_t, PAGE_SIZE, inode->i_size);
bufsize = (size_t)min_t(loff_t, PAGE_SIZE, ip->i_d.di_size);
error = xfs_readdir(ip, dirent, bufsize,
(xfs_off_t *)&filp->f_pos, filldir);
@@ -248,48 +225,6 @@ xfs_file_mmap(
return 0;
}
STATIC long
xfs_file_ioctl(
struct file *filp,
unsigned int cmd,
unsigned long p)
{
int error;
struct inode *inode = filp->f_path.dentry->d_inode;
error = xfs_ioctl(XFS_I(inode), filp, 0, cmd, (void __user *)p);
xfs_iflags_set(XFS_I(inode), XFS_IMODIFIED);
/* NOTE: some of the ioctl's return positive #'s as a
* byte count indicating success, such as
* readlink_by_handle. So we don't "sign flip"
* like most other routines. This means true
* errors need to be returned as a negative value.
*/
return error;
}
STATIC long
xfs_file_ioctl_invis(
struct file *filp,
unsigned int cmd,
unsigned long p)
{
int error;
struct inode *inode = filp->f_path.dentry->d_inode;
error = xfs_ioctl(XFS_I(inode), filp, IO_INVIS, cmd, (void __user *)p);
xfs_iflags_set(XFS_I(inode), XFS_IMODIFIED);
/* NOTE: some of the ioctl's return positive #'s as a
* byte count indicating success, such as
* readlink_by_handle. So we don't "sign flip"
* like most other routines. This means true
* errors need to be returned as a negative value.
*/
return error;
}
/*
* mmap()d file has taken write protection fault and is being made
* writable. We can set the page state up correctly for a writable
@@ -325,26 +260,8 @@ const struct file_operations xfs_file_operations = {
#endif
};
const struct file_operations xfs_invis_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = xfs_file_aio_read_invis,
.aio_write = xfs_file_aio_write_invis,
.splice_read = xfs_file_splice_read_invis,
.splice_write = xfs_file_splice_write_invis,
.unlocked_ioctl = xfs_file_ioctl_invis,
#ifdef CONFIG_COMPAT
.compat_ioctl = xfs_file_compat_invis_ioctl,
#endif
.mmap = xfs_file_mmap,
.open = xfs_file_open,
.release = xfs_file_release,
.fsync = xfs_file_fsync,
};
const struct file_operations xfs_dir_file_operations = {
.open = xfs_dir_open,
.read = generic_read_dir,
.readdir = xfs_file_readdir,
.llseek = generic_file_llseek,
......
@@ -24,6 +24,10 @@ int fs_noerr(void) { return 0; }
int fs_nosys(void) { return ENOSYS; }
void fs_noval(void) { return; }
/*
* note: all filemap functions return negative error codes. These
* need to be inverted before returning to the xfs core functions.
*/
void
xfs_tosspages(
xfs_inode_t *ip,
@@ -53,7 +57,7 @@ xfs_flushinval_pages(
if (!ret)
truncate_inode_pages(mapping, first);
}
return ret;
return -ret;
}
int
@@ -72,10 +76,23 @@ xfs_flush_pages(
xfs_iflags_clear(ip, XFS_ITRUNCATED);
ret = filemap_fdatawrite(mapping);
if (flags & XFS_B_ASYNC)
return ret;
return -ret;
ret2 = filemap_fdatawait(mapping);
if (!ret)
ret = ret2;
}
return ret;
return -ret;
}
int
xfs_wait_on_pages(
xfs_inode_t *ip,
xfs_off_t first,
xfs_off_t last)
{
struct address_space *mapping = VFS_I(ip)->i_mapping;
if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
return -filemap_fdatawait(mapping);
return 0;
}
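The -ret conversions above exist because the Linux filemap helpers return zero or a negative errno, while the XFS core expects zero or a positive errno, so the sign is flipped exactly once at this boundary. A minimal userspace sketch of that convention (function names here are made up for illustration):

#include <errno.h>
#include <stdio.h>

/* Stand-in for a Linux-style helper: 0 on success, negative errno on error. */
static int linux_style_write(int fail)
{
	return fail ? -EIO : 0;
}

/* Stand-in for an XFS-core-style wrapper: 0 on success, positive errno on
 * error, so the sign is flipped exactly once at this boundary. */
static int xfs_style_flush(int fail)
{
	int ret = linux_style_write(fail);

	return -ret;		/* -(-EIO) == EIO, -(0) == 0 */
}

int main(void)
{
	printf("success -> %d, failure -> %d (EIO is %d)\n",
	       xfs_style_flush(0), xfs_style_flush(1), EIO);
	return 0;
}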
@@ -26,7 +26,6 @@
*/
xfs_param_t xfs_params = {
/* MIN DFLT MAX */
.restrict_chown = { 0, 1, 1 },
.sgid_inherit = { 0, 0, 1 },
.symlink_mode = { 0, 0, 1 },
.panic_mask = { 0, 0, 255 },
@@ -43,10 +42,3 @@ xfs_param_t xfs_params = {
.inherit_nodfrg = { 0, 1, 1 },
.fstrm_timer = { 1, 30*100, 3600*100},
};
/*
* Global system credential structure.
*/
static cred_t sys_cred_val;
cred_t *sys_cred = &sys_cred_val;
@@ -19,6 +19,5 @@
#define __XFS_GLOBALS_H__
extern uint64_t xfs_panic_mask; /* set to cause more panics */
extern cred_t *sys_cred;
#endif /* __XFS_GLOBALS_H__ */
@@ -68,26 +68,22 @@
* XFS_IOC_PATH_TO_HANDLE
* returns full handle for a path
*/
STATIC int
int
xfs_find_handle(
unsigned int cmd,
void __user *arg)
xfs_fsop_handlereq_t *hreq)
{
int hsize;
xfs_handle_t handle;
xfs_fsop_handlereq_t hreq;
struct inode *inode;
if (copy_from_user(&hreq, arg, sizeof(hreq)))
return -XFS_ERROR(EFAULT);
memset((char *)&handle, 0, sizeof(handle));
switch (cmd) {
case XFS_IOC_PATH_TO_FSHANDLE:
case XFS_IOC_PATH_TO_HANDLE: {
struct path path;
int error = user_lpath((const char __user *)hreq.path, &path);
int error = user_lpath((const char __user *)hreq->path, &path);
if (error)
return error;
...@@ -101,7 +97,7 @@ xfs_find_handle( ...@@ -101,7 +97,7 @@ xfs_find_handle(
case XFS_IOC_FD_TO_HANDLE: { case XFS_IOC_FD_TO_HANDLE: {
struct file *file; struct file *file;
file = fget(hreq.fd); file = fget(hreq->fd);
if (!file) if (!file)
return -EBADF; return -EBADF;
...@@ -158,8 +154,8 @@ xfs_find_handle( ...@@ -158,8 +154,8 @@ xfs_find_handle(
} }
/* now copy our handle into the user buffer & write out the size */ /* now copy our handle into the user buffer & write out the size */
if (copy_to_user(hreq.ohandle, &handle, hsize) || if (copy_to_user(hreq->ohandle, &handle, hsize) ||
copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) { copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32))) {
iput(inode); iput(inode);
return -XFS_ERROR(EFAULT); return -XFS_ERROR(EFAULT);
} }
...@@ -249,10 +245,10 @@ xfs_vget_fsop_handlereq( ...@@ -249,10 +245,10 @@ xfs_vget_fsop_handlereq(
return 0; return 0;
} }
STATIC int int
xfs_open_by_handle( xfs_open_by_handle(
xfs_mount_t *mp, xfs_mount_t *mp,
void __user *arg, xfs_fsop_handlereq_t *hreq,
struct file *parfilp, struct file *parfilp,
struct inode *parinode) struct inode *parinode)
{ {
...@@ -263,14 +259,11 @@ xfs_open_by_handle( ...@@ -263,14 +259,11 @@ xfs_open_by_handle(
struct file *filp; struct file *filp;
struct inode *inode; struct inode *inode;
struct dentry *dentry; struct dentry *dentry;
xfs_fsop_handlereq_t hreq;
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -XFS_ERROR(EPERM); return -XFS_ERROR(EPERM);
if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
return -XFS_ERROR(EFAULT);
error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode); error = xfs_vget_fsop_handlereq(mp, parinode, hreq, &inode);
if (error) if (error)
return -error; return -error;
...@@ -281,10 +274,10 @@ xfs_open_by_handle( ...@@ -281,10 +274,10 @@ xfs_open_by_handle(
} }
#if BITS_PER_LONG != 32 #if BITS_PER_LONG != 32
hreq.oflags |= O_LARGEFILE; hreq->oflags |= O_LARGEFILE;
#endif #endif
/* Put open permission in namei format. */ /* Put open permission in namei format. */
permflag = hreq.oflags; permflag = hreq->oflags;
if ((permflag+1) & O_ACCMODE) if ((permflag+1) & O_ACCMODE)
permflag++; permflag++;
if (permflag & O_TRUNC) if (permflag & O_TRUNC)
...@@ -322,15 +315,16 @@ xfs_open_by_handle( ...@@ -322,15 +315,16 @@ xfs_open_by_handle(
mntget(parfilp->f_path.mnt); mntget(parfilp->f_path.mnt);
/* Create file pointer. */ /* Create file pointer. */
filp = dentry_open(dentry, parfilp->f_path.mnt, hreq.oflags, cred); filp = dentry_open(dentry, parfilp->f_path.mnt, hreq->oflags, cred);
if (IS_ERR(filp)) { if (IS_ERR(filp)) {
put_unused_fd(new_fd); put_unused_fd(new_fd);
return -XFS_ERROR(-PTR_ERR(filp)); return -XFS_ERROR(-PTR_ERR(filp));
} }
if (inode->i_mode & S_IFREG) { if (inode->i_mode & S_IFREG) {
/* invisible operation should not change atime */ /* invisible operation should not change atime */
filp->f_flags |= O_NOATIME; filp->f_flags |= O_NOATIME;
filp->f_op = &xfs_invis_file_operations; filp->f_mode |= FMODE_NOCMTIME;
} }
fd_install(new_fd, filp); fd_install(new_fd, filp);
...@@ -363,24 +357,21 @@ do_readlink( ...@@ -363,24 +357,21 @@ do_readlink(
} }
STATIC int int
xfs_readlink_by_handle( xfs_readlink_by_handle(
xfs_mount_t *mp, xfs_mount_t *mp,
void __user *arg, xfs_fsop_handlereq_t *hreq,
struct inode *parinode) struct inode *parinode)
{ {
struct inode *inode; struct inode *inode;
xfs_fsop_handlereq_t hreq;
__u32 olen; __u32 olen;
void *link; void *link;
int error; int error;
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -XFS_ERROR(EPERM); return -XFS_ERROR(EPERM);
if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
return -XFS_ERROR(EFAULT);
error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode); error = xfs_vget_fsop_handlereq(mp, parinode, hreq, &inode);
if (error) if (error)
return -error; return -error;
...@@ -390,7 +381,7 @@ xfs_readlink_by_handle( ...@@ -390,7 +381,7 @@ xfs_readlink_by_handle(
goto out_iput; goto out_iput;
} }
if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) { if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
error = -XFS_ERROR(EFAULT); error = -XFS_ERROR(EFAULT);
goto out_iput; goto out_iput;
} }
...@@ -402,7 +393,7 @@ xfs_readlink_by_handle( ...@@ -402,7 +393,7 @@ xfs_readlink_by_handle(
error = -xfs_readlink(XFS_I(inode), link); error = -xfs_readlink(XFS_I(inode), link);
if (error) if (error)
goto out_kfree; goto out_kfree;
error = do_readlink(hreq.ohandle, olen, link); error = do_readlink(hreq->ohandle, olen, link);
if (error) if (error)
goto out_kfree; goto out_kfree;
...@@ -501,7 +492,7 @@ xfs_attrlist_by_handle( ...@@ -501,7 +492,7 @@ xfs_attrlist_by_handle(
return -error; return -error;
} }
STATIC int int
xfs_attrmulti_attr_get( xfs_attrmulti_attr_get(
struct inode *inode, struct inode *inode,
char *name, char *name,
...@@ -530,7 +521,7 @@ xfs_attrmulti_attr_get( ...@@ -530,7 +521,7 @@ xfs_attrmulti_attr_get(
return error; return error;
} }
STATIC int int
xfs_attrmulti_attr_set( xfs_attrmulti_attr_set(
struct inode *inode, struct inode *inode,
char *name, char *name,
...@@ -560,7 +551,7 @@ xfs_attrmulti_attr_set( ...@@ -560,7 +551,7 @@ xfs_attrmulti_attr_set(
return error; return error;
} }
STATIC int int
xfs_attrmulti_attr_remove( xfs_attrmulti_attr_remove(
struct inode *inode, struct inode *inode,
char *name, char *name,
@@ -662,19 +653,26 @@ xfs_attrmulti_by_handle(
return -error;
}
STATIC int
int
xfs_ioc_space(
struct xfs_inode *ip,
struct inode *inode,
struct file *filp,
int ioflags,
unsigned int cmd,
void __user *arg)
xfs_flock64_t *bf)
{
xfs_flock64_t bf;
int attr_flags = 0;
int error;
/*
* Only allow the sys admin to reserve space unless
* unwritten extents are enabled.
*/
if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) &&
!capable(CAP_SYS_ADMIN))
return -XFS_ERROR(EPERM);
if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
return -XFS_ERROR(EPERM); return -XFS_ERROR(EPERM);
...@@ -684,16 +682,12 @@ xfs_ioc_space( ...@@ -684,16 +682,12 @@ xfs_ioc_space(
if (!S_ISREG(inode->i_mode)) if (!S_ISREG(inode->i_mode))
return -XFS_ERROR(EINVAL); return -XFS_ERROR(EINVAL);
if (copy_from_user(&bf, arg, sizeof(bf)))
return -XFS_ERROR(EFAULT);
if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
attr_flags |= XFS_ATTR_NONBLOCK; attr_flags |= XFS_ATTR_NONBLOCK;
if (ioflags & IO_INVIS) if (ioflags & IO_INVIS)
attr_flags |= XFS_ATTR_DMI; attr_flags |= XFS_ATTR_DMI;
error = xfs_change_file_space(ip, cmd, &bf, filp->f_pos, error = xfs_change_file_space(ip, cmd, bf, filp->f_pos, attr_flags);
NULL, attr_flags);
return -error; return -error;
} }
...@@ -1105,10 +1099,6 @@ xfs_ioctl_setattr( ...@@ -1105,10 +1099,6 @@ xfs_ioctl_setattr(
/* /*
* Change file ownership. Must be the owner or privileged. * Change file ownership. Must be the owner or privileged.
* If the system was configured with the "restricted_chown"
* option, the owner is not permitted to give away the file,
* and can change the group id only to a group of which he
* or she is a member.
*/ */
if (mask & FSX_PROJID) { if (mask & FSX_PROJID) {
/* /*
...@@ -1137,7 +1127,7 @@ xfs_ioctl_setattr( ...@@ -1137,7 +1127,7 @@ xfs_ioctl_setattr(
* the superblock version number since projids didn't * the superblock version number since projids didn't
* exist before DINODE_VERSION_2 and SB_VERSION_NLINK. * exist before DINODE_VERSION_2 and SB_VERSION_NLINK.
*/ */
if (ip->i_d.di_version == XFS_DINODE_VERSION_1) if (ip->i_d.di_version == 1)
xfs_bump_ino_vers2(tp, ip); xfs_bump_ino_vers2(tp, ip);
} }
...@@ -1255,6 +1245,19 @@ xfs_ioc_setxflags( ...@@ -1255,6 +1245,19 @@ xfs_ioc_setxflags(
return -xfs_ioctl_setattr(ip, &fa, mask); return -xfs_ioctl_setattr(ip, &fa, mask);
} }
STATIC int
xfs_getbmap_format(void **ap, struct getbmapx *bmv, int *full)
{
struct getbmap __user *base = *ap;
/* copy only getbmap portion (not getbmapx) */
if (copy_to_user(base, bmv, sizeof(struct getbmap)))
return XFS_ERROR(EFAULT);
*ap += sizeof(struct getbmap);
return 0;
}
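xfs_getbmap_format() is a formatter callback for the reworked xfs_getbmap() interface: the core calls it once per extent record, and the callback copies one struct getbmap to the user buffer and advances the output cursor, while xfs_getbmapx_format() below does the same for the extended record. A small userspace sketch of that cursor-advancing callback pattern, with invented names and plain memcpy standing in for copy_to_user:

#include <stdio.h>
#include <string.h>

/* Hypothetical record type standing in for struct getbmap / getbmapx. */
struct demo_rec { long offset, length; };

/* Formatter callback: write one record at *ap and advance the cursor,
 * mirroring how xfs_getbmap_format() steps through the user buffer. */
static int demo_format(void **ap, const struct demo_rec *rec)
{
	memcpy(*ap, rec, sizeof(*rec));
	*ap = (char *)*ap + sizeof(*rec);
	return 0;
}

int main(void)
{
	struct demo_rec out[2];
	void *cursor = out;
	struct demo_rec a = { 0, 8 }, b = { 8, 16 };

	/* The producer only sees the callback and the opaque cursor. */
	demo_format(&cursor, &a);
	demo_format(&cursor, &b);
	printf("second record: offset=%ld length=%ld\n",
	       out[1].offset, out[1].length);
	return 0;
}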
STATIC int
xfs_ioc_getbmap(
struct xfs_inode *ip,
@@ -1262,37 +1265,48 @@ xfs_ioc_getbmap(
unsigned int cmd,
void __user *arg)
{
struct getbmap bm;
struct getbmapx bmx;
int iflags;
int error;
if (copy_from_user(&bm, arg, sizeof(bm)))
if (copy_from_user(&bmx, arg, sizeof(struct getbmapx)))
return -XFS_ERROR(EFAULT);
if (bm.bmv_count < 2)
if (bmx.bmv_count < 2)
return -XFS_ERROR(EINVAL);
iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
if (ioflags & IO_INVIS)
iflags |= BMV_IF_NO_DMAPI_READ;
bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;
error = xfs_getbmap(ip, &bm, (struct getbmap __user *)arg+1, iflags);
error = xfs_getbmap(ip, &bmx, xfs_getbmap_format,
(struct getbmap *)arg+1);
if (error)
return -error;
if (copy_to_user(arg, &bm, sizeof(bm)))
/* copy back header - only size of getbmap */
if (copy_to_user(arg, &bmx, sizeof(struct getbmap)))
return -XFS_ERROR(EFAULT);
return 0;
}
STATIC int
xfs_getbmapx_format(void **ap, struct getbmapx *bmv, int *full)
{
struct getbmapx __user *base = *ap;
if (copy_to_user(base, bmv, sizeof(struct getbmapx)))
return XFS_ERROR(EFAULT);
*ap += sizeof(struct getbmapx);
return 0;
}
STATIC int STATIC int
xfs_ioc_getbmapx( xfs_ioc_getbmapx(
struct xfs_inode *ip, struct xfs_inode *ip,
void __user *arg) void __user *arg)
{ {
struct getbmapx bmx; struct getbmapx bmx;
struct getbmap bm;
int iflags;
int error; int error;
if (copy_from_user(&bmx, arg, sizeof(bmx))) if (copy_from_user(&bmx, arg, sizeof(bmx)))
...@@ -1301,46 +1315,46 @@ xfs_ioc_getbmapx( ...@@ -1301,46 +1315,46 @@ xfs_ioc_getbmapx(
if (bmx.bmv_count < 2) if (bmx.bmv_count < 2)
return -XFS_ERROR(EINVAL); return -XFS_ERROR(EINVAL);
/* if (bmx.bmv_iflags & (~BMV_IF_VALID))
* Map input getbmapx structure to a getbmap
* structure for xfs_getbmap.
*/
GETBMAP_CONVERT(bmx, bm);
iflags = bmx.bmv_iflags;
if (iflags & (~BMV_IF_VALID))
return -XFS_ERROR(EINVAL); return -XFS_ERROR(EINVAL);
iflags |= BMV_IF_EXTENDED; error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format,
(struct getbmapx *)arg+1);
error = xfs_getbmap(ip, &bm, (struct getbmapx __user *)arg+1, iflags);
if (error) if (error)
return -error; return -error;
GETBMAP_CONVERT(bm, bmx); /* copy back header */
if (copy_to_user(arg, &bmx, sizeof(struct getbmapx)))
if (copy_to_user(arg, &bmx, sizeof(bmx)))
return -XFS_ERROR(EFAULT); return -XFS_ERROR(EFAULT);
return 0; return 0;
} }
int
xfs_ioctl(
xfs_inode_t *ip,
/*
* Note: some of the ioctl's return positive numbers as a
* byte count indicating success, such as readlink_by_handle.
* So we don't "sign flip" like most other routines. This means
* true errors need to be returned as a negative value.
*/
long
xfs_file_ioctl(
struct file *filp,
int ioflags,
unsigned int cmd,
void __user *arg)
unsigned long p)
{
struct inode *inode = filp->f_path.dentry->d_inode;
xfs_mount_t *mp = ip->i_mount;
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
void __user *arg = (void __user *)p;
int ioflags = 0;
int error;
xfs_itrace_entry(XFS_I(inode));
switch (cmd) {
if (filp->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
xfs_itrace_entry(ip);
switch (cmd) {
case XFS_IOC_ALLOCSP: case XFS_IOC_ALLOCSP:
case XFS_IOC_FREESP: case XFS_IOC_FREESP:
case XFS_IOC_RESVSP: case XFS_IOC_RESVSP:
...@@ -1348,17 +1362,13 @@ xfs_ioctl( ...@@ -1348,17 +1362,13 @@ xfs_ioctl(
case XFS_IOC_ALLOCSP64: case XFS_IOC_ALLOCSP64:
case XFS_IOC_FREESP64: case XFS_IOC_FREESP64:
case XFS_IOC_RESVSP64: case XFS_IOC_RESVSP64:
case XFS_IOC_UNRESVSP64: case XFS_IOC_UNRESVSP64: {
/* xfs_flock64_t bf;
* Only allow the sys admin to reserve space unless
* unwritten extents are enabled.
*/
if (!xfs_sb_version_hasextflgbit(&mp->m_sb) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
return xfs_ioc_space(ip, inode, filp, ioflags, cmd, arg);
if (copy_from_user(&bf, arg, sizeof(bf)))
return -XFS_ERROR(EFAULT);
return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);
}
case XFS_IOC_DIOINFO: { case XFS_IOC_DIOINFO: {
struct dioattr da; struct dioattr da;
xfs_buftarg_t *target = xfs_buftarg_t *target =
...@@ -1418,18 +1428,30 @@ xfs_ioctl( ...@@ -1418,18 +1428,30 @@ xfs_ioctl(
case XFS_IOC_FD_TO_HANDLE: case XFS_IOC_FD_TO_HANDLE:
case XFS_IOC_PATH_TO_HANDLE: case XFS_IOC_PATH_TO_HANDLE:
case XFS_IOC_PATH_TO_FSHANDLE: case XFS_IOC_PATH_TO_FSHANDLE: {
return xfs_find_handle(cmd, arg); xfs_fsop_handlereq_t hreq;
case XFS_IOC_OPEN_BY_HANDLE: if (copy_from_user(&hreq, arg, sizeof(hreq)))
return xfs_open_by_handle(mp, arg, filp, inode); return -XFS_ERROR(EFAULT);
return xfs_find_handle(cmd, &hreq);
}
case XFS_IOC_OPEN_BY_HANDLE: {
xfs_fsop_handlereq_t hreq;
if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
return -XFS_ERROR(EFAULT);
return xfs_open_by_handle(mp, &hreq, filp, inode);
}
case XFS_IOC_FSSETDM_BY_HANDLE: case XFS_IOC_FSSETDM_BY_HANDLE:
return xfs_fssetdm_by_handle(mp, arg, inode); return xfs_fssetdm_by_handle(mp, arg, inode);
case XFS_IOC_READLINK_BY_HANDLE: case XFS_IOC_READLINK_BY_HANDLE: {
return xfs_readlink_by_handle(mp, arg, inode); xfs_fsop_handlereq_t hreq;
if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
return -XFS_ERROR(EFAULT);
return xfs_readlink_by_handle(mp, &hreq, inode);
}
case XFS_IOC_ATTRLIST_BY_HANDLE: case XFS_IOC_ATTRLIST_BY_HANDLE:
return xfs_attrlist_by_handle(mp, arg, inode); return xfs_attrlist_by_handle(mp, arg, inode);
...@@ -1437,7 +1459,11 @@ xfs_ioctl( ...@@ -1437,7 +1459,11 @@ xfs_ioctl(
return xfs_attrmulti_by_handle(mp, arg, filp, inode); return xfs_attrmulti_by_handle(mp, arg, filp, inode);
case XFS_IOC_SWAPEXT: { case XFS_IOC_SWAPEXT: {
error = xfs_swapext((struct xfs_swapext __user *)arg); struct xfs_swapext sxp;
if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
return -XFS_ERROR(EFAULT);
error = xfs_swapext(&sxp);
return -error; return -error;
} }
...@@ -1493,9 +1519,6 @@ xfs_ioctl( ...@@ -1493,9 +1519,6 @@ xfs_ioctl(
case XFS_IOC_FSGROWFSDATA: { case XFS_IOC_FSGROWFSDATA: {
xfs_growfs_data_t in; xfs_growfs_data_t in;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&in, arg, sizeof(in))) if (copy_from_user(&in, arg, sizeof(in)))
return -XFS_ERROR(EFAULT); return -XFS_ERROR(EFAULT);
...@@ -1506,9 +1529,6 @@ xfs_ioctl( ...@@ -1506,9 +1529,6 @@ xfs_ioctl(
case XFS_IOC_FSGROWFSLOG: { case XFS_IOC_FSGROWFSLOG: {
xfs_growfs_log_t in; xfs_growfs_log_t in;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&in, arg, sizeof(in))) if (copy_from_user(&in, arg, sizeof(in)))
return -XFS_ERROR(EFAULT); return -XFS_ERROR(EFAULT);
...@@ -1519,9 +1539,6 @@ xfs_ioctl( ...@@ -1519,9 +1539,6 @@ xfs_ioctl(
case XFS_IOC_FSGROWFSRT: { case XFS_IOC_FSGROWFSRT: {
xfs_growfs_rt_t in; xfs_growfs_rt_t in;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&in, arg, sizeof(in))) if (copy_from_user(&in, arg, sizeof(in)))
return -XFS_ERROR(EFAULT); return -XFS_ERROR(EFAULT);
......
/*
* Copyright (c) 2000,2005 Silicon Graphics, Inc.
* Copyright (c) 2008 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -15,26 +15,68 @@
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __XFS_IMAP_H__
#define __XFS_IMAP_H__
/*
* This is the structure passed to xfs_imap() to map
* an inode number to its on disk location.
*/
typedef struct xfs_imap {
xfs_daddr_t im_blkno; /* starting BB of inode chunk */
uint im_len; /* length in BBs of inode chunk */
xfs_agblock_t im_agblkno; /* logical block of inode chunk in ag */
ushort im_ioffset; /* inode offset in block in "inodes" */
ushort im_boffset; /* inode offset in block in bytes */
} xfs_imap_t;
#ifdef __KERNEL__
struct xfs_mount;
struct xfs_trans;
int xfs_imap(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
xfs_imap_t *, uint);
#endif
#endif /* __XFS_IMAP_H__ */
#ifndef __XFS_IOCTL_H__
#define __XFS_IOCTL_H__
extern int
xfs_ioc_space(
struct xfs_inode *ip,
struct inode *inode,
struct file *filp,
int ioflags,
unsigned int cmd,
xfs_flock64_t *bf);
extern int
xfs_find_handle(
unsigned int cmd,
xfs_fsop_handlereq_t *hreq);
extern int
xfs_open_by_handle(
xfs_mount_t *mp,
xfs_fsop_handlereq_t *hreq,
struct file *parfilp,
struct inode *parinode);
extern int
xfs_readlink_by_handle(
xfs_mount_t *mp,
xfs_fsop_handlereq_t *hreq,
struct inode *parinode);
extern int
xfs_attrmulti_attr_get(
struct inode *inode,
char *name,
char __user *ubuf,
__uint32_t *len,
__uint32_t flags);
extern int
xfs_attrmulti_attr_set(
struct inode *inode,
char *name,
const char __user *ubuf,
__uint32_t len,
__uint32_t flags);
extern int
xfs_attrmulti_attr_remove(
struct inode *inode,
char *name,
__uint32_t flags);
extern long
xfs_file_ioctl(
struct file *filp,
unsigned int cmd,
unsigned long p);
extern long
xfs_file_compat_ioctl(
struct file *file,
unsigned int cmd,
unsigned long arg);
#endif
@@ -18,7 +18,217 @@
#ifndef __XFS_IOCTL32_H__
#define __XFS_IOCTL32_H__
extern long xfs_file_compat_ioctl(struct file *, unsigned, unsigned long);
extern long xfs_file_compat_invis_ioctl(struct file *, unsigned, unsigned long);
#include <linux/compat.h>
/*
* on 32-bit arches, ioctl argument structures may have different sizes
* and/or alignment. We define compat structures which match the
* 32-bit sizes/alignments here, and their associated ioctl numbers.
*
* xfs_ioctl32.c contains routines to copy these structures in and out.
*/
/* stock kernel-level ioctls we support */
#define XFS_IOC_GETXFLAGS_32 FS_IOC32_GETFLAGS
#define XFS_IOC_SETXFLAGS_32 FS_IOC32_SETFLAGS
#define XFS_IOC_GETVERSION_32 FS_IOC32_GETVERSION
/*
* On intel, even if sizes match, alignment and/or padding may differ.
*/
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
#define BROKEN_X86_ALIGNMENT
#define __compat_packed __attribute__((packed))
#else
#define __compat_packed
#endif
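The point of these compat definitions is that a structure built by a 32-bit program can have different padding and alignment than the same fields compiled into a 64-bit kernel; on x86-64 a 64-bit member is normally 8-byte aligned, so the compat mirrors mark such members packed to reproduce the i386 layout. A standalone sketch (struct names invented, not part of the patch) showing how the packed attribute changes the layout:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Natively aligned: on x86-64 the 64-bit field is 8-byte aligned, so four
 * bytes of padding appear after 'whence'. */
struct natural {
	int16_t type;
	int16_t whence;
	int64_t start;
};

/* The same fields with the 32-bit i386 layout forced, in the spirit of
 * compat_xfs_flock64 marking l_start __attribute__((packed)). */
struct compat32 {
	int16_t type;
	int16_t whence;
	int64_t start __attribute__((packed));
};

int main(void)
{
	printf("natural:  sizeof=%zu offsetof(start)=%zu\n",
	       sizeof(struct natural), offsetof(struct natural, start));
	printf("compat32: sizeof=%zu offsetof(start)=%zu\n",
	       sizeof(struct compat32), offsetof(struct compat32, start));
	return 0;
}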
typedef struct compat_xfs_bstime {
compat_time_t tv_sec; /* seconds */
__s32 tv_nsec; /* and nanoseconds */
} compat_xfs_bstime_t;
typedef struct compat_xfs_bstat {
__u64 bs_ino; /* inode number */
__u16 bs_mode; /* type and mode */
__u16 bs_nlink; /* number of links */
__u32 bs_uid; /* user id */
__u32 bs_gid; /* group id */
__u32 bs_rdev; /* device value */
__s32 bs_blksize; /* block size */
__s64 bs_size; /* file size */
compat_xfs_bstime_t bs_atime; /* access time */
compat_xfs_bstime_t bs_mtime; /* modify time */
compat_xfs_bstime_t bs_ctime; /* inode change time */
int64_t bs_blocks; /* number of blocks */
__u32 bs_xflags; /* extended flags */
__s32 bs_extsize; /* extent size */
__s32 bs_extents; /* number of extents */
__u32 bs_gen; /* generation count */
__u16 bs_projid; /* project id */
unsigned char bs_pad[14]; /* pad space, unused */
__u32 bs_dmevmask; /* DMIG event mask */
__u16 bs_dmstate; /* DMIG state info */
__u16 bs_aextents; /* attribute number of extents */
} __compat_packed compat_xfs_bstat_t;
typedef struct compat_xfs_fsop_bulkreq {
compat_uptr_t lastip; /* last inode # pointer */
__s32 icount; /* count of entries in buffer */
compat_uptr_t ubuffer; /* user buffer for inode desc. */
compat_uptr_t ocount; /* output count pointer */
} compat_xfs_fsop_bulkreq_t;
#define XFS_IOC_FSBULKSTAT_32 \
_IOWR('X', 101, struct compat_xfs_fsop_bulkreq)
#define XFS_IOC_FSBULKSTAT_SINGLE_32 \
_IOWR('X', 102, struct compat_xfs_fsop_bulkreq)
#define XFS_IOC_FSINUMBERS_32 \
_IOWR('X', 103, struct compat_xfs_fsop_bulkreq)
typedef struct compat_xfs_fsop_handlereq {
__u32 fd; /* fd for FD_TO_HANDLE */
compat_uptr_t path; /* user pathname */
__u32 oflags; /* open flags */
compat_uptr_t ihandle; /* user supplied handle */
__u32 ihandlen; /* user supplied length */
compat_uptr_t ohandle; /* user buffer for handle */
compat_uptr_t ohandlen; /* user buffer length */
} compat_xfs_fsop_handlereq_t;
#define XFS_IOC_PATH_TO_FSHANDLE_32 \
_IOWR('X', 104, struct compat_xfs_fsop_handlereq)
#define XFS_IOC_PATH_TO_HANDLE_32 \
_IOWR('X', 105, struct compat_xfs_fsop_handlereq)
#define XFS_IOC_FD_TO_HANDLE_32 \
_IOWR('X', 106, struct compat_xfs_fsop_handlereq)
#define XFS_IOC_OPEN_BY_HANDLE_32 \
_IOWR('X', 107, struct compat_xfs_fsop_handlereq)
#define XFS_IOC_READLINK_BY_HANDLE_32 \
_IOWR('X', 108, struct compat_xfs_fsop_handlereq)
/* The bstat field in the swapext struct needs translation */
typedef struct compat_xfs_swapext {
__int64_t sx_version; /* version */
__int64_t sx_fdtarget; /* fd of target file */
__int64_t sx_fdtmp; /* fd of tmp file */
xfs_off_t sx_offset; /* offset into file */
xfs_off_t sx_length; /* leng from offset */
char sx_pad[16]; /* pad space, unused */
compat_xfs_bstat_t sx_stat; /* stat of target b4 copy */
} __compat_packed compat_xfs_swapext_t;
#define XFS_IOC_SWAPEXT_32 _IOWR('X', 109, struct compat_xfs_swapext)
typedef struct compat_xfs_fsop_attrlist_handlereq {
struct compat_xfs_fsop_handlereq hreq; /* handle interface structure */
struct xfs_attrlist_cursor pos; /* opaque cookie, list offset */
__u32 flags; /* which namespace to use */
__u32 buflen; /* length of buffer supplied */
compat_uptr_t buffer; /* returned names */
} __compat_packed compat_xfs_fsop_attrlist_handlereq_t;
/* Note: actually this is read/write */
#define XFS_IOC_ATTRLIST_BY_HANDLE_32 \
_IOW('X', 122, struct compat_xfs_fsop_attrlist_handlereq)
/* am_opcodes defined in xfs_fs.h */
typedef struct compat_xfs_attr_multiop {
__u32 am_opcode;
__s32 am_error;
compat_uptr_t am_attrname;
compat_uptr_t am_attrvalue;
__u32 am_length;
__u32 am_flags;
} compat_xfs_attr_multiop_t;
typedef struct compat_xfs_fsop_attrmulti_handlereq {
struct compat_xfs_fsop_handlereq hreq; /* handle interface structure */
__u32 opcount;/* count of following multiop */
/* ptr to compat_xfs_attr_multiop */
compat_uptr_t ops; /* attr_multi data */
} compat_xfs_fsop_attrmulti_handlereq_t;
#define XFS_IOC_ATTRMULTI_BY_HANDLE_32 \
_IOW('X', 123, struct compat_xfs_fsop_attrmulti_handlereq)
typedef struct compat_xfs_fsop_setdm_handlereq {
struct compat_xfs_fsop_handlereq hreq; /* handle information */
/* ptr to struct fsdmidata */
compat_uptr_t data; /* DMAPI data */
} compat_xfs_fsop_setdm_handlereq_t;
#define XFS_IOC_FSSETDM_BY_HANDLE_32 \
_IOW('X', 121, struct compat_xfs_fsop_setdm_handlereq)
#ifdef BROKEN_X86_ALIGNMENT
/* on ia32 l_start is on a 32-bit boundary */
typedef struct compat_xfs_flock64 {
__s16 l_type;
__s16 l_whence;
__s64 l_start __attribute__((packed));
/* len == 0 means until end of file */
__s64 l_len __attribute__((packed));
__s32 l_sysid;
__u32 l_pid;
__s32 l_pad[4]; /* reserve area */
} compat_xfs_flock64_t;
#define XFS_IOC_ALLOCSP_32 _IOW('X', 10, struct compat_xfs_flock64)
#define XFS_IOC_FREESP_32 _IOW('X', 11, struct compat_xfs_flock64)
#define XFS_IOC_ALLOCSP64_32 _IOW('X', 36, struct compat_xfs_flock64)
#define XFS_IOC_FREESP64_32 _IOW('X', 37, struct compat_xfs_flock64)
#define XFS_IOC_RESVSP_32 _IOW('X', 40, struct compat_xfs_flock64)
#define XFS_IOC_UNRESVSP_32 _IOW('X', 41, struct compat_xfs_flock64)
#define XFS_IOC_RESVSP64_32 _IOW('X', 42, struct compat_xfs_flock64)
#define XFS_IOC_UNRESVSP64_32 _IOW('X', 43, struct compat_xfs_flock64)
typedef struct compat_xfs_fsop_geom_v1 {
__u32 blocksize; /* filesystem (data) block size */
__u32 rtextsize; /* realtime extent size */
__u32 agblocks; /* fsblocks in an AG */
__u32 agcount; /* number of allocation groups */
__u32 logblocks; /* fsblocks in the log */
__u32 sectsize; /* (data) sector size, bytes */
__u32 inodesize; /* inode size in bytes */
__u32 imaxpct; /* max allowed inode space(%) */
__u64 datablocks; /* fsblocks in data subvolume */
__u64 rtblocks; /* fsblocks in realtime subvol */
__u64 rtextents; /* rt extents in realtime subvol*/
__u64 logstart; /* starting fsblock of the log */
unsigned char uuid[16]; /* unique id of the filesystem */
__u32 sunit; /* stripe unit, fsblocks */
__u32 swidth; /* stripe width, fsblocks */
__s32 version; /* structure version */
__u32 flags; /* superblock version flags */
__u32 logsectsize; /* log sector size, bytes */
__u32 rtsectsize; /* realtime sector size, bytes */
__u32 dirblocksize; /* directory block size, bytes */
} __attribute__((packed)) compat_xfs_fsop_geom_v1_t;
#define XFS_IOC_FSGEOMETRY_V1_32 \
_IOR('X', 100, struct compat_xfs_fsop_geom_v1)
typedef struct compat_xfs_inogrp {
__u64 xi_startino; /* starting inode number */
__s32 xi_alloccount; /* # bits set in allocmask */
__u64 xi_allocmask; /* mask of allocated inodes */
} __attribute__((packed)) compat_xfs_inogrp_t;
/* These growfs input structures have padding on the end, so must translate */
typedef struct compat_xfs_growfs_data {
__u64 newblocks; /* new data subvol size, fsblocks */
__u32 imaxpct; /* new inode space percentage limit */
} __attribute__((packed)) compat_xfs_growfs_data_t;
typedef struct compat_xfs_growfs_rt {
__u64 newblocks; /* new realtime size, fsblocks */
__u32 extsize; /* new realtime extent size, fsblocks */
} __attribute__((packed)) compat_xfs_growfs_rt_t;
#define XFS_IOC_FSGROWFSDATA_32 _IOW('X', 110, struct compat_xfs_growfs_data)
#define XFS_IOC_FSGROWFSRT_32 _IOW('X', 112, struct compat_xfs_growfs_rt)
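The padding issue mentioned above is tail padding: with an 8-byte-aligned __u64 first member, the native growfs structure rounds up to 16 bytes on a 64-bit kernel, while the packed compat version stays at the 12 bytes that 32-bit userspace actually passes in, so the copy-in must use the compat size. A small compile-time check of that assumption (illustrative names, not from the patch):

#include <stdint.h>

struct native_growfs_data { uint64_t newblocks; uint32_t imaxpct; };	/* 16 bytes on x86_64 */
struct packed_growfs_data { uint64_t newblocks; uint32_t imaxpct; } __attribute__((packed));

_Static_assert(sizeof(struct packed_growfs_data) == 12,
	       "copy_from_user() must read only the 12-byte 32-bit layout");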
#endif /* BROKEN_X86_ALIGNMENT */
#endif /* __XFS_IOCTL32_H__ */ #endif /* __XFS_IOCTL32_H__ */
...@@ -53,6 +53,7 @@ ...@@ -53,6 +53,7 @@
#include <linux/namei.h> #include <linux/namei.h>
#include <linux/security.h> #include <linux/security.h>
#include <linux/falloc.h> #include <linux/falloc.h>
#include <linux/fiemap.h>
/* /*
* Bring the atime in the XFS inode uptodate. * Bring the atime in the XFS inode uptodate.
...@@ -64,14 +65,14 @@ xfs_synchronize_atime( ...@@ -64,14 +65,14 @@ xfs_synchronize_atime(
{ {
struct inode *inode = VFS_I(ip); struct inode *inode = VFS_I(ip);
-	if (inode) {
+	if (!(inode->i_state & I_CLEAR)) {
ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec; ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec; ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
} }
} }
/* /*
- * If the linux inode exists, mark it dirty.
+ * If the linux inode is valid, mark it dirty.
 * Used when committing a dirty inode into a transaction so that
 * the inode will get written back by the linux code
*/ */
...@@ -81,7 +82,7 @@ xfs_mark_inode_dirty_sync( ...@@ -81,7 +82,7 @@ xfs_mark_inode_dirty_sync(
{ {
struct inode *inode = VFS_I(ip); struct inode *inode = VFS_I(ip);
-	if (inode)
+	if (!(inode->i_state & (I_WILL_FREE|I_FREEING|I_CLEAR)))
mark_inode_dirty_sync(inode); mark_inode_dirty_sync(inode);
} }
...@@ -128,7 +129,7 @@ xfs_ichgtime( ...@@ -128,7 +129,7 @@ xfs_ichgtime(
if (sync_it) { if (sync_it) {
SYNCHRONIZE(); SYNCHRONIZE();
ip->i_update_core = 1; ip->i_update_core = 1;
-		mark_inode_dirty_sync(inode);
+		xfs_mark_inode_dirty_sync(ip);
} }
} }
...@@ -158,8 +159,6 @@ xfs_init_security( ...@@ -158,8 +159,6 @@ xfs_init_security(
} }
error = xfs_attr_set(ip, name, value, length, ATTR_SECURE); error = xfs_attr_set(ip, name, value, length, ATTR_SECURE);
if (!error)
xfs_iflags_set(ip, XFS_IMODIFIED);
kfree(name); kfree(name);
kfree(value); kfree(value);
...@@ -260,7 +259,6 @@ xfs_vn_mknod( ...@@ -260,7 +259,6 @@ xfs_vn_mknod(
error = _ACL_INHERIT(inode, mode, default_acl); error = _ACL_INHERIT(inode, mode, default_acl);
if (unlikely(error)) if (unlikely(error))
goto out_cleanup_inode; goto out_cleanup_inode;
xfs_iflags_set(ip, XFS_IMODIFIED);
_ACL_FREE(default_acl); _ACL_FREE(default_acl);
} }
...@@ -366,21 +364,17 @@ xfs_vn_link( ...@@ -366,21 +364,17 @@ xfs_vn_link(
struct inode *dir, struct inode *dir,
struct dentry *dentry) struct dentry *dentry)
{ {
-	struct inode *inode; /* inode of guy being linked to */
+	struct inode *inode = old_dentry->d_inode;
struct xfs_name name; struct xfs_name name;
int error; int error;
inode = old_dentry->d_inode;
xfs_dentry_to_name(&name, dentry); xfs_dentry_to_name(&name, dentry);
igrab(inode);
error = xfs_link(XFS_I(dir), XFS_I(inode), &name); error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
-	if (unlikely(error)) {
-		iput(inode);
+	if (unlikely(error))
		return -error;
-	}
-	xfs_iflags_set(XFS_I(dir), XFS_IMODIFIED);
+	atomic_inc(&inode->i_count);
d_instantiate(dentry, inode); d_instantiate(dentry, inode);
return 0; return 0;
} }
...@@ -601,7 +595,7 @@ xfs_vn_setattr( ...@@ -601,7 +595,7 @@ xfs_vn_setattr(
struct dentry *dentry, struct dentry *dentry,
struct iattr *iattr) struct iattr *iattr)
{ {
-	return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0, NULL);
+	return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0);
} }
/* /*
...@@ -642,7 +636,7 @@ xfs_vn_fallocate( ...@@ -642,7 +636,7 @@ xfs_vn_fallocate(
xfs_ilock(ip, XFS_IOLOCK_EXCL); xfs_ilock(ip, XFS_IOLOCK_EXCL);
	error = xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
-				      0, NULL, XFS_ATTR_NOLOCK);
+				      0, XFS_ATTR_NOLOCK);
if (!error && !(mode & FALLOC_FL_KEEP_SIZE) && if (!error && !(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) offset + len > i_size_read(inode))
new_size = offset + len; new_size = offset + len;
...@@ -653,7 +647,7 @@ xfs_vn_fallocate( ...@@ -653,7 +647,7 @@ xfs_vn_fallocate(
iattr.ia_valid = ATTR_SIZE; iattr.ia_valid = ATTR_SIZE;
iattr.ia_size = new_size; iattr.ia_size = new_size;
-		error = xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK, NULL);
+		error = xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
} }
xfs_iunlock(ip, XFS_IOLOCK_EXCL); xfs_iunlock(ip, XFS_IOLOCK_EXCL);
...@@ -661,6 +655,88 @@ xfs_vn_fallocate( ...@@ -661,6 +655,88 @@ xfs_vn_fallocate(
return error; return error;
} }
#define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
/*
* Call fiemap helper to fill in user data.
* Returns positive errors to xfs_getbmap.
*/
STATIC int
xfs_fiemap_format(
void **arg,
struct getbmapx *bmv,
int *full)
{
int error;
struct fiemap_extent_info *fieinfo = *arg;
u32 fiemap_flags = 0;
u64 logical, physical, length;
/* Do nothing for a hole */
if (bmv->bmv_block == -1LL)
return 0;
logical = BBTOB(bmv->bmv_offset);
physical = BBTOB(bmv->bmv_block);
length = BBTOB(bmv->bmv_length);
if (bmv->bmv_oflags & BMV_OF_PREALLOC)
fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN;
else if (bmv->bmv_oflags & BMV_OF_DELALLOC) {
fiemap_flags |= FIEMAP_EXTENT_DELALLOC;
physical = 0; /* no block yet */
}
if (bmv->bmv_oflags & BMV_OF_LAST)
fiemap_flags |= FIEMAP_EXTENT_LAST;
error = fiemap_fill_next_extent(fieinfo, logical, physical,
length, fiemap_flags);
if (error > 0) {
error = 0;
*full = 1; /* user array now full */
}
return -error;
}
STATIC int
xfs_vn_fiemap(
struct inode *inode,
struct fiemap_extent_info *fieinfo,
u64 start,
u64 length)
{
xfs_inode_t *ip = XFS_I(inode);
struct getbmapx bm;
int error;
error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS);
if (error)
return error;
/* Set up bmap header for xfs internal routine */
bm.bmv_offset = BTOBB(start);
/* Special case for whole file */
if (length == FIEMAP_MAX_OFFSET)
bm.bmv_length = -1LL;
else
bm.bmv_length = BTOBB(length);
/* our formatter will tell xfs_getbmap when to stop. */
bm.bmv_count = MAXEXTNUM;
bm.bmv_iflags = BMV_IF_PREALLOC;
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
bm.bmv_iflags |= BMV_IF_ATTRFORK;
if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC))
bm.bmv_iflags |= BMV_IF_DELALLOC;
error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo);
if (error)
return -error;
return 0;
}
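For context, this new .fiemap handler is what a userspace FS_IOC_FIEMAP call ends up in; a minimal caller looks roughly like the following, using only the standard Linux UAPI headers (not part of the patch, error handling trimmed):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	struct fiemap *fm;
	unsigned int i;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Room for the header plus 32 extent records. */
	fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* whole file, the special case handled above */
	fm->fm_flags = FIEMAP_FLAG_SYNC;	/* flush dirty data before mapping */
	fm->fm_extent_count = 32;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}
	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("extent %u: logical %llu physical %llu length %llu flags 0x%x\n", i,
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);
	free(fm);
	close(fd);
	return 0;
}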
static const struct inode_operations xfs_inode_operations = { static const struct inode_operations xfs_inode_operations = {
.permission = xfs_vn_permission, .permission = xfs_vn_permission,
.truncate = xfs_vn_truncate, .truncate = xfs_vn_truncate,
...@@ -671,6 +747,7 @@ static const struct inode_operations xfs_inode_operations = { ...@@ -671,6 +747,7 @@ static const struct inode_operations xfs_inode_operations = {
.removexattr = generic_removexattr, .removexattr = generic_removexattr,
.listxattr = xfs_vn_listxattr, .listxattr = xfs_vn_listxattr,
.fallocate = xfs_vn_fallocate, .fallocate = xfs_vn_fallocate,
.fiemap = xfs_vn_fiemap,
}; };
static const struct inode_operations xfs_dir_inode_operations = { static const struct inode_operations xfs_dir_inode_operations = {
...@@ -766,12 +843,20 @@ xfs_diflags_to_iflags( ...@@ -766,12 +843,20 @@ xfs_diflags_to_iflags(
* When reading existing inodes from disk this is called directly * When reading existing inodes from disk this is called directly
* from xfs_iget, when creating a new inode it is called from * from xfs_iget, when creating a new inode it is called from
* xfs_ialloc after setting up the inode. * xfs_ialloc after setting up the inode.
*
* We are always called with an uninitialised linux inode here.
* We need to initialise the necessary fields and take a reference
* on it.
*/ */
void void
xfs_setup_inode( xfs_setup_inode(
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
-	struct inode *inode = ip->i_vnode;
+	struct inode *inode = &ip->i_vnode;
inode->i_ino = ip->i_ino;
inode->i_state = I_NEW|I_LOCK;
inode_add_to_lists(ip->i_mount->m_super, inode);
inode->i_mode = ip->i_d.di_mode; inode->i_mode = ip->i_d.di_mode;
inode->i_nlink = ip->i_d.di_nlink; inode->i_nlink = ip->i_d.di_nlink;
...@@ -799,7 +884,6 @@ xfs_setup_inode( ...@@ -799,7 +884,6 @@ xfs_setup_inode(
inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
xfs_diflags_to_iflags(inode, ip); xfs_diflags_to_iflags(inode, ip);
xfs_iflags_clear(ip, XFS_IMODIFIED);
switch (inode->i_mode & S_IFMT) { switch (inode->i_mode & S_IFMT) {
case S_IFREG: case S_IFREG:
......
...@@ -22,7 +22,6 @@ struct xfs_inode; ...@@ -22,7 +22,6 @@ struct xfs_inode;
extern const struct file_operations xfs_file_operations; extern const struct file_operations xfs_file_operations;
extern const struct file_operations xfs_dir_file_operations; extern const struct file_operations xfs_dir_file_operations;
extern const struct file_operations xfs_invis_file_operations;
extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size); extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size);
......
...@@ -21,18 +21,12 @@ ...@@ -21,18 +21,12 @@
#include <linux/types.h> #include <linux/types.h>
/* /*
* Some types are conditional depending on the target system.
* XFS_BIG_BLKNOS needs block layer disk addresses to be 64 bits. * XFS_BIG_BLKNOS needs block layer disk addresses to be 64 bits.
- * XFS_BIG_INUMS needs the VFS inode number to be 64 bits, as well
- * as requiring XFS_BIG_BLKNOS to be set.
+ * XFS_BIG_INUMS requires XFS_BIG_BLKNOS to be set.
*/ */
#if defined(CONFIG_LBD) || (BITS_PER_LONG == 64) #if defined(CONFIG_LBD) || (BITS_PER_LONG == 64)
# define XFS_BIG_BLKNOS 1 # define XFS_BIG_BLKNOS 1
# if BITS_PER_LONG == 64
# define XFS_BIG_INUMS 1 # define XFS_BIG_INUMS 1
# else
# define XFS_BIG_INUMS 0
# endif
#else #else
# define XFS_BIG_BLKNOS 0 # define XFS_BIG_BLKNOS 0
# define XFS_BIG_INUMS 0 # define XFS_BIG_INUMS 0
...@@ -77,6 +71,7 @@ ...@@ -77,6 +71,7 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/random.h> #include <linux/random.h>
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/writeback.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/div64.h> #include <asm/div64.h>
...@@ -85,7 +80,6 @@ ...@@ -85,7 +80,6 @@
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <asm/unaligned.h> #include <asm/unaligned.h>
#include <xfs_vfs.h>
#include <xfs_cred.h> #include <xfs_cred.h>
#include <xfs_vnode.h> #include <xfs_vnode.h>
#include <xfs_stats.h> #include <xfs_stats.h>
...@@ -107,7 +101,6 @@ ...@@ -107,7 +101,6 @@
#undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ #undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
#endif #endif
#define restricted_chown xfs_params.restrict_chown.val
#define irix_sgid_inherit xfs_params.sgid_inherit.val #define irix_sgid_inherit xfs_params.sgid_inherit.val
#define irix_symlink_mode xfs_params.symlink_mode.val #define irix_symlink_mode xfs_params.symlink_mode.val
#define xfs_panic_mask xfs_params.panic_mask.val #define xfs_panic_mask xfs_params.panic_mask.val
......
...@@ -51,7 +51,6 @@ ...@@ -51,7 +51,6 @@
#include "xfs_vnodeops.h" #include "xfs_vnodeops.h"
#include <linux/capability.h> #include <linux/capability.h>
#include <linux/mount.h>
#include <linux/writeback.h> #include <linux/writeback.h>
...@@ -243,7 +242,7 @@ xfs_read( ...@@ -243,7 +242,7 @@ xfs_read(
if (unlikely(ioflags & IO_ISDIRECT)) { if (unlikely(ioflags & IO_ISDIRECT)) {
if (inode->i_mapping->nrpages) if (inode->i_mapping->nrpages)
-			ret = xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
+			ret = -xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
-1, FI_REMAPF_LOCKED); -1, FI_REMAPF_LOCKED);
mutex_unlock(&inode->i_mutex); mutex_unlock(&inode->i_mutex);
if (ret) { if (ret) {
...@@ -668,15 +667,8 @@ xfs_write( ...@@ -668,15 +667,8 @@ xfs_write(
if (new_size > xip->i_size) if (new_size > xip->i_size)
xip->i_new_size = new_size; xip->i_new_size = new_size;
-	/*
-	 * We're not supposed to change timestamps in readonly-mounted
-	 * filesystems. Throw it away if anyone asks us.
-	 */
-	if (likely(!(ioflags & IO_INVIS) &&
-	    !mnt_want_write(file->f_path.mnt))) {
+	if (likely(!(ioflags & IO_INVIS)))
		xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-		mnt_drop_write(file->f_path.mnt);
-	}
/* /*
* If the offset is beyond the size of the file, we have a couple * If the offset is beyond the size of the file, we have a couple
...@@ -715,7 +707,6 @@ xfs_write( ...@@ -715,7 +707,6 @@ xfs_write(
} }
} }
retry:
/* We can write back this queue in page reclaim */ /* We can write back this queue in page reclaim */
current->backing_dev_info = mapping->backing_dev_info; current->backing_dev_info = mapping->backing_dev_info;
...@@ -771,6 +762,17 @@ xfs_write( ...@@ -771,6 +762,17 @@ xfs_write(
if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO)) if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
ret = wait_on_sync_kiocb(iocb); ret = wait_on_sync_kiocb(iocb);
isize = i_size_read(inode);
if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
*offset = isize;
if (*offset > xip->i_size) {
xfs_ilock(xip, XFS_ILOCK_EXCL);
if (*offset > xip->i_size)
xip->i_size = *offset;
xfs_iunlock(xip, XFS_ILOCK_EXCL);
}
if (ret == -ENOSPC && if (ret == -ENOSPC &&
DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) { DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
xfs_iunlock(xip, iolock); xfs_iunlock(xip, iolock);
...@@ -784,20 +786,7 @@ xfs_write( ...@@ -784,20 +786,7 @@ xfs_write(
xfs_ilock(xip, iolock); xfs_ilock(xip, iolock);
if (error) if (error)
goto out_unlock_internal; goto out_unlock_internal;
-		pos = xip->i_size;
-		ret = 0;
-		goto retry;
+		goto start;
}
isize = i_size_read(inode);
if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
*offset = isize;
if (*offset > xip->i_size) {
xfs_ilock(xip, XFS_ILOCK_EXCL);
if (*offset > xip->i_size)
xip->i_size = *offset;
xfs_iunlock(xip, XFS_ILOCK_EXCL);
} }
error = -ret; error = -ret;
...@@ -855,13 +844,7 @@ xfs_write( ...@@ -855,13 +844,7 @@ xfs_write(
int int
xfs_bdstrat_cb(struct xfs_buf *bp) xfs_bdstrat_cb(struct xfs_buf *bp)
{ {
-	xfs_mount_t	*mp;
-
-	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
-	if (!XFS_FORCED_SHUTDOWN(mp)) {
-		xfs_buf_iorequest(bp);
-		return 0;
-	} else {
+	if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
xfs_buftrace("XFS__BDSTRAT IOERROR", bp); xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
/* /*
* Metadata write that didn't get logged but * Metadata write that didn't get logged but
...@@ -874,6 +857,9 @@ xfs_bdstrat_cb(struct xfs_buf *bp) ...@@ -874,6 +857,9 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
else else
return (xfs_bioerror(bp)); return (xfs_bioerror(bp));
} }
xfs_buf_iorequest(bp);
return 0;
} }
/* /*
......
...@@ -53,11 +53,15 @@ xfs_read_xfsstats( ...@@ -53,11 +53,15 @@ xfs_read_xfsstats(
{ "icluster", XFSSTAT_END_INODE_CLUSTER }, { "icluster", XFSSTAT_END_INODE_CLUSTER },
{ "vnodes", XFSSTAT_END_VNODE_OPS }, { "vnodes", XFSSTAT_END_VNODE_OPS },
{ "buf", XFSSTAT_END_BUF }, { "buf", XFSSTAT_END_BUF },
{ "abtb2", XFSSTAT_END_ABTB_V2 },
{ "abtc2", XFSSTAT_END_ABTC_V2 },
{ "bmbt2", XFSSTAT_END_BMBT_V2 },
{ "ibt2", XFSSTAT_END_IBT_V2 },
}; };
/* Loop over all stats groups */ /* Loop over all stats groups */
for (i=j=len = 0; i < ARRAY_SIZE(xstats); i++) { for (i=j=len = 0; i < ARRAY_SIZE(xstats); i++) {
-		len += sprintf(buffer + len, xstats[i].desc);
+		len += sprintf(buffer + len, "%s", xstats[i].desc);
/* inner loop does each group */ /* inner loop does each group */
while (j < xstats[i].endpoint) { while (j < xstats[i].endpoint) {
val = 0; val = 0;
......
...@@ -118,6 +118,71 @@ struct xfsstats { ...@@ -118,6 +118,71 @@ struct xfsstats {
__uint32_t xb_page_retries; __uint32_t xb_page_retries;
__uint32_t xb_page_found; __uint32_t xb_page_found;
__uint32_t xb_get_read; __uint32_t xb_get_read;
/* Version 2 btree counters */
#define XFSSTAT_END_ABTB_V2 (XFSSTAT_END_BUF+15)
__uint32_t xs_abtb_2_lookup;
__uint32_t xs_abtb_2_compare;
__uint32_t xs_abtb_2_insrec;
__uint32_t xs_abtb_2_delrec;
__uint32_t xs_abtb_2_newroot;
__uint32_t xs_abtb_2_killroot;
__uint32_t xs_abtb_2_increment;
__uint32_t xs_abtb_2_decrement;
__uint32_t xs_abtb_2_lshift;
__uint32_t xs_abtb_2_rshift;
__uint32_t xs_abtb_2_split;
__uint32_t xs_abtb_2_join;
__uint32_t xs_abtb_2_alloc;
__uint32_t xs_abtb_2_free;
__uint32_t xs_abtb_2_moves;
#define XFSSTAT_END_ABTC_V2 (XFSSTAT_END_ABTB_V2+15)
__uint32_t xs_abtc_2_lookup;
__uint32_t xs_abtc_2_compare;
__uint32_t xs_abtc_2_insrec;
__uint32_t xs_abtc_2_delrec;
__uint32_t xs_abtc_2_newroot;
__uint32_t xs_abtc_2_killroot;
__uint32_t xs_abtc_2_increment;
__uint32_t xs_abtc_2_decrement;
__uint32_t xs_abtc_2_lshift;
__uint32_t xs_abtc_2_rshift;
__uint32_t xs_abtc_2_split;
__uint32_t xs_abtc_2_join;
__uint32_t xs_abtc_2_alloc;
__uint32_t xs_abtc_2_free;
__uint32_t xs_abtc_2_moves;
#define XFSSTAT_END_BMBT_V2 (XFSSTAT_END_ABTC_V2+15)
__uint32_t xs_bmbt_2_lookup;
__uint32_t xs_bmbt_2_compare;
__uint32_t xs_bmbt_2_insrec;
__uint32_t xs_bmbt_2_delrec;
__uint32_t xs_bmbt_2_newroot;
__uint32_t xs_bmbt_2_killroot;
__uint32_t xs_bmbt_2_increment;
__uint32_t xs_bmbt_2_decrement;
__uint32_t xs_bmbt_2_lshift;
__uint32_t xs_bmbt_2_rshift;
__uint32_t xs_bmbt_2_split;
__uint32_t xs_bmbt_2_join;
__uint32_t xs_bmbt_2_alloc;
__uint32_t xs_bmbt_2_free;
__uint32_t xs_bmbt_2_moves;
#define XFSSTAT_END_IBT_V2 (XFSSTAT_END_BMBT_V2+15)
__uint32_t xs_ibt_2_lookup;
__uint32_t xs_ibt_2_compare;
__uint32_t xs_ibt_2_insrec;
__uint32_t xs_ibt_2_delrec;
__uint32_t xs_ibt_2_newroot;
__uint32_t xs_ibt_2_killroot;
__uint32_t xs_ibt_2_increment;
__uint32_t xs_ibt_2_decrement;
__uint32_t xs_ibt_2_lshift;
__uint32_t xs_ibt_2_rshift;
__uint32_t xs_ibt_2_split;
__uint32_t xs_ibt_2_join;
__uint32_t xs_ibt_2_alloc;
__uint32_t xs_ibt_2_free;
__uint32_t xs_ibt_2_moves;
/* Extra precision counters */ /* Extra precision counters */
__uint64_t xs_xstrat_bytes; __uint64_t xs_xstrat_bytes;
__uint64_t xs_write_bytes; __uint64_t xs_write_bytes;
......
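The new version-2 btree counter groups added above show up as extra rows in /proc/fs/xfs/stat, each group emitted as one "<desc> <counter> <counter> ..." line by the proc handler shown earlier. A small userspace sketch that picks out just the new rows (assuming that one-row-per-group format; not part of the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/fs/xfs/stat", "r");
	char line[1024];

	if (!f) {
		perror("/proc/fs/xfs/stat");
		return 1;
	}
	/* Print only the v2 btree counter groups added by this series. */
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "abtb2 ", 6) || !strncmp(line, "abtc2 ", 6) ||
		    !strncmp(line, "bmbt2 ", 6) || !strncmp(line, "ibt2 ", 5))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}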
...@@ -20,24 +20,12 @@ ...@@ -20,24 +20,12 @@
#include <linux/exportfs.h> #include <linux/exportfs.h>
#ifdef CONFIG_XFS_DMAPI
# define vfs_insertdmapi(vfs) vfs_insertops(vfsp, &xfs_dmops)
# define vfs_initdmapi() dmapi_init()
# define vfs_exitdmapi() dmapi_uninit()
#else
# define vfs_insertdmapi(vfs) do { } while (0)
# define vfs_initdmapi() do { } while (0)
# define vfs_exitdmapi() do { } while (0)
#endif
#ifdef CONFIG_XFS_QUOTA #ifdef CONFIG_XFS_QUOTA
# define vfs_insertquota(vfs) vfs_insertops(vfsp, &xfs_qmops)
extern void xfs_qm_init(void); extern void xfs_qm_init(void);
extern void xfs_qm_exit(void); extern void xfs_qm_exit(void);
# define vfs_initquota() xfs_qm_init() # define vfs_initquota() xfs_qm_init()
# define vfs_exitquota() xfs_qm_exit() # define vfs_exitquota() xfs_qm_exit()
#else #else
# define vfs_insertquota(vfs) do { } while (0)
# define vfs_initquota() do { } while (0) # define vfs_initquota() do { } while (0)
# define vfs_exitquota() do { } while (0) # define vfs_exitquota() do { } while (0)
#endif #endif
...@@ -101,9 +89,6 @@ struct block_device; ...@@ -101,9 +89,6 @@ struct block_device;
extern __uint64_t xfs_max_file_offset(unsigned int); extern __uint64_t xfs_max_file_offset(unsigned int);
extern void xfs_flush_inode(struct xfs_inode *);
extern void xfs_flush_device(struct xfs_inode *);
extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
extern const struct export_operations xfs_export_operations; extern const struct export_operations xfs_export_operations;
......
...@@ -15,23 +15,10 @@ ...@@ -15,23 +15,10 @@
* along with this program; if not, write the Free Software Foundation, * along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/ */
-#ifndef __XFS_VFS_H__
-#define __XFS_VFS_H__
+#ifndef XFS_SYNC_H
+#define XFS_SYNC_H 1
#include <linux/vfs.h>
#include "xfs_fs.h"
struct inode;
struct fid;
struct cred;
struct seq_file;
struct super_block;
struct xfs_inode;
struct xfs_mount; struct xfs_mount;
struct xfs_mount_args;
typedef struct kstatfs bhv_statvfs_t;
typedef struct bhv_vfs_sync_work { typedef struct bhv_vfs_sync_work {
struct list_head w_list; struct list_head w_list;
...@@ -41,37 +28,28 @@ typedef struct bhv_vfs_sync_work { ...@@ -41,37 +28,28 @@ typedef struct bhv_vfs_sync_work {
} bhv_vfs_sync_work_t; } bhv_vfs_sync_work_t;
#define SYNC_ATTR		0x0001	/* sync attributes */
-#define SYNC_CLOSE		0x0002	/* close file system down */
-#define SYNC_DELWRI		0x0004	/* look at delayed writes */
-#define SYNC_WAIT		0x0008	/* wait for i/o to complete */
-#define SYNC_BDFLUSH		0x0010	/* BDFLUSH is calling -- don't block */
-#define SYNC_FSDATA		0x0020	/* flush fs data (e.g. superblocks) */
-#define SYNC_REFCACHE		0x0040	/* prune some of the nfs ref cache */
-#define SYNC_REMOUNT		0x0080	/* remount readonly, no dummy LRs */
-#define SYNC_IOWAIT		0x0100	/* wait for all I/O to complete */
-
-/*
- * When remounting a filesystem read-only or freezing the filesystem,
- * we have two phases to execute. This first phase is syncing the data
- * before we quiesce the fielsystem, and the second is flushing all the
- * inodes out after we've waited for all the transactions created by
- * the first phase to complete. The second phase uses SYNC_INODE_QUIESCE
- * to ensure that the inodes are written to their location on disk
- * rather than just existing in transactions in the log. This means
- * after a quiesce there is no log replay required to write the inodes
- * to disk (this is the main difference between a sync and a quiesce).
- */
-#define SYNC_DATA_QUIESCE	(SYNC_DELWRI|SYNC_FSDATA|SYNC_WAIT|SYNC_IOWAIT)
-#define SYNC_INODE_QUIESCE	(SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT)
-
-#define SHUTDOWN_META_IO_ERROR	0x0001	/* write attempt to metadata failed */
-#define SHUTDOWN_LOG_IO_ERROR	0x0002	/* write attempt to the log failed */
-#define SHUTDOWN_FORCE_UMOUNT	0x0004	/* shutdown from a forced unmount */
-#define SHUTDOWN_CORRUPT_INCORE	0x0008	/* corrupt in-memory data structures */
-#define SHUTDOWN_REMOTE_REQ	0x0010	/* shutdown came from remote cell */
-#define SHUTDOWN_DEVICE_REQ	0x0020	/* failed all paths to the device */
-
-#define xfs_test_for_freeze(mp)		((mp)->m_super->s_frozen)
-#define xfs_wait_for_freeze(mp,l)	vfs_check_frozen((mp)->m_super, (l))
-
-#endif	/* __XFS_VFS_H__ */
+#define SYNC_DELWRI		0x0002	/* look at delayed writes */
+#define SYNC_WAIT		0x0004	/* wait for i/o to complete */
+#define SYNC_BDFLUSH		0x0008	/* BDFLUSH is calling -- don't block */
+#define SYNC_IOWAIT		0x0010	/* wait for all I/O to complete */
+
+int xfs_syncd_init(struct xfs_mount *mp);
+void xfs_syncd_stop(struct xfs_mount *mp);
+
+int xfs_sync_inodes(struct xfs_mount *mp, int flags);
+int xfs_sync_fsdata(struct xfs_mount *mp, int flags);
+
+int xfs_quiesce_data(struct xfs_mount *mp);
+void xfs_quiesce_attr(struct xfs_mount *mp);
+
+void xfs_flush_inode(struct xfs_inode *ip);
+void xfs_flush_device(struct xfs_inode *ip);
+
+int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
+int xfs_reclaim_inodes(struct xfs_mount *mp, int noblock, int mode);
+
+void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
+void xfs_inode_clear_reclaim_tag(struct xfs_inode *ip);
+void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
+				struct xfs_inode *ip);
+
+#endif
...@@ -55,17 +55,6 @@ xfs_stats_clear_proc_handler( ...@@ -55,17 +55,6 @@ xfs_stats_clear_proc_handler(
#endif /* CONFIG_PROC_FS */ #endif /* CONFIG_PROC_FS */
static ctl_table xfs_table[] = { static ctl_table xfs_table[] = {
{
.ctl_name = XFS_RESTRICT_CHOWN,
.procname = "restrict_chown",
.data = &xfs_params.restrict_chown.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec_minmax,
.strategy = &sysctl_intvec,
.extra1 = &xfs_params.restrict_chown.min,
.extra2 = &xfs_params.restrict_chown.max
},
{ {
.ctl_name = XFS_SGID_INHERIT, .ctl_name = XFS_SGID_INHERIT,
.procname = "irix_sgid_inherit", .procname = "irix_sgid_inherit",
......
...@@ -31,7 +31,6 @@ typedef struct xfs_sysctl_val { ...@@ -31,7 +31,6 @@ typedef struct xfs_sysctl_val {
} xfs_sysctl_val_t; } xfs_sysctl_val_t;
typedef struct xfs_param { typedef struct xfs_param {
xfs_sysctl_val_t restrict_chown;/* Root/non-root can give away files.*/
xfs_sysctl_val_t sgid_inherit; /* Inherit S_ISGID if process' GID is xfs_sysctl_val_t sgid_inherit; /* Inherit S_ISGID if process' GID is
* not a member of parent dir GID. */ * not a member of parent dir GID. */
xfs_sysctl_val_t symlink_mode; /* Link creat mode affected by umask */ xfs_sysctl_val_t symlink_mode; /* Link creat mode affected by umask */
...@@ -68,7 +67,7 @@ typedef struct xfs_param { ...@@ -68,7 +67,7 @@ typedef struct xfs_param {
enum { enum {
/* XFS_REFCACHE_SIZE = 1 */ /* XFS_REFCACHE_SIZE = 1 */
/* XFS_REFCACHE_PURGE = 2 */ /* XFS_REFCACHE_PURGE = 2 */
-	XFS_RESTRICT_CHOWN = 3,
+	/* XFS_RESTRICT_CHOWN = 3 */
XFS_SGID_INHERIT = 4, XFS_SGID_INHERIT = 4,
XFS_SYMLINK_MODE = 5, XFS_SYMLINK_MODE = 5,
XFS_PANIC_MASK = 6, XFS_PANIC_MASK = 6,
......
/*
* Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_vnodeops.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
/*
* And this gunk is needed for xfs_mount.h"
*/
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_dmapi.h"
#include "xfs_inum.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
/*
* Dedicated vnode inactive/reclaim sync wait queues.
* Prime number of hash buckets since address is used as the key.
*/
#define NVSYNC 37
#define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC])
static wait_queue_head_t vsync[NVSYNC];
void __init
vn_init(void)
{
int i;
for (i = 0; i < NVSYNC; i++)
init_waitqueue_head(&vsync[i]);
}
void
vn_iowait(
xfs_inode_t *ip)
{
wait_queue_head_t *wq = vptosync(ip);
wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}
void
vn_iowake(
xfs_inode_t *ip)
{
if (atomic_dec_and_test(&ip->i_iocount))
wake_up(vptosync(ip));
}
/*
* Volume managers supporting multiple paths can send back ENODEV when the
* final path disappears. In this case continuing to fill the page cache
* with dirty data which cannot be written out is evil, so prevent that.
*/
void
vn_ioerror(
xfs_inode_t *ip,
int error,
char *f,
int l)
{
if (unlikely(error == -ENODEV))
xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l);
}
#ifdef XFS_INODE_TRACE
/*
* Reference count of Linux inode if present, -1 if the xfs_inode
* has no associated Linux inode.
*/
static inline int xfs_icount(struct xfs_inode *ip)
{
struct inode *vp = VFS_I(ip);
if (vp)
return vn_count(vp);
return -1;
}
#define KTRACE_ENTER(ip, vk, s, line, ra) \
ktrace_enter( (ip)->i_trace, \
/* 0 */ (void *)(__psint_t)(vk), \
/* 1 */ (void *)(s), \
/* 2 */ (void *)(__psint_t) line, \
/* 3 */ (void *)(__psint_t)xfs_icount(ip), \
/* 4 */ (void *)(ra), \
/* 5 */ NULL, \
/* 6 */ (void *)(__psint_t)current_cpu(), \
/* 7 */ (void *)(__psint_t)current_pid(), \
/* 8 */ (void *)__return_address, \
/* 9 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL)
/*
* Vnode tracing code.
*/
void
_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
{
KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
}
void
_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
{
KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
}
void
xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
}
void
_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
}
void
xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
}
#endif /* XFS_INODE_TRACE */
...@@ -18,7 +18,10 @@ ...@@ -18,7 +18,10 @@
#ifndef __XFS_VNODE_H__ #ifndef __XFS_VNODE_H__
#define __XFS_VNODE_H__ #define __XFS_VNODE_H__
#include "xfs_fs.h"
struct file; struct file;
struct xfs_inode;
struct xfs_iomap; struct xfs_iomap;
struct attrlist_cursor_kern; struct attrlist_cursor_kern;
...@@ -51,40 +54,6 @@ struct attrlist_cursor_kern; ...@@ -51,40 +54,6 @@ struct attrlist_cursor_kern;
Prevent VM access to the pages until Prevent VM access to the pages until
the operation completes. */ the operation completes. */
extern void vn_init(void);
/*
* Yeah, these don't take vnode anymore at all, all this should be
* cleaned up at some point.
*/
extern void vn_iowait(struct xfs_inode *ip);
extern void vn_iowake(struct xfs_inode *ip);
extern void vn_ioerror(struct xfs_inode *ip, int error, char *f, int l);
static inline int vn_count(struct inode *vp)
{
return atomic_read(&vp->i_count);
}
#define IHOLD(ip) \
do { \
ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
atomic_inc(&(VFS_I(ip)->i_count)); \
xfs_itrace_hold((ip), __FILE__, __LINE__, (inst_t *)__return_address); \
} while (0)
#define IRELE(ip) \
do { \
xfs_itrace_rele((ip), __FILE__, __LINE__, (inst_t *)__return_address); \
iput(VFS_I(ip)); \
} while (0)
static inline struct inode *vn_grab(struct inode *vp)
{
return igrab(vp);
}
/* /*
* Dealing with bad inodes * Dealing with bad inodes
*/ */
...@@ -121,39 +90,4 @@ static inline void vn_atime_to_time_t(struct inode *vp, time_t *tt) ...@@ -121,39 +90,4 @@ static inline void vn_atime_to_time_t(struct inode *vp, time_t *tt)
PAGECACHE_TAG_DIRTY) PAGECACHE_TAG_DIRTY)
/*
* Tracking vnode activity.
*/
#if defined(XFS_INODE_TRACE)
#define INODE_TRACE_SIZE 16 /* number of trace entries */
#define INODE_KTRACE_ENTRY 1
#define INODE_KTRACE_EXIT 2
#define INODE_KTRACE_HOLD 3
#define INODE_KTRACE_REF 4
#define INODE_KTRACE_RELE 5
extern void _xfs_itrace_entry(struct xfs_inode *, const char *, inst_t *);
extern void _xfs_itrace_exit(struct xfs_inode *, const char *, inst_t *);
extern void xfs_itrace_hold(struct xfs_inode *, char *, int, inst_t *);
extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *);
extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *);
#define xfs_itrace_entry(ip) \
_xfs_itrace_entry(ip, __func__, (inst_t *)__return_address)
#define xfs_itrace_exit(ip) \
_xfs_itrace_exit(ip, __func__, (inst_t *)__return_address)
#define xfs_itrace_exit_tag(ip, tag) \
_xfs_itrace_exit(ip, tag, (inst_t *)__return_address)
#define xfs_itrace_ref(ip) \
_xfs_itrace_ref(ip, __FILE__, __LINE__, (inst_t *)__return_address)
#else
#define xfs_itrace_entry(a)
#define xfs_itrace_exit(a)
#define xfs_itrace_exit_tag(a, b)
#define xfs_itrace_hold(a, b, c, d)
#define xfs_itrace_ref(a)
#define xfs_itrace_rele(a, b, c, d)
#endif
#endif /* __XFS_VNODE_H__ */ #endif /* __XFS_VNODE_H__ */
...@@ -101,7 +101,7 @@ xfs_qm_dqinit( ...@@ -101,7 +101,7 @@ xfs_qm_dqinit(
if (brandnewdquot) { if (brandnewdquot) {
dqp->dq_flnext = dqp->dq_flprev = dqp; dqp->dq_flnext = dqp->dq_flprev = dqp;
mutex_init(&dqp->q_qlock); mutex_init(&dqp->q_qlock);
-		sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");
+		init_waitqueue_head(&dqp->q_pinwait);
/* /*
* Because we want to use a counting completion, complete * Because we want to use a counting completion, complete
...@@ -131,7 +131,7 @@ xfs_qm_dqinit( ...@@ -131,7 +131,7 @@ xfs_qm_dqinit(
dqp->q_res_bcount = 0; dqp->q_res_bcount = 0;
dqp->q_res_icount = 0; dqp->q_res_icount = 0;
dqp->q_res_rtbcount = 0; dqp->q_res_rtbcount = 0;
-	dqp->q_pincount = 0;
+	atomic_set(&dqp->q_pincount, 0);
dqp->q_hash = NULL; dqp->q_hash = NULL;
ASSERT(dqp->dq_flnext == dqp->dq_flprev); ASSERT(dqp->dq_flnext == dqp->dq_flprev);
...@@ -1221,16 +1221,14 @@ xfs_qm_dqflush( ...@@ -1221,16 +1221,14 @@ xfs_qm_dqflush(
xfs_dqtrace_entry(dqp, "DQFLUSH"); xfs_dqtrace_entry(dqp, "DQFLUSH");
	/*
-	 * If not dirty, nada.
+	 * If not dirty, or it's pinned and we are not supposed to
+	 * block, nada.
	 */
-	if (!XFS_DQ_IS_DIRTY(dqp)) {
+	if (!XFS_DQ_IS_DIRTY(dqp) ||
+	    (!(flags & XFS_QMOPT_SYNC) && atomic_read(&dqp->q_pincount) > 0)) {
		xfs_dqfunlock(dqp);
-		return (0);
+		return 0;
	}
-
-	/*
-	 * Cant flush a pinned dquot. Wait for it.
-	 */
xfs_qm_dqunpin_wait(dqp); xfs_qm_dqunpin_wait(dqp);
/* /*
...@@ -1274,10 +1272,8 @@ xfs_qm_dqflush( ...@@ -1274,10 +1272,8 @@ xfs_qm_dqflush(
dqp->dq_flags &= ~(XFS_DQ_DIRTY); dqp->dq_flags &= ~(XFS_DQ_DIRTY);
mp = dqp->q_mount; mp = dqp->q_mount;
-	/* lsn is 64 bits */
-	spin_lock(&mp->m_ail_lock);
-	dqp->q_logitem.qli_flush_lsn = dqp->q_logitem.qli_item.li_lsn;
-	spin_unlock(&mp->m_ail_lock);
+	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
+					&dqp->q_logitem.qli_item.li_lsn);
/* /*
* Attach an iodone routine so that we can remove this dquot from the * Attach an iodone routine so that we can remove this dquot from the
...@@ -1323,8 +1319,10 @@ xfs_qm_dqflush_done( ...@@ -1323,8 +1319,10 @@ xfs_qm_dqflush_done(
xfs_dq_logitem_t *qip) xfs_dq_logitem_t *qip)
{ {
xfs_dquot_t *dqp; xfs_dquot_t *dqp;
struct xfs_ail *ailp;
dqp = qip->qli_dquot; dqp = qip->qli_dquot;
ailp = qip->qli_item.li_ailp;
/* /*
* We only want to pull the item from the AIL if its * We only want to pull the item from the AIL if its
...@@ -1337,15 +1335,12 @@ xfs_qm_dqflush_done( ...@@ -1337,15 +1335,12 @@ xfs_qm_dqflush_done(
if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
qip->qli_item.li_lsn == qip->qli_flush_lsn) { qip->qli_item.li_lsn == qip->qli_flush_lsn) {
-		spin_lock(&dqp->q_mount->m_ail_lock);
-		/*
-		 * xfs_trans_delete_ail() drops the AIL lock.
-		 */
+		/* xfs_trans_ail_delete() drops the AIL lock. */
+		spin_lock(&ailp->xa_lock);
		if (qip->qli_item.li_lsn == qip->qli_flush_lsn)
-			xfs_trans_delete_ail(dqp->q_mount,
-						(xfs_log_item_t*)qip);
+			xfs_trans_ail_delete(ailp, (xfs_log_item_t*)qip);
		else
-			spin_unlock(&dqp->q_mount->m_ail_lock);
+			spin_unlock(&ailp->xa_lock);
} }
/* /*
...@@ -1375,7 +1370,7 @@ xfs_dqunlock( ...@@ -1375,7 +1370,7 @@ xfs_dqunlock(
mutex_unlock(&(dqp->q_qlock)); mutex_unlock(&(dqp->q_qlock));
if (dqp->q_logitem.qli_dquot == dqp) { if (dqp->q_logitem.qli_dquot == dqp) {
/* Once was dqp->q_mount, but might just have been cleared */ /* Once was dqp->q_mount, but might just have been cleared */
-		xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_mountp,
+		xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp,
(xfs_log_item_t*)&(dqp->q_logitem)); (xfs_log_item_t*)&(dqp->q_logitem));
} }
} }
...@@ -1489,7 +1484,7 @@ xfs_qm_dqpurge( ...@@ -1489,7 +1484,7 @@ xfs_qm_dqpurge(
"xfs_qm_dqpurge: dquot %p flush failed", dqp); "xfs_qm_dqpurge: dquot %p flush failed", dqp);
xfs_dqflock(dqp); xfs_dqflock(dqp);
} }
-	ASSERT(dqp->q_pincount == 0);
+	ASSERT(atomic_read(&dqp->q_pincount) == 0);
ASSERT(XFS_FORCED_SHUTDOWN(mp) || ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
!(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL)); !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
......
...@@ -83,8 +83,8 @@ typedef struct xfs_dquot { ...@@ -83,8 +83,8 @@ typedef struct xfs_dquot {
xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */ xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */
mutex_t q_qlock; /* quota lock */ mutex_t q_qlock; /* quota lock */
struct completion q_flush; /* flush completion queue */ struct completion q_flush; /* flush completion queue */
-	uint		 q_pincount;	/* pin count for this dquot */
-	sv_t		 q_pinwait;	/* sync var for pinning */
+	atomic_t	 q_pincount;	/* dquot pin count */
+	wait_queue_head_t q_pinwait;	/* dquot pinning wait queue */
#ifdef XFS_DQUOT_TRACE #ifdef XFS_DQUOT_TRACE
struct ktrace *q_trace; /* trace header structure */ struct ktrace *q_trace; /* trace header structure */
#endif #endif
......
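The replacement of the plain counter and the SGI-style sync variable with an atomic_t plus a wait_queue_head_t follows a common kernel idiom: the last unpin wakes whoever is waiting for the object to become unpinned. A generic sketch of that pattern in kernel context (linux/atomic.h, linux/wait.h), not the literal XFS dquot routines:

/* Generic pin/unpin/wait idiom; init_waitqueue_head() and atomic_set()
 * would run at setup time, as in xfs_qm_dqinit() above. */
struct pinnable {
	atomic_t		pincount;
	wait_queue_head_t	pinwait;
};

static void pin(struct pinnable *p)
{
	atomic_inc(&p->pincount);
}

static void unpin(struct pinnable *p)
{
	if (atomic_dec_and_test(&p->pincount))
		wake_up(&p->pinwait);		/* last unpin wakes any waiter */
}

static void wait_unpinned(struct pinnable *p)
{
	/* Sleeps until every pin has been dropped. */
	wait_event(p->pinwait, atomic_read(&p->pincount) == 0);
}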
...@@ -106,7 +106,6 @@ typedef struct xfs_qm { ...@@ -106,7 +106,6 @@ typedef struct xfs_qm {
typedef struct xfs_quotainfo { typedef struct xfs_quotainfo {
xfs_inode_t *qi_uquotaip; /* user quota inode */ xfs_inode_t *qi_uquotaip; /* user quota inode */
xfs_inode_t *qi_gquotaip; /* group quota inode */ xfs_inode_t *qi_gquotaip; /* group quota inode */
spinlock_t qi_pinlock; /* dquot pinning lock */
xfs_dqlist_t qi_dqlist; /* all dquots in filesys */ xfs_dqlist_t qi_dqlist; /* all dquots in filesys */
int qi_dqreclaims; /* a change here indicates int qi_dqreclaims; /* a change here indicates
a removal in the dqlist */ a removal in the dqlist */
...@@ -168,7 +167,7 @@ extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); ...@@ -168,7 +167,7 @@ extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
extern void xfs_qm_mount_quotas(xfs_mount_t *); extern void xfs_qm_mount_quotas(xfs_mount_t *);
extern int xfs_qm_quotacheck(xfs_mount_t *); extern int xfs_qm_quotacheck(xfs_mount_t *);
extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *); extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *);
-extern int xfs_qm_unmount_quotas(xfs_mount_t *);
+extern void xfs_qm_unmount_quotas(xfs_mount_t *);
extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
extern int xfs_qm_sync(xfs_mount_t *, int); extern int xfs_qm_sync(xfs_mount_t *, int);
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#include "xfs_bit.h" #include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h" #include "xfs_inum.h"
#include "xfs_clnt.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
...@@ -51,7 +50,7 @@ ...@@ -51,7 +50,7 @@
STATIC void STATIC void
xfs_fill_statvfs_from_dquot( xfs_fill_statvfs_from_dquot(
-	bhv_statvfs_t		*statp,
+	struct kstatfs		*statp,
xfs_disk_dquot_t *dp) xfs_disk_dquot_t *dp)
{ {
__uint64_t limit; __uint64_t limit;
...@@ -88,7 +87,7 @@ xfs_fill_statvfs_from_dquot( ...@@ -88,7 +87,7 @@ xfs_fill_statvfs_from_dquot(
STATIC void STATIC void
xfs_qm_statvfs( xfs_qm_statvfs(
xfs_inode_t *ip, xfs_inode_t *ip,
-	bhv_statvfs_t		*statp)
+	struct kstatfs		*statp)
{ {
xfs_mount_t *mp = ip->i_mount; xfs_mount_t *mp = ip->i_mount;
xfs_dquot_t *dqp; xfs_dquot_t *dqp;
......
...@@ -27,8 +27,6 @@ ...@@ -27,8 +27,6 @@
#define CE_ALERT 1 /* alert */ #define CE_ALERT 1 /* alert */
#define CE_PANIC 0 /* panic */ #define CE_PANIC 0 /* panic */
extern void icmn_err(int, char *, va_list)
__attribute__ ((format (printf, 2, 0)));
extern void cmn_err(int, char *, ...) extern void cmn_err(int, char *, ...)
__attribute__ ((format (printf, 2, 3))); __attribute__ ((format (printf, 2, 3)));
extern void assfail(char *expr, char *f, int l); extern void assfail(char *expr, char *f, int l);
......
...@@ -113,21 +113,16 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep) ...@@ -113,21 +113,16 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
void void
ktrace_free(ktrace_t *ktp) ktrace_free(ktrace_t *ktp)
{ {
int entries_size;
if (ktp == (ktrace_t *)NULL) if (ktp == (ktrace_t *)NULL)
return; return;
/* /*
* Special treatment for the Vnode trace buffer. * Special treatment for the Vnode trace buffer.
*/ */
-	if (ktp->kt_nentries == ktrace_zentries) {
+	if (ktp->kt_nentries == ktrace_zentries)
		kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
-	} else {
-		entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t));
+	else
		kmem_free(ktp->kt_entries);
-	}
kmem_zone_free(ktrace_hdr_zone, ktp); kmem_zone_free(ktrace_hdr_zone, ktp);
} }
......