Commit 5123bc35 authored by Felix Blyakher

Merge branch 'master' of git://git.kernel.org/pub/scm/fs/xfs/xfs

/*
* Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __XFS_SUPPORT_MUTEX_H__
#define __XFS_SUPPORT_MUTEX_H__
#include <linux/mutex.h>
typedef struct mutex mutex_t;
#endif /* __XFS_SUPPORT_MUTEX_H__ */
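This header existed only to alias the kernel mutex as `mutex_t`; the hunks that follow convert XFS to use `struct mutex` directly. As a minimal sketch of the underlying kernel API the converted code relies on (an illustration, not part of this diff):

```c
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* statically initialized mutex */

static void example_critical_section(void)
{
	mutex_lock(&example_lock);	/* may sleep until the lock is acquired */
	/* ... serialized work ... */
	mutex_unlock(&example_lock);
}
```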
......@@ -1623,4 +1623,5 @@ const struct address_space_operations xfs_address_space_operations = {
.bmap = xfs_vm_bmap,
.direct_IO = xfs_vm_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
};
......@@ -541,9 +541,6 @@ xfs_vn_getattr(
stat->uid = ip->i_d.di_uid;
stat->gid = ip->i_d.di_gid;
stat->ino = ip->i_ino;
#if XFS_BIG_INUMS
stat->ino += mp->m_inoadd;
#endif
stat->atime = inode->i_atime;
stat->mtime.tv_sec = ip->i_d.di_mtime.t_sec;
stat->mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
......
......@@ -38,7 +38,6 @@
#include <kmem.h>
#include <mrlock.h>
#include <sv.h>
#include <mutex.h>
#include <time.h>
#include <support/ktrace.h>
......@@ -51,6 +50,7 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/errno.h>
......
......@@ -78,7 +78,6 @@ mempool_t *xfs_ioend_pool;
#define MNTOPT_RTDEV "rtdev" /* realtime I/O device */
#define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */
#define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */
#define MNTOPT_INO64 "ino64" /* force inodes into 64-bit range */
#define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */
#define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */
#define MNTOPT_SUNIT "sunit" /* data volume stripe unit */
......@@ -290,16 +289,6 @@ xfs_parseargs(
mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
mp->m_flags |= XFS_MOUNT_NORECOVERY;
} else if (!strcmp(this_char, MNTOPT_INO64)) {
#if XFS_BIG_INUMS
mp->m_flags |= XFS_MOUNT_INO64;
mp->m_inoadd = XFS_INO64_OFFSET;
#else
cmn_err(CE_WARN,
"XFS: %s option not allowed on this system",
this_char);
return EINVAL;
#endif
} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
mp->m_flags |= XFS_MOUNT_NOALIGN;
} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
......@@ -528,7 +517,6 @@ xfs_showargs(
/* the few simple ones we can get from the mount struct */
{ XFS_MOUNT_IKEEP, "," MNTOPT_IKEEP },
{ XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC },
{ XFS_MOUNT_INO64, "," MNTOPT_INO64 },
{ XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN },
{ XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC },
{ XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
......@@ -1199,18 +1187,12 @@ xfs_fs_statfs(
statp->f_bfree = statp->f_bavail =
sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
fakeinos = statp->f_bfree << sbp->sb_inopblog;
#if XFS_BIG_INUMS
fakeinos += mp->m_inoadd;
#endif
statp->f_files =
MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
if (mp->m_maxicount)
#if XFS_BIG_INUMS
if (!mp->m_inoadd)
#endif
statp->f_files = min_t(typeof(statp->f_files),
statp->f_files,
mp->m_maxicount);
statp->f_files = min_t(typeof(statp->f_files),
statp->f_files,
mp->m_maxicount);
statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
spin_unlock(&mp->m_sb_lock);
......
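Since the hunk above interleaves the removed and added lines, here is a consolidated sketch of the resulting `f_files` accounting (illustration only, assembled from the lines shown):

```c
/* sketch of the post-patch logic in xfs_fs_statfs() */
fakeinos = statp->f_bfree << sbp->sb_inopblog;	/* inodes the free space could hold */
statp->f_files = MIN(sbp->sb_icount + fakeinos,
		     (__uint64_t)XFS_MAXINUMBER);
if (mp->m_maxicount)				/* honor the inode count cap when set */
	statp->f_files = min_t(typeof(statp->f_files),
			       statp->f_files, mp->m_maxicount);
```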
......@@ -34,7 +34,7 @@
*/
typedef struct xfs_dqhash {
struct xfs_dquot *qh_next;
mutex_t qh_lock;
struct mutex qh_lock;
uint qh_version; /* ever increasing version */
uint qh_nelems; /* number of dquots on the list */
} xfs_dqhash_t;
......@@ -81,7 +81,7 @@ typedef struct xfs_dquot {
xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */
xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */
xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */
mutex_t q_qlock; /* quota lock */
struct mutex q_qlock; /* quota lock */
struct completion q_flush; /* flush completion queue */
atomic_t q_pincount; /* dquot pin count */
wait_queue_head_t q_pinwait; /* dquot pinning wait queue */
......
......@@ -55,7 +55,7 @@
* quota functionality, including maintaining the freelist and hash
* tables of dquots.
*/
mutex_t xfs_Gqm_lock;
struct mutex xfs_Gqm_lock;
struct xfs_qm *xfs_Gqm;
uint ndquot;
......@@ -80,7 +80,7 @@ static struct shrinker xfs_qm_shaker = {
};
#ifdef DEBUG
extern mutex_t qcheck_lock;
extern struct mutex qcheck_lock;
#endif
#ifdef QUOTADEBUG
......
......@@ -27,7 +27,7 @@ struct xfs_qm;
struct xfs_inode;
extern uint ndquot;
extern mutex_t xfs_Gqm_lock;
extern struct mutex xfs_Gqm_lock;
extern struct xfs_qm *xfs_Gqm;
extern kmem_zone_t *qm_dqzone;
extern kmem_zone_t *qm_dqtrxzone;
......@@ -79,7 +79,7 @@ typedef xfs_dqhash_t xfs_dqlist_t;
typedef struct xfs_frlist {
struct xfs_dquot *qh_next;
struct xfs_dquot *qh_prev;
mutex_t qh_lock;
struct mutex qh_lock;
uint qh_version;
uint qh_nelems;
} xfs_frlist_t;
......@@ -115,7 +115,7 @@ typedef struct xfs_quotainfo {
xfs_qwarncnt_t qi_bwarnlimit; /* limit for blks warnings */
xfs_qwarncnt_t qi_iwarnlimit; /* limit for inodes warnings */
xfs_qwarncnt_t qi_rtbwarnlimit;/* limit for rt blks warnings */
mutex_t qi_quotaofflock;/* to serialize quotaoff */
struct mutex qi_quotaofflock;/* to serialize quotaoff */
xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */
uint qi_dqperchunk; /* # ondisk dqs in above chunk */
xfs_qcnt_t qi_bhardlimit; /* default data blk hard limit */
......
......@@ -960,7 +960,7 @@ xfs_dqhash_t *qmtest_udqtab;
xfs_dqhash_t *qmtest_gdqtab;
int qmtest_hashmask;
int qmtest_nfails;
mutex_t qcheck_lock;
struct mutex qcheck_lock;
#define DQTEST_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
(__psunsigned_t)(id)) & \
......
......@@ -17,10 +17,6 @@
*/
#include <xfs.h>
static DEFINE_MUTEX(uuid_monitor);
static int uuid_table_size;
static uuid_t *uuid_table;
/* IRIX interpretation of an uuid_t */
typedef struct {
__be32 uu_timelow;
......@@ -46,12 +42,6 @@ uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
fsid[1] = be32_to_cpu(uup->uu_timelow);
}
void
uuid_create_nil(uuid_t *uuid)
{
memset(uuid, 0, sizeof(*uuid));
}
int
uuid_is_nil(uuid_t *uuid)
{
......@@ -71,64 +61,3 @@ uuid_equal(uuid_t *uuid1, uuid_t *uuid2)
{
return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1;
}
/*
* Given a 128-bit uuid, return a 64-bit value by adding the top and bottom
* 64-bit words. NOTE: This function can not be changed EVER. Although
* brain-dead, some applications depend on this 64-bit value remaining
* persistent. Specifically, DMI vendors store the value as a persistent
* filehandle.
*/
__uint64_t
uuid_hash64(uuid_t *uuid)
{
__uint64_t *sp = (__uint64_t *)uuid;
return sp[0] + sp[1];
}
int
uuid_table_insert(uuid_t *uuid)
{
int i, hole;
mutex_lock(&uuid_monitor);
for (i = 0, hole = -1; i < uuid_table_size; i++) {
if (uuid_is_nil(&uuid_table[i])) {
hole = i;
continue;
}
if (uuid_equal(uuid, &uuid_table[i])) {
mutex_unlock(&uuid_monitor);
return 0;
}
}
if (hole < 0) {
uuid_table = kmem_realloc(uuid_table,
(uuid_table_size + 1) * sizeof(*uuid_table),
uuid_table_size * sizeof(*uuid_table),
KM_SLEEP);
hole = uuid_table_size++;
}
uuid_table[hole] = *uuid;
mutex_unlock(&uuid_monitor);
return 1;
}
void
uuid_table_remove(uuid_t *uuid)
{
int i;
mutex_lock(&uuid_monitor);
for (i = 0; i < uuid_table_size; i++) {
if (uuid_is_nil(&uuid_table[i]))
continue;
if (!uuid_equal(uuid, &uuid_table[i]))
continue;
uuid_create_nil(&uuid_table[i]);
break;
}
ASSERT(i < uuid_table_size);
mutex_unlock(&uuid_monitor);
}
......@@ -22,12 +22,8 @@ typedef struct {
unsigned char __u_bits[16];
} uuid_t;
extern void uuid_create_nil(uuid_t *uuid);
extern int uuid_is_nil(uuid_t *uuid);
extern int uuid_equal(uuid_t *uuid1, uuid_t *uuid2);
extern void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]);
extern __uint64_t uuid_hash64(uuid_t *uuid);
extern int uuid_table_insert(uuid_t *uuid);
extern void uuid_table_remove(uuid_t *uuid);
#endif /* __XFS_SUPPORT_UUID_H__ */
......@@ -155,7 +155,8 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
* minimum offset only needs to be the space required for
* the btree root.
*/
if (!dp->i_d.di_forkoff && dp->i_df.if_bytes > mp->m_attroffset)
if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
xfs_default_attroffset(dp))
dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
break;
......
......@@ -2479,7 +2479,7 @@ xfs_bmap_adjacent(
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
/*
* If allocating at eof, and there's a previous real block,
* try to use it's last block as our starting point.
* try to use its last block as our starting point.
*/
if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
!isnullstartblock(ap->prevp->br_startblock) &&
......@@ -3568,6 +3568,27 @@ xfs_bmap_extents_to_btree(
return 0;
}
/*
* Calculate the default attribute fork offset for newly created inodes.
*/
uint
xfs_default_attroffset(
struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
uint offset;
if (mp->m_sb.sb_inodesize == 256) {
offset = XFS_LITINO(mp) -
XFS_BMDR_SPACE_CALC(MINABTPTRS);
} else {
offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
}
ASSERT(offset < XFS_LITINO(mp));
return offset;
}
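A note on units for the helper above (an illustration, not part of the diff): `di_forkoff` is stored in 8-byte words, so callers shift the byte offset from `xfs_default_attroffset()` right by 3 before comparing or assigning, as the hunks below do:

```c
/* hypothetical caller, mirroring the conversions in this commit */
uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;	/* bytes -> 8-byte units */

if (dfl_forkoff > ip->i_d.di_forkoff)
	ip->i_d.di_forkoff = dfl_forkoff;
```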
/*
* Helper routine to reset inode di_forkoff field when switching
* attribute fork from local to extent format - we reset it where
......@@ -3580,15 +3601,18 @@ xfs_bmap_forkoff_reset(
int whichfork)
{
if (whichfork == XFS_ATTR_FORK &&
(ip->i_d.di_format != XFS_DINODE_FMT_DEV) &&
(ip->i_d.di_format != XFS_DINODE_FMT_UUID) &&
(ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
((mp->m_attroffset >> 3) > ip->i_d.di_forkoff)) {
ip->i_d.di_forkoff = mp->m_attroffset >> 3;
ip->i_df.if_ext_max = XFS_IFORK_DSIZE(ip) /
(uint)sizeof(xfs_bmbt_rec_t);
ip->i_afp->if_ext_max = XFS_IFORK_ASIZE(ip) /
(uint)sizeof(xfs_bmbt_rec_t);
ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
if (dfl_forkoff > ip->i_d.di_forkoff) {
ip->i_d.di_forkoff = dfl_forkoff;
ip->i_df.if_ext_max =
XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);
ip->i_afp->if_ext_max =
XFS_IFORK_ASIZE(ip) / sizeof(xfs_bmbt_rec_t);
}
}
}
......@@ -4057,7 +4081,7 @@ xfs_bmap_add_attrfork(
case XFS_DINODE_FMT_BTREE:
ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
if (!ip->i_d.di_forkoff)
ip->i_d.di_forkoff = mp->m_attroffset >> 3;
ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
else if (mp->m_flags & XFS_MOUNT_ATTR2)
version = 2;
break;
......@@ -4204,12 +4228,12 @@ xfs_bmap_compute_maxlevels(
* (a signed 16-bit number, xfs_aextnum_t).
*
* Note that we can no longer assume that if we are in ATTR1 that
* the fork offset of all the inodes will be (m_attroffset >> 3)
* because we could have mounted with ATTR2 and then mounted back
* with ATTR1, keeping the di_forkoff's fixed but probably at
* various positions. Therefore, for both ATTR1 and ATTR2
* we have to assume the worst case scenario of a minimum size
* available.
* the fork offset of all the inodes will be
* (xfs_default_attroffset(ip) >> 3) because we could have mounted
* with ATTR2 and then mounted back with ATTR1, keeping the
* di_forkoff's fixed but probably at various positions. Therefore,
* for both ATTR1 and ATTR2 we have to assume the worst case scenario
* of a minimum size available.
*/
if (whichfork == XFS_DATA_FORK) {
maxleafents = MAXEXTNUM;
......@@ -4796,7 +4820,7 @@ xfs_bmapi(
xfs_extlen_t minlen; /* min allocation size */
xfs_mount_t *mp; /* xfs mount structure */
int n; /* current extent index */
int nallocs; /* number of extents alloc\'d */
int nallocs; /* number of extents alloc'd */
xfs_extnum_t nextents; /* number of extents in file */
xfs_fileoff_t obno; /* old block number (offset) */
xfs_bmbt_irec_t prev; /* previous file extent record */
......@@ -6486,7 +6510,7 @@ xfs_bmap_count_tree(
block = XFS_BUF_TO_BLOCK(bp);
if (--level) {
/* Not at node above leafs, count this level of nodes */
/* Not at node above leaves, count this level of nodes */
nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
while (nextbno != NULLFSBLOCK) {
if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
......
......@@ -125,7 +125,7 @@ typedef struct xfs_bmalloca {
struct xfs_bmbt_irec *gotp; /* extent after, or delayed */
xfs_extlen_t alen; /* i/o length asked/allocated */
xfs_extlen_t total; /* total blocks needed for xaction */
xfs_extlen_t minlen; /* mininum allocation size (blocks) */
xfs_extlen_t minlen; /* minimum allocation size (blocks) */
xfs_extlen_t minleft; /* amount must be left after alloc */
char eof; /* set if allocating past last extent */
char wasdel; /* replacing a delayed allocation */
......@@ -338,6 +338,10 @@ xfs_check_nostate_extents(
xfs_extnum_t idx,
xfs_extnum_t num);
uint
xfs_default_attroffset(
struct xfs_inode *ip);
#ifdef __KERNEL__
/*
......
......@@ -1883,7 +1883,7 @@ xfs_btree_lshift(
/*
* We add one entry to the left side and remove one for the right side.
* Accout for it here, the changes will be updated on disk and logged
* Account for it here, the changes will be updated on disk and logged
* later.
*/
lrecs++;
......@@ -3535,7 +3535,7 @@ xfs_btree_delrec(
XFS_BTREE_STATS_INC(cur, join);
/*
* Fix up the the number of records and right block pointer in the
* Fix up the number of records and right block pointer in the
* surviving block, and log it.
*/
xfs_btree_set_numrecs(left, lrecs + rrecs);
......
......@@ -41,7 +41,7 @@ extern kmem_zone_t *xfs_btree_cur_zone;
/*
* Generic btree header.
*
* This is a comination of the actual format used on disk for short and long
* This is a combination of the actual format used on disk for short and long
* format btrees. The first three fields are shared by both format, but
* the pointers are different and should be used with care.
*
......
......@@ -185,7 +185,7 @@ typedef struct xfs_da_state {
unsigned char inleaf; /* insert into 1->lf, 0->splf */
unsigned char extravalid; /* T/F: extrablk is in use */
unsigned char extraafter; /* T/F: extrablk is after new */
xfs_da_state_blk_t extrablk; /* for double-splits on leafs */
xfs_da_state_blk_t extrablk; /* for double-splits on leaves */
/* for dirv2 extrablk is data */
} xfs_da_state_t;
......
......@@ -103,7 +103,9 @@ typedef enum xfs_dinode_fmt {
/*
* Inode size for given fs.
*/
#define XFS_LITINO(mp) ((mp)->m_litino)
#define XFS_LITINO(mp) \
((int)(((mp)->m_sb.sb_inodesize) - sizeof(struct xfs_dinode)))
#define XFS_BROOT_SIZE_ADJ \
(XFS_BTREE_LBLOCK_LEN - sizeof(xfs_bmdr_block_t))
......
......@@ -448,7 +448,6 @@ xfs_dir2_block_getdents(
xfs_mount_t *mp; /* filesystem mount point */
char *ptr; /* current data entry */
int wantoff; /* starting block offset */
xfs_ino_t ino;
xfs_off_t cook;
mp = dp->i_mount;
......@@ -509,16 +508,12 @@ xfs_dir2_block_getdents(
cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
(char *)dep - (char *)block);
ino = be64_to_cpu(dep->inumber);
#if XFS_BIG_INUMS
ino += mp->m_inoadd;
#endif
/*
* If it didn't fit, set the final offset to here & return.
*/
if (filldir(dirent, dep->name, dep->namelen, cook & 0x7fffffff,
ino, DT_UNKNOWN)) {
be64_to_cpu(dep->inumber), DT_UNKNOWN)) {
*offset = cook & 0x7fffffff;
xfs_da_brelse(NULL, bp);
return 0;
......
......@@ -38,7 +38,7 @@ struct xfs_trans;
/*
* Directory address space divided into sections,
* spaces separated by 32gb.
* spaces separated by 32GB.
*/
#define XFS_DIR2_SPACE_SIZE (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG))
#define XFS_DIR2_DATA_SPACE 0
......
......@@ -780,7 +780,6 @@ xfs_dir2_leaf_getdents(
int ra_index; /* *map index for read-ahead */
int ra_offset; /* map entry offset for ra */
int ra_want; /* readahead count wanted */
xfs_ino_t ino;
/*
* If the offset is at or past the largest allowed value,
......@@ -1076,24 +1075,12 @@ xfs_dir2_leaf_getdents(
continue;
}
/*
* Copy the entry into the putargs, and try formatting it.
*/
dep = (xfs_dir2_data_entry_t *)ptr;
length = xfs_dir2_data_entsize(dep->namelen);
ino = be64_to_cpu(dep->inumber);
#if XFS_BIG_INUMS
ino += mp->m_inoadd;
#endif
/*
* Won't fit. Return to caller.
*/
if (filldir(dirent, dep->name, dep->namelen,
xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff,
ino, DT_UNKNOWN))
be64_to_cpu(dep->inumber), DT_UNKNOWN))
break;
/*
......
......@@ -1104,7 +1104,7 @@ xfs_dir2_leafn_remove(
}
xfs_dir2_leafn_check(dp, bp);
/*
* Return indication of whether this leaf block is emtpy enough
* Return indication of whether this leaf block is empty enough
* to justify trying to join it with a neighbor.
*/
*rval =
......
......@@ -748,11 +748,7 @@ xfs_dir2_sf_getdents(
* Put . entry unless we're starting past it.
*/
if (*offset <= dot_offset) {
ino = dp->i_ino;
#if XFS_BIG_INUMS
ino += mp->m_inoadd;
#endif
if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, ino, DT_DIR)) {
if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, dp->i_ino, DT_DIR)) {
*offset = dot_offset & 0x7fffffff;
return 0;
}
......@@ -763,9 +759,6 @@ xfs_dir2_sf_getdents(
*/
if (*offset <= dotdot_offset) {
ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
#if XFS_BIG_INUMS
ino += mp->m_inoadd;
#endif
if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) {
*offset = dotdot_offset & 0x7fffffff;
return 0;
......@@ -786,10 +779,6 @@ xfs_dir2_sf_getdents(
}
ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
#if XFS_BIG_INUMS
ino += mp->m_inoadd;
#endif
if (filldir(dirent, sfep->name, sfep->namelen,
off & 0x7fffffff, ino, DT_UNKNOWN)) {
*offset = off & 0x7fffffff;
......
......@@ -576,7 +576,7 @@ xfs_reserve_blocks(
if (fdblks_delta) {
/*
* If we are putting blocks back here, m_resblks_avail is
* already at it's max so this will put it in the free pool.
* already at its max so this will put it in the free pool.
*
* If we need space, we'll either succeed in getting it
* from the free block count or we'll get an enospc. If
......
......@@ -349,7 +349,7 @@ xfs_ialloc_ag_alloc(
* Initialize all inodes in this buffer and then log them.
*
* XXX: It would be much better if we had just one transaction to
* log a whole cluster of inodes instead of all the indivdual
* log a whole cluster of inodes instead of all the individual
* transactions causing a lot of log traffic.
*/
xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog);
......
......@@ -164,7 +164,7 @@ xfs_inobt_init_rec_from_cur(
}
/*
* intial value of ptr for lookup
* initial value of ptr for lookup
*/
STATIC void
xfs_inobt_init_ptr_from_cur(
......
......@@ -122,7 +122,7 @@ typedef struct xfs_ictimestamp {
/*
* NOTE: This structure must be kept identical to struct xfs_dinode
* in xfs_dinode.h except for the endianess annotations.
* in xfs_dinode.h except for the endianness annotations.
*/
typedef struct xfs_icdinode {
__uint16_t di_magic; /* inode magic # = XFS_DINODE_MAGIC */
......
......@@ -63,7 +63,7 @@ typedef enum {
*/
typedef struct xfs_iomap {
xfs_daddr_t iomap_bn; /* first 512b blk of mapping */
xfs_daddr_t iomap_bn; /* first 512B blk of mapping */
xfs_buftarg_t *iomap_target;
xfs_off_t iomap_offset; /* offset of mapping, bytes */
xfs_off_t iomap_bsize; /* size of mapping, bytes */
......
......@@ -584,7 +584,7 @@ xfs_bulkstat(
* first inode of the cluster.
*
* Careful with clustidx. There can be
* multple clusters per chunk, a single
* multiple clusters per chunk, a single
* cluster per chunk or a cluster that has
* inodes represented from several different
* chunks (if blocksize is large).
......
......@@ -1098,7 +1098,7 @@ xlog_bdstrat_cb(struct xfs_buf *bp)
/*
* Return size of each in-core log record buffer.
*
* All machines get 8 x 32KB buffers by default, unless tuned otherwise.
* All machines get 8 x 32kB buffers by default, unless tuned otherwise.
*
* If the filesystem blocksize is too large, we may need to choose a
* larger size since the directory code currently logs entire blocks.
......@@ -1128,8 +1128,8 @@ xlog_get_iclog_buffer_size(xfs_mount_t *mp,
}
if (xfs_sb_version_haslogv2(&mp->m_sb)) {
/* # headers = size / 32K
* one header holds cycles from 32K of data
/* # headers = size / 32k
* one header holds cycles from 32k of data
*/
xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
......@@ -1145,7 +1145,7 @@ xlog_get_iclog_buffer_size(xfs_mount_t *mp,
goto done;
}
/* All machines use 32KB buffers by default. */
/* All machines use 32kB buffers by default. */
log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
......@@ -3179,7 +3179,7 @@ xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
*/
/*
* Free a used ticket when it's refcount falls to zero.
* Free a used ticket when its refcount falls to zero.
*/
void
xfs_log_ticket_put(
......
......@@ -45,7 +45,6 @@
#include "xfs_fsops.h"
#include "xfs_utils.h"
STATIC int xfs_uuid_mount(xfs_mount_t *);
STATIC void xfs_unmountfs_wait(xfs_mount_t *);
......@@ -121,6 +120,84 @@ static const struct {
{ sizeof(xfs_sb_t), 0 }
};
static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;
/*
* See if the UUID is unique among mounted XFS filesystems.
* Mount fails if UUID is nil or a FS with the same UUID is already mounted.
*/
STATIC int
xfs_uuid_mount(
struct xfs_mount *mp)
{
uuid_t *uuid = &mp->m_sb.sb_uuid;
int hole, i;
if (mp->m_flags & XFS_MOUNT_NOUUID)
return 0;
if (uuid_is_nil(uuid)) {
cmn_err(CE_WARN,
"XFS: Filesystem %s has nil UUID - can't mount",
mp->m_fsname);
return XFS_ERROR(EINVAL);
}
mutex_lock(&xfs_uuid_table_mutex);
for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
if (uuid_is_nil(&xfs_uuid_table[i])) {
hole = i;
continue;
}
if (uuid_equal(uuid, &xfs_uuid_table[i]))
goto out_duplicate;
}
if (hole < 0) {
xfs_uuid_table = kmem_realloc(xfs_uuid_table,
(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
xfs_uuid_table_size * sizeof(*xfs_uuid_table),
KM_SLEEP);
hole = xfs_uuid_table_size++;
}
xfs_uuid_table[hole] = *uuid;
mutex_unlock(&xfs_uuid_table_mutex);
return 0;
out_duplicate:
mutex_unlock(&xfs_uuid_table_mutex);
cmn_err(CE_WARN, "XFS: Filesystem %s has duplicate UUID - can't mount",
mp->m_fsname);
return XFS_ERROR(EINVAL);
}
STATIC void
xfs_uuid_unmount(
struct xfs_mount *mp)
{
uuid_t *uuid = &mp->m_sb.sb_uuid;
int i;
if (mp->m_flags & XFS_MOUNT_NOUUID)
return;
mutex_lock(&xfs_uuid_table_mutex);
for (i = 0; i < xfs_uuid_table_size; i++) {
if (uuid_is_nil(&xfs_uuid_table[i]))
continue;
if (!uuid_equal(uuid, &xfs_uuid_table[i]))
continue;
memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
break;
}
ASSERT(i < xfs_uuid_table_size);
mutex_unlock(&xfs_uuid_table_mutex);
}
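For context, a sketch of how the mount path consumes this pair of helpers, mirroring the xfs_mountfs()/xfs_unmountfs() hunks later in this diff:

```c
/* mount: register the superblock UUID, failing on nil or duplicates */
error = xfs_uuid_mount(mp);
if (error)
	goto out;

/* ... filesystem lifetime ... */

/* unmount: forget the UUID so the filesystem can be mounted again */
xfs_uuid_unmount(mp);
```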
/*
* Free up the resources associated with a mount structure. Assume that
* the structure was initially zeroed, so we can tell which fields got
......@@ -256,6 +333,22 @@ xfs_mount_validate_sb(
return XFS_ERROR(ENOSYS);
}
/*
* Currently only very few inode sizes are supported.
*/
switch (sbp->sb_inodesize) {
case 256:
case 512:
case 1024:
case 2048:
break;
default:
xfs_fs_mount_cmn_err(flags,
"inode size of %d bytes not supported",
sbp->sb_inodesize);
return XFS_ERROR(ENOSYS);
}
if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
xfs_fs_mount_cmn_err(flags,
......@@ -574,32 +667,10 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
mp->m_litino = sbp->sb_inodesize - sizeof(struct xfs_dinode);
mp->m_blockmask = sbp->sb_blocksize - 1;
mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
mp->m_blockwmask = mp->m_blockwsize - 1;
/*
* Setup for attributes, in case they get created.
* This value is for inodes getting attributes for the first time,
* the per-inode value is for old attribute values.
*/
ASSERT(sbp->sb_inodesize >= 256 && sbp->sb_inodesize <= 2048);
switch (sbp->sb_inodesize) {
case 256:
mp->m_attroffset = XFS_LITINO(mp) -
XFS_BMDR_SPACE_CALC(MINABTPTRS);
break;
case 512:
case 1024:
case 2048:
mp->m_attroffset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
break;
default:
ASSERT(0);
}
ASSERT(mp->m_attroffset < XFS_LITINO(mp));
mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
......@@ -645,7 +716,7 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
for (index = 0; index < agcount; index++) {
/*
* read the agf, then the agi. This gets us
* all the inforamtion we need and populates the
* all the information we need and populates the
* per-ag structures for us.
*/
error = xfs_alloc_pagf_init(mp, NULL, index, 0);
......@@ -968,18 +1039,9 @@ xfs_mountfs(
mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);
/*
* XFS uses the uuid from the superblock as the unique
* identifier for fsid. We can not use the uuid from the volume
* since a single partition filesystem is identical to a single
* partition volume/filesystem.
*/
if (!(mp->m_flags & XFS_MOUNT_NOUUID)) {
if (xfs_uuid_mount(mp)) {
error = XFS_ERROR(EINVAL);
goto out;
}
}
error = xfs_uuid_mount(mp);
if (error)
goto out;
/*
* Set the minimum read and write sizes
......@@ -1198,8 +1260,7 @@ xfs_mountfs(
out_free_perag:
xfs_free_perag(mp);
out_remove_uuid:
if (!(mp->m_flags & XFS_MOUNT_NOUUID))
uuid_table_remove(&mp->m_sb.sb_uuid);
xfs_uuid_unmount(mp);
out:
return error;
}
......@@ -1226,7 +1287,7 @@ xfs_unmountfs(
/*
* We can potentially deadlock here if we have an inode cluster
* that has been freed has it's buffer still pinned in memory because
* that has been freed has its buffer still pinned in memory because
* the transaction is still sitting in a iclog. The stale inodes
* on that buffer will have their flush locks held until the
* transaction hits the disk and the callbacks run. the inode
......@@ -1258,7 +1319,7 @@ xfs_unmountfs(
* Unreserve any blocks we have so that when we unmount we don't account
* the reserved free space as used. This is really only necessary for
* lazy superblock counting because it trusts the incore superblock
* counters to be aboslutely correct on clean unmount.
* counters to be absolutely correct on clean unmount.
*
* We don't bother correcting this elsewhere for lazy superblock
* counting because on mount of an unclean filesystem we reconstruct the
......@@ -1282,9 +1343,7 @@ xfs_unmountfs(
xfs_unmountfs_wait(mp); /* wait for async bufs */
xfs_log_unmount_write(mp);
xfs_log_unmount(mp);
if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
uuid_table_remove(&mp->m_sb.sb_uuid);
xfs_uuid_unmount(mp);
#if defined(DEBUG)
xfs_errortag_clearall(mp, 0);
......@@ -1785,29 +1844,6 @@ xfs_freesb(
mp->m_sb_bp = NULL;
}
/*
* See if the UUID is unique among mounted XFS filesystems.
* Mount fails if UUID is nil or a FS with the same UUID is already mounted.
*/
STATIC int
xfs_uuid_mount(
xfs_mount_t *mp)
{
if (uuid_is_nil(&mp->m_sb.sb_uuid)) {
cmn_err(CE_WARN,
"XFS: Filesystem %s has nil UUID - can't mount",
mp->m_fsname);
return -1;
}
if (!uuid_table_insert(&mp->m_sb.sb_uuid)) {
cmn_err(CE_WARN,
"XFS: Filesystem %s has duplicate UUID - can't mount",
mp->m_fsname);
return -1;
}
return 0;
}
/*
* Used to log changes to the superblock unit and width fields which could
* be altered by the mount options, as well as any potential sb_features2
......@@ -1861,7 +1897,7 @@ xfs_mount_log_sb(
* we disable the per-cpu counter and go through the slow path.
*
* The slow path is the current xfs_mod_incore_sb() function. This means that
* when we disable a per-cpu counter, we need to drain it's resources back to
* when we disable a per-cpu counter, we need to drain its resources back to
* the global superblock. We do this after disabling the counter to prevent
* more threads from queueing up on the counter.
*
......
......@@ -276,12 +276,10 @@ typedef struct xfs_mount {
int m_fixedfsid[2]; /* unchanged for life of FS */
uint m_dmevmask; /* DMI events for this FS */
__uint64_t m_flags; /* global mount flags */
uint m_attroffset; /* inode attribute offset */
uint m_dir_node_ents; /* #entries in a dir danode */
uint m_attr_node_ents; /* #entries in attr danode */
int m_ialloc_inos; /* inodes in inode allocation */
int m_ialloc_blks; /* blocks in inode allocation */
int m_litino; /* size of inode union area */
int m_inoalign_mask;/* mask sb_inoalignmt if used */
uint m_qflags; /* quota status flags */
xfs_trans_reservations_t m_reservations;/* precomputed res values */
......@@ -289,9 +287,6 @@ typedef struct xfs_mount {
__uint64_t m_maxioffset; /* maximum inode offset */
__uint64_t m_resblks; /* total reserved blocks */
__uint64_t m_resblks_avail;/* available reserved blocks */
#if XFS_BIG_INUMS
xfs_ino_t m_inoadd; /* add value for ino64_offset */
#endif
int m_dalign; /* stripe unit */
int m_swidth; /* stripe width */
int m_sinoalign; /* stripe unit inode alignment */
......@@ -333,7 +328,6 @@ typedef struct xfs_mount {
#define XFS_MOUNT_WSYNC (1ULL << 0) /* for nfs - all metadata ops
must be synchronous except
for space allocations */
#define XFS_MOUNT_INO64 (1ULL << 1)
#define XFS_MOUNT_DMAPI (1ULL << 2) /* dmapi is enabled */
#define XFS_MOUNT_WAS_CLEAN (1ULL << 3)
#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem
......@@ -385,8 +379,8 @@ typedef struct xfs_mount {
* Synchronous read and write sizes. This should be
* better for NFSv2 wsync filesystems.
*/
#define XFS_WSYNC_READIO_LOG 15 /* 32K */
#define XFS_WSYNC_WRITEIO_LOG 14 /* 16K */
#define XFS_WSYNC_READIO_LOG 15 /* 32k */
#define XFS_WSYNC_WRITEIO_LOG 14 /* 16k */
/*
* Allow large block sizes to be reported to userspace programs if the
......
......@@ -23,8 +23,8 @@ struct xfs_trans;
/* Min and max rt extent sizes, specified in bytes */
#define XFS_MAX_RTEXTSIZE (1024 * 1024 * 1024) /* 1GB */
#define XFS_DFL_RTEXTSIZE (64 * 1024) /* 64KB */
#define XFS_MIN_RTEXTSIZE (4 * 1024) /* 4KB */
#define XFS_DFL_RTEXTSIZE (64 * 1024) /* 64kB */
#define XFS_MIN_RTEXTSIZE (4 * 1024) /* 4kB */
/*
* Constants for bit manipulations.
......
......@@ -292,7 +292,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
* In a write transaction we can allocate a maximum of 2
* extents. This gives:
* the inode getting the new extents: inode size
* the inode\'s bmap btree: max depth * block size
* the inode's bmap btree: max depth * block size
* the agfs of the ags from which the extents are allocated: 2 * sector
* the superblock free block counter: sector size
* the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
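As a rough worked example of the formula above (all numbers are assumptions for illustration only: 256-byte inodes, 4096-byte blocks, 512-byte sectors, bmap btree depth 5):

```c
/* hypothetical write-transaction reservation, following the comment above */
int res = 256				/* the inode getting the new extents */
	+ 5 * 4096			/* inode's bmap btree: max depth * block size */
	+ 2 * 512			/* agfs of the ags the extents come from */
	+ 512				/* superblock free block counter */
	+ 2 * 2 * (2 * 5 - 1) * 4096;	/* allocation btrees */
```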
......@@ -321,7 +321,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
/*
* In truncating a file we free up to two extents at once. We can modify:
* the inode being truncated: inode size
* the inode\'s bmap btree: (max depth + 1) * block size
* the inode's bmap btree: (max depth + 1) * block size
* And the bmap_finish transaction can free the blocks and bmap blocks:
* the agf for each of the ags: 4 * sector size
* the agfl for each of the ags: 4 * sector size
......@@ -431,8 +431,8 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
* the new inode: inode size
* the inode btree entry: 1 block
* the directory btree: (max depth + v2) * dir block size
* the directory inode\'s bmap btree: (max depth + v2) * block size
* the blocks for the symlink: 1 KB
* the directory inode's bmap btree: (max depth + v2) * block size
* the blocks for the symlink: 1 kB
* Or in the first xact we allocate some inodes giving:
* the agi and agf of the ag getting the new inodes: 2 * sectorsize
* the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
......@@ -463,7 +463,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
* the inode btree entry: block size
* the superblock for the nlink flag: sector size
* the directory btree: (max depth + v2) * dir block size
* the directory inode\'s bmap btree: (max depth + v2) * block size
* the directory inode's bmap btree: (max depth + v2) * block size
* Or in the first xact we allocate some inodes giving:
* the agi and agf of the ag getting the new inodes: 2 * sectorsize
* the superblock for the nlink flag: sector size
......@@ -637,7 +637,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
/*
* Removing the attribute fork of a file
* the inode being truncated: inode size
* the inode\'s bmap btree: max depth * block size
* the inode's bmap btree: max depth * block size
* And the bmap_finish transaction can free the blocks and bmap blocks:
* the agf for each of the ags: 4 * sector size
* the agfl for each of the ags: 4 * sector size
......
......@@ -79,7 +79,7 @@ xfs_trans_ail_tail(
* the push is run asynchronously in a separate thread, so we return the tail
* of the log right now instead of the tail after the push. This means we will
* either continue right away, or we will sleep waiting on the async thread to
* do it's work.
* do its work.
*
* We do this unlocked - we only need to know whether there is anything in the
* AIL at the time we are called. We don't need to access the contents of
......@@ -160,7 +160,7 @@ xfs_trans_ail_cursor_next(
/*
* Now that the traversal is complete, we need to remove the cursor
* from the list of traversing cursors. Avoid removing the embedded
* push cursor, but use the fact it is alway present to make the
* push cursor, but use the fact it is always present to make the
* list deletion simple.
*/
void
......
......@@ -22,7 +22,7 @@
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
/* XXX: from here down needed until struct xfs_trans has it's own ailp */
/* XXX: from here down needed until struct xfs_trans has its own ailp */
#include "xfs_bit.h"
#include "xfs_buf_item.h"
#include "xfs_sb.h"
......
......@@ -374,7 +374,7 @@ xfs_truncate_file(
/*
* Follow the normal truncate locking protocol. Since we
* hold the inode in the transaction, we know that it's number
* hold the inode in the transaction, we know that its number
* of references will stay constant.
*/
xfs_ilock(ip, XFS_ILOCK_EXCL);
......
......@@ -2862,7 +2862,7 @@ xfs_free_file_space(
/*
* Need to zero the stuff we're not freeing, on disk.
* If its a realtime file & can't use unwritten extents then we
* If it's a realtime file & can't use unwritten extents then we
* actually need to zero the extent edges. Otherwise xfs_bunmapi
* will take care of it for us.
*/
......