Commit 41f81e88 authored by Linus Torvalds

Merge branch 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6

* 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6:
  [XFS] Fix xfs_ichgtime()'s broken usage of I_SYNC
  [XFS] Make xfsbufd threads freezable
  [XFS] revert to double-buffering readdir
  [XFS] Fix broken inode cluster setup.
  [XFS] Clear XBF_READ_AHEAD flag on I/O completion.
  [XFS] Fixed a few bugs in xfs_buf_associate_memory()
  [XFS] 971064 Various fixups for xfs_bulkstat().
  [XFS] Fix dbflush panic in xfs_qm_sync.
...@@ -725,15 +725,15 @@ xfs_buf_associate_memory( ...@@ -725,15 +725,15 @@ xfs_buf_associate_memory(
{ {
int rval; int rval;
int i = 0; int i = 0;
size_t ptr; unsigned long pageaddr;
size_t end, end_cur; unsigned long offset;
off_t offset; size_t buflen;
int page_count; int page_count;
page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT; pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK); offset = (unsigned long)mem - pageaddr;
if (offset && (len > PAGE_CACHE_SIZE)) buflen = PAGE_CACHE_ALIGN(len + offset);
page_count++; page_count = buflen >> PAGE_CACHE_SHIFT;
/* Free any previous set of page pointers */ /* Free any previous set of page pointers */
if (bp->b_pages) if (bp->b_pages)
...@@ -747,22 +747,15 @@ xfs_buf_associate_memory( ...@@ -747,22 +747,15 @@ xfs_buf_associate_memory(
return rval; return rval;
bp->b_offset = offset; bp->b_offset = offset;
ptr = (size_t) mem & PAGE_CACHE_MASK;
end = PAGE_CACHE_ALIGN((size_t) mem + len); for (i = 0; i < bp->b_page_count; i++) {
end_cur = end; bp->b_pages[i] = mem_to_page((void *)pageaddr);
/* set up first page */ pageaddr += PAGE_CACHE_SIZE;
bp->b_pages[0] = mem_to_page(mem);
ptr += PAGE_CACHE_SIZE;
bp->b_page_count = ++i;
while (ptr < end) {
bp->b_pages[i] = mem_to_page((void *)ptr);
bp->b_page_count = ++i;
ptr += PAGE_CACHE_SIZE;
} }
bp->b_locked = 0; bp->b_locked = 0;
bp->b_count_desired = bp->b_buffer_length = len; bp->b_count_desired = len;
bp->b_buffer_length = buflen;
bp->b_flags |= XBF_MAPPED; bp->b_flags |= XBF_MAPPED;
return 0; return 0;
...@@ -1032,7 +1025,7 @@ xfs_buf_ioend( ...@@ -1032,7 +1025,7 @@ xfs_buf_ioend(
xfs_buf_t *bp, xfs_buf_t *bp,
int schedule) int schedule)
{ {
bp->b_flags &= ~(XBF_READ | XBF_WRITE); bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
if (bp->b_error == 0) if (bp->b_error == 0)
bp->b_flags |= XBF_DONE; bp->b_flags |= XBF_DONE;
...@@ -1750,6 +1743,8 @@ xfsbufd( ...@@ -1750,6 +1743,8 @@ xfsbufd(
current->flags |= PF_MEMALLOC; current->flags |= PF_MEMALLOC;
set_freezable();
do { do {
if (unlikely(freezing(current))) { if (unlikely(freezing(current))) {
set_bit(XBT_FORCE_SLEEP, &target->bt_flags); set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
......
...@@ -218,6 +218,15 @@ xfs_vm_fault( ...@@ -218,6 +218,15 @@ xfs_vm_fault(
} }
#endif /* CONFIG_XFS_DMAPI */ #endif /* CONFIG_XFS_DMAPI */
/*
* Unfortunately we can't just use the clean and simple readdir implementation
* below, because nfs might call back into ->lookup from the filldir callback
* and that will deadlock the low-level btree code.
*
 * Hopefully we'll find a better workaround that allows us to use the
 * optimal version, at least for local readdirs, for 2.6.25.
*/
#if 0
STATIC int STATIC int
xfs_file_readdir( xfs_file_readdir(
struct file *filp, struct file *filp,
...@@ -249,6 +258,121 @@ xfs_file_readdir( ...@@ -249,6 +258,121 @@ xfs_file_readdir(
return -error; return -error;
return 0; return 0;
} }
#else
/*
 * One serialized directory entry in the staging buffer.  Entries are
 * packed back to back: each record is sizeof(struct hack_dirent) plus
 * namlen name bytes (the name is NOT NUL-terminated; namlen carries
 * the length).
 */
struct hack_dirent {
	int namlen;		/* length of name[] in bytes */
	loff_t offset;		/* directory offset cookie for this entry,
				 * as handed to the filldir callback */
	u64 ino;		/* inode number of the entry */
	unsigned int d_type;	/* DT_* file type for filldir */
	char name[];		/* flexible array: namlen name bytes follow */
};

/*
 * Fill-state for the staging buffer that xfs_hack_filldir() writes into
 * and xfs_file_readdir() drains.
 */
struct hack_callback {
	char *dirent;		/* kmalloc'd staging buffer */
	size_t len;		/* total capacity of dirent in bytes */
	size_t used;		/* bytes of dirent consumed so far */
};
/*
 * filldir callback used by the double-buffering readdir workaround:
 * serialize one directory entry into the hack_callback staging buffer.
 *
 * Returns 0 on success, or -EINVAL once the next record would no
 * longer fit, which makes xfs_readdir stop feeding us entries.
 */
STATIC int
xfs_hack_filldir(
	void		*__buf,
	const char	*name,
	int		namlen,
	loff_t		offset,
	u64		ino,
	unsigned int	d_type)
{
	struct hack_callback	*cb = __buf;
	struct hack_dirent	*entry;
	size_t			reclen;

	/* Header plus the (unterminated) name bytes. */
	reclen = sizeof(struct hack_dirent) + namlen;

	/* Out of room in the staging buffer: tell the caller to stop. */
	if (cb->used + reclen > cb->len)
		return -EINVAL;

	/* Append the record immediately after the previous one. */
	entry = (struct hack_dirent *)(cb->dirent + cb->used);
	entry->namlen = namlen;
	entry->offset = offset;
	entry->ino = ino;
	entry->d_type = d_type;
	memcpy(entry->name, name, namlen);

	cb->used += reclen;
	return 0;
}
/*
 * Double-buffering readdir: pull a batch of entries from xfs_readdir()
 * into a private kmalloc'd buffer via xfs_hack_filldir(), then replay
 * them through the caller's filldir.  This keeps the caller's filldir
 * (e.g. nfsd, which may re-enter ->lookup) from running inside the
 * low-level btree code, which would deadlock -- see the comment above
 * the #if 0 block.
 *
 * Returns 0 on success or a negated errno.
 */
STATIC int
xfs_file_readdir(
	struct file *filp,
	void *dirent,
	filldir_t filldir)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	xfs_inode_t *ip = XFS_I(inode);
	struct hack_callback buf;
	struct hack_dirent *de;
	int error;
	loff_t size;
	int eof = 0;	/* NOTE(review): never set; the loop exits only via
			 * break (error / no progress) or goto done */
	xfs_off_t start_offset, curr_offset, offset;

	/*
	 * Try fairly hard to get memory: start at one page and halve the
	 * request on failure, down to a 1k minimum.
	 */
	buf.len = PAGE_CACHE_SIZE;
	do {
		buf.dirent = kmalloc(buf.len, GFP_KERNEL);
		if (buf.dirent)
			break;
		buf.len >>= 1;
	} while (buf.len >= 1024);

	if (!buf.dirent)
		return -ENOMEM;

	/*
	 * 0x7fffffff is the clamped "end of directory" f_pos (see the
	 * masking below); map it to the unclamped sentinel xfs_readdir
	 * recognizes.  Presumably this keeps d_off within 31 bits for
	 * 32-bit/NFS consumers -- TODO confirm.
	 */
	curr_offset = filp->f_pos;
	if (curr_offset == 0x7fffffff)
		offset = 0xffffffff;
	else
		offset = filp->f_pos;

	while (!eof) {
		int reclen;
		start_offset = offset;

		/* Refill the staging buffer with the next batch. */
		buf.used = 0;
		error = -xfs_readdir(ip, &buf, buf.len, &offset,
				     xfs_hack_filldir);
		/* Error, or no forward progress (end of directory). */
		if (error || offset == start_offset) {
			size = 0;
			break;
		}

		/* Replay the batch through the caller's filldir. */
		size = buf.used;
		de = (struct hack_dirent *)buf.dirent;
		while (size > 0) {
			/* Nonzero filldir return: caller's buffer is full. */
			if (filldir(dirent, de->name, de->namlen,
					curr_offset & 0x7fffffff,
					de->ino, de->d_type)) {
				goto done;
			}

			reclen = sizeof(struct hack_dirent) + de->namlen;
			size -= reclen;
			/* Each entry carries the cookie for its successor. */
			curr_offset = de->offset /* & 0x7fffffff */;
			de = (struct hack_dirent *)((char *)de + reclen);
		}
	}

 done:
	if (!error) {
		/* size == 0: drained everything; resume after this batch.
		 * Otherwise filldir bailed mid-batch: resume at the entry
		 * we could not deliver. */
		if (size == 0)
			filp->f_pos = offset & 0x7fffffff;
		else if (de)
			filp->f_pos = curr_offset;
	}

	kfree(buf.dirent);
	return error;
}
#endif
STATIC int STATIC int
xfs_file_mmap( xfs_file_mmap(
......
...@@ -1047,24 +1047,20 @@ xfs_ioc_bulkstat( ...@@ -1047,24 +1047,20 @@ xfs_ioc_bulkstat(
if ((count = bulkreq.icount) <= 0) if ((count = bulkreq.icount) <= 0)
return -XFS_ERROR(EINVAL); return -XFS_ERROR(EINVAL);
if (bulkreq.ubuffer == NULL)
return -XFS_ERROR(EINVAL);
if (cmd == XFS_IOC_FSINUMBERS) if (cmd == XFS_IOC_FSINUMBERS)
error = xfs_inumbers(mp, &inlast, &count, error = xfs_inumbers(mp, &inlast, &count,
bulkreq.ubuffer, xfs_inumbers_fmt); bulkreq.ubuffer, xfs_inumbers_fmt);
else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
error = xfs_bulkstat_single(mp, &inlast, error = xfs_bulkstat_single(mp, &inlast,
bulkreq.ubuffer, &done); bulkreq.ubuffer, &done);
else { /* XFS_IOC_FSBULKSTAT */ else /* XFS_IOC_FSBULKSTAT */
if (count == 1 && inlast != 0) { error = xfs_bulkstat(mp, &inlast, &count,
inlast++; (bulkstat_one_pf)xfs_bulkstat_one, NULL,
error = xfs_bulkstat_single(mp, &inlast, sizeof(xfs_bstat_t), bulkreq.ubuffer,
bulkreq.ubuffer, &done); BULKSTAT_FG_QUICK, &done);
} else {
error = xfs_bulkstat(mp, &inlast, &count,
(bulkstat_one_pf)xfs_bulkstat_one, NULL,
sizeof(xfs_bstat_t), bulkreq.ubuffer,
BULKSTAT_FG_QUICK, &done);
}
}
if (error) if (error)
return -error; return -error;
......
...@@ -291,6 +291,9 @@ xfs_ioc_bulkstat_compat( ...@@ -291,6 +291,9 @@ xfs_ioc_bulkstat_compat(
if ((count = bulkreq.icount) <= 0) if ((count = bulkreq.icount) <= 0)
return -XFS_ERROR(EINVAL); return -XFS_ERROR(EINVAL);
if (bulkreq.ubuffer == NULL)
return -XFS_ERROR(EINVAL);
if (cmd == XFS_IOC_FSINUMBERS) if (cmd == XFS_IOC_FSINUMBERS)
error = xfs_inumbers(mp, &inlast, &count, error = xfs_inumbers(mp, &inlast, &count,
bulkreq.ubuffer, xfs_inumbers_fmt_compat); bulkreq.ubuffer, xfs_inumbers_fmt_compat);
......
...@@ -117,7 +117,7 @@ xfs_ichgtime( ...@@ -117,7 +117,7 @@ xfs_ichgtime(
*/ */
SYNCHRONIZE(); SYNCHRONIZE();
ip->i_update_core = 1; ip->i_update_core = 1;
if (!(inode->i_state & I_SYNC)) if (!(inode->i_state & I_NEW))
mark_inode_dirty_sync(inode); mark_inode_dirty_sync(inode);
} }
...@@ -169,7 +169,7 @@ xfs_ichgtime_fast( ...@@ -169,7 +169,7 @@ xfs_ichgtime_fast(
*/ */
SYNCHRONIZE(); SYNCHRONIZE();
ip->i_update_core = 1; ip->i_update_core = 1;
if (!(inode->i_state & I_SYNC)) if (!(inode->i_state & I_NEW))
mark_inode_dirty_sync(inode); mark_inode_dirty_sync(inode);
} }
......
...@@ -1008,6 +1008,9 @@ xfs_qm_sync( ...@@ -1008,6 +1008,9 @@ xfs_qm_sync(
boolean_t nowait; boolean_t nowait;
int error; int error;
if (! XFS_IS_QUOTA_ON(mp))
return 0;
restarts = 0; restarts = 0;
/* /*
* We won't block unless we are asked to. * We won't block unless we are asked to.
......
...@@ -267,7 +267,7 @@ xfs_iget_core( ...@@ -267,7 +267,7 @@ xfs_iget_core(
icl = NULL; icl = NULL;
if (radix_tree_gang_lookup(&pag->pag_ici_root, (void**)&iq, if (radix_tree_gang_lookup(&pag->pag_ici_root, (void**)&iq,
first_index, 1)) { first_index, 1)) {
if ((iq->i_ino & mask) == first_index) if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) == first_index)
icl = iq->i_cluster; icl = iq->i_cluster;
} }
......
...@@ -316,6 +316,8 @@ xfs_bulkstat_use_dinode( ...@@ -316,6 +316,8 @@ xfs_bulkstat_use_dinode(
return 1; return 1;
} }
#define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size)
/* /*
* Return stat information in bulk (by-inode) for the filesystem. * Return stat information in bulk (by-inode) for the filesystem.
*/ */
...@@ -353,7 +355,7 @@ xfs_bulkstat( ...@@ -353,7 +355,7 @@ xfs_bulkstat(
xfs_inobt_rec_incore_t *irbp; /* current irec buffer pointer */ xfs_inobt_rec_incore_t *irbp; /* current irec buffer pointer */
xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */ xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */ xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */
xfs_ino_t lastino=0; /* last inode number returned */ xfs_ino_t lastino; /* last inode number returned */
int nbcluster; /* # of blocks in a cluster */ int nbcluster; /* # of blocks in a cluster */
int nicluster; /* # of inodes in a cluster */ int nicluster; /* # of inodes in a cluster */
int nimask; /* mask for inode clusters */ int nimask; /* mask for inode clusters */
...@@ -373,6 +375,7 @@ xfs_bulkstat( ...@@ -373,6 +375,7 @@ xfs_bulkstat(
* Get the last inode value, see if there's nothing to do. * Get the last inode value, see if there's nothing to do.
*/ */
ino = (xfs_ino_t)*lastinop; ino = (xfs_ino_t)*lastinop;
lastino = ino;
dip = NULL; dip = NULL;
agno = XFS_INO_TO_AGNO(mp, ino); agno = XFS_INO_TO_AGNO(mp, ino);
agino = XFS_INO_TO_AGINO(mp, ino); agino = XFS_INO_TO_AGINO(mp, ino);
...@@ -382,6 +385,9 @@ xfs_bulkstat( ...@@ -382,6 +385,9 @@ xfs_bulkstat(
*ubcountp = 0; *ubcountp = 0;
return 0; return 0;
} }
if (!ubcountp || *ubcountp <= 0) {
return EINVAL;
}
ubcount = *ubcountp; /* statstruct's */ ubcount = *ubcountp; /* statstruct's */
ubleft = ubcount * statstruct_size; /* bytes */ ubleft = ubcount * statstruct_size; /* bytes */
*ubcountp = ubelem = 0; *ubcountp = ubelem = 0;
...@@ -402,7 +408,8 @@ xfs_bulkstat( ...@@ -402,7 +408,8 @@ xfs_bulkstat(
* inode returned; 0 means start of the allocation group. * inode returned; 0 means start of the allocation group.
*/ */
rval = 0; rval = 0;
while (ubleft >= statstruct_size && agno < mp->m_sb.sb_agcount) { while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
cond_resched();
bp = NULL; bp = NULL;
down_read(&mp->m_peraglock); down_read(&mp->m_peraglock);
error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
...@@ -499,6 +506,7 @@ xfs_bulkstat( ...@@ -499,6 +506,7 @@ xfs_bulkstat(
break; break;
error = xfs_inobt_lookup_ge(cur, agino, 0, 0, error = xfs_inobt_lookup_ge(cur, agino, 0, 0,
&tmp); &tmp);
cond_resched();
} }
/* /*
* If ran off the end of the ag either with an error, * If ran off the end of the ag either with an error,
...@@ -542,6 +550,7 @@ xfs_bulkstat( ...@@ -542,6 +550,7 @@ xfs_bulkstat(
*/ */
agino = gino + XFS_INODES_PER_CHUNK; agino = gino + XFS_INODES_PER_CHUNK;
error = xfs_inobt_increment(cur, 0, &tmp); error = xfs_inobt_increment(cur, 0, &tmp);
cond_resched();
} }
/* /*
* Drop the btree buffers and the agi buffer. * Drop the btree buffers and the agi buffer.
...@@ -555,12 +564,12 @@ xfs_bulkstat( ...@@ -555,12 +564,12 @@ xfs_bulkstat(
*/ */
irbufend = irbp; irbufend = irbp;
for (irbp = irbuf; for (irbp = irbuf;
irbp < irbufend && ubleft >= statstruct_size; irbp++) { irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
/* /*
* Now process this chunk of inodes. * Now process this chunk of inodes.
*/ */
for (agino = irbp->ir_startino, chunkidx = clustidx = 0; for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
ubleft > 0 && XFS_BULKSTAT_UBLEFT(ubleft) &&
irbp->ir_freecount < XFS_INODES_PER_CHUNK; irbp->ir_freecount < XFS_INODES_PER_CHUNK;
chunkidx++, clustidx++, agino++) { chunkidx++, clustidx++, agino++) {
ASSERT(chunkidx < XFS_INODES_PER_CHUNK); ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
...@@ -663,15 +672,13 @@ xfs_bulkstat( ...@@ -663,15 +672,13 @@ xfs_bulkstat(
ubleft, private_data, ubleft, private_data,
bno, &ubused, dip, &fmterror); bno, &ubused, dip, &fmterror);
if (fmterror == BULKSTAT_RV_NOTHING) { if (fmterror == BULKSTAT_RV_NOTHING) {
if (error == EFAULT) { if (error && error != ENOENT &&
ubleft = 0; error != EINVAL) {
rval = error;
break;
}
else if (error == ENOMEM)
ubleft = 0; ubleft = 0;
else rval = error;
lastino = ino; break;
}
lastino = ino;
continue; continue;
} }
if (fmterror == BULKSTAT_RV_GIVEUP) { if (fmterror == BULKSTAT_RV_GIVEUP) {
...@@ -686,6 +693,8 @@ xfs_bulkstat( ...@@ -686,6 +693,8 @@ xfs_bulkstat(
ubelem++; ubelem++;
lastino = ino; lastino = ino;
} }
cond_resched();
} }
if (bp) if (bp)
...@@ -694,11 +703,12 @@ xfs_bulkstat( ...@@ -694,11 +703,12 @@ xfs_bulkstat(
/* /*
* Set up for the next loop iteration. * Set up for the next loop iteration.
*/ */
if (ubleft > 0) { if (XFS_BULKSTAT_UBLEFT(ubleft)) {
if (end_of_ag) { if (end_of_ag) {
agno++; agno++;
agino = 0; agino = 0;
} } else
agino = XFS_INO_TO_AGINO(mp, lastino);
} else } else
break; break;
} }
...@@ -707,6 +717,11 @@ xfs_bulkstat( ...@@ -707,6 +717,11 @@ xfs_bulkstat(
*/ */
kmem_free(irbuf, irbsize); kmem_free(irbuf, irbsize);
*ubcountp = ubelem; *ubcountp = ubelem;
/*
* Found some inodes, return them now and return the error next time.
*/
if (ubelem)
rval = 0;
if (agno >= mp->m_sb.sb_agcount) { if (agno >= mp->m_sb.sb_agcount) {
/* /*
* If we ran out of filesystem, mark lastino as off * If we ran out of filesystem, mark lastino as off
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册