Commit 34a52c6c authored by Christoph Hellwig, committed by Alex Elder

xfs: move I/O type flags into xfs_aops.c

The IOMAP_ flags are now only used inside xfs_aops.c for extent
probing and I/O completion tracking, so move them here, and rename
them to IO_* as there's no mapping involved at all.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
Parent 207d0416
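As a quick illustration of the completion-type dispatch that this patch keeps private to xfs_aops.c, here is a minimal userspace sketch. Only the IO_* values and the two workqueue names are taken from the patch; the structure, function names, and printed output are illustrative stand-ins, not the kernel code.

#include <stdio.h>

/* The new flag values introduced by this patch in xfs_aops.c. */
enum {
        IO_READ,        /* mapping for a read */
        IO_DELAY,       /* mapping covers delalloc region */
        IO_UNWRITTEN,   /* mapping covers allocated but uninitialized data */
        IO_NEW          /* just allocated */
};

/* Illustrative stand-in for the real xfs_ioend structure. */
struct toy_ioend {
        int io_type;
        int io_error;
};

/*
 * Mirrors the dispatch in xfs_finish_ioend(): unwritten-extent I/O goes
 * to the conversion workqueue, everything else to the data workqueue.
 * Here the "workqueues" are just names that get printed.
 */
static void toy_finish_ioend(const struct toy_ioend *ioend)
{
        const char *wq = (ioend->io_type == IO_UNWRITTEN)
                        ? "xfsconvertd_workqueue" : "xfsdatad_workqueue";
        printf("io_type=%d -> %s\n", ioend->io_type, wq);
}

int main(void)
{
        struct toy_ioend unwritten = { .io_type = IO_UNWRITTEN };
        struct toy_ioend plain = { .io_type = IO_NEW };

        toy_finish_ioend(&unwritten);
        toy_finish_ioend(&plain);
        return 0;
}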
@@ -45,6 +45,15 @@
#include <linux/pagevec.h>
#include <linux/writeback.h>
+/*
+* Types of I/O for bmap clustering and I/O completion tracking.
+*/
+enum {
+IO_READ, /* mapping for a read */
+IO_DELAY, /* mapping covers delalloc region */
+IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */
+IO_NEW /* just allocated */
+};
/*
* Prime number of hash buckets since address is used as the key.
@@ -184,7 +193,7 @@ xfs_setfilesize(
xfs_fsize_t isize;
ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-ASSERT(ioend->io_type != IOMAP_READ);
+ASSERT(ioend->io_type != IO_READ);
if (unlikely(ioend->io_error))
return 0;
@@ -215,7 +224,7 @@ xfs_finish_ioend(
if (atomic_dec_and_test(&ioend->io_remaining)) {
struct workqueue_struct *wq;
-wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+wq = (ioend->io_type == IO_UNWRITTEN) ?
xfsconvertd_workqueue : xfsdatad_workqueue;
queue_work(wq, &ioend->io_work);
if (wait)
@@ -238,7 +247,7 @@ xfs_end_io(
* For unwritten extents we need to issue transactions to convert a
* range to normal written extens after the data I/O has finished.
*/
-if (ioend->io_type == IOMAP_UNWRITTEN &&
+if (ioend->io_type == IO_UNWRITTEN &&
likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
@@ -251,7 +260,7 @@ xfs_end_io(
* We might have to update the on-disk file size after extending
* writes.
*/
-if (ioend->io_type != IOMAP_READ) {
+if (ioend->io_type != IO_READ) {
error = xfs_setfilesize(ioend);
ASSERT(!error || error == EAGAIN);
}
@@ -724,11 +733,11 @@ xfs_is_delayed_page(
bh = head = page_buffers(page);
do {
if (buffer_unwritten(bh))
-acceptable = (type == IOMAP_UNWRITTEN);
+acceptable = (type == IO_UNWRITTEN);
else if (buffer_delay(bh))
-acceptable = (type == IOMAP_DELAY);
+acceptable = (type == IO_DELAY);
else if (buffer_dirty(bh) && buffer_mapped(bh))
-acceptable = (type == IOMAP_NEW);
+acceptable = (type == IO_NEW);
else
break;
} while ((bh = bh->b_this_page) != head);
@@ -812,9 +821,9 @@ xfs_convert_page(
if (buffer_unwritten(bh) || buffer_delay(bh)) {
if (buffer_unwritten(bh))
-type = IOMAP_UNWRITTEN;
+type = IO_UNWRITTEN;
else
-type = IOMAP_DELAY;
+type = IO_DELAY;
if (!xfs_iomap_valid(inode, imap, offset)) {
done = 1;
@@ -836,7 +845,7 @@ xfs_convert_page(
page_dirty--;
count++;
} else {
-type = IOMAP_NEW;
+type = IO_NEW;
if (buffer_mapped(bh) && all_bh && startio) {
lock_buffer(bh);
xfs_add_to_ioend(inode, bh, offset,
@@ -940,7 +949,7 @@ xfs_aops_discard_page(
loff_t offset = page_offset(page);
ssize_t len = 1 << inode->i_blkbits;
-if (!xfs_is_delayed_page(page, IOMAP_DELAY))
+if (!xfs_is_delayed_page(page, IO_DELAY))
goto out_invalidate;
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -1107,7 +1116,7 @@ xfs_page_state_convert(
bh = head = page_buffers(page);
offset = page_offset(page);
flags = BMAPI_READ;
-type = IOMAP_NEW;
+type = IO_NEW;
/* TODO: cleanup count and page_dirty */
@@ -1150,13 +1159,13 @@ xfs_page_state_convert(
iomap_valid = 0;
if (buffer_unwritten(bh)) {
-type = IOMAP_UNWRITTEN;
+type = IO_UNWRITTEN;
flags = BMAPI_WRITE | BMAPI_IGNSTATE;
} else if (buffer_delay(bh)) {
-type = IOMAP_DELAY;
+type = IO_DELAY;
flags = BMAPI_ALLOCATE | trylock;
} else {
-type = IOMAP_NEW;
+type = IO_NEW;
flags = BMAPI_WRITE | BMAPI_MMAP;
}
@@ -1170,7 +1179,7 @@ xfs_page_state_convert(
* for unwritten extent conversion.
*/
new_ioend = 1;
-if (type == IOMAP_NEW) {
+if (type == IO_NEW) {
size = xfs_probe_cluster(inode,
page, bh, head, 0);
} else {
@@ -1215,14 +1224,14 @@ xfs_page_state_convert(
}
/*
-* We set the type to IOMAP_NEW in case we are doing a
+* We set the type to IO_NEW in case we are doing a
* small write at EOF that is extending the file but
* without needing an allocation. We need to update the
* file size on I/O completion in this case so it is
* the same case as having just allocated a new extent
* that we are writing into for the first time.
*/
-type = IOMAP_NEW;
+type = IO_NEW;
if (trylock_buffer(bh)) {
ASSERT(buffer_mapped(bh));
if (iomap_valid)
@@ -1594,7 +1603,7 @@ xfs_end_io_direct(
*/
ioend->io_offset = offset;
ioend->io_size = size;
-if (ioend->io_type == IOMAP_READ) {
+if (ioend->io_type == IO_READ) {
xfs_finish_ioend(ioend, 0);
} else if (private && size > 0) {
xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
@@ -1605,7 +1614,7 @@ xfs_end_io_direct(
* didn't map an unwritten extent so switch it's completion
* handler.
*/
-ioend->io_type = IOMAP_NEW;
+ioend->io_type = IO_NEW;
xfs_finish_ioend(ioend, 0);
}
@@ -1633,7 +1642,7 @@ xfs_vm_direct_IO(
bdev = xfs_find_bdev_for_inode(inode);
iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
-IOMAP_UNWRITTEN : IOMAP_READ);
+IO_UNWRITTEN : IO_READ);
ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
offset, nr_segs,
@@ -18,14 +18,6 @@
#ifndef __XFS_IOMAP_H__
#define __XFS_IOMAP_H__
-typedef enum { /* iomap_flags values */
-IOMAP_READ = 0, /* mapping for a read */
-IOMAP_DELAY = 0x04, /* mapping covers delalloc region */
-IOMAP_UNWRITTEN = 0x20, /* mapping covers allocated */
-/* but uninitialized file data */
-IOMAP_NEW = 0x40 /* just allocate */
-} iomap_flags_t;
typedef enum {
/* base extent manipulation calls */
BMAPI_READ = (1 << 0), /* read extents */