提交 801cc4e1 编写于 作者: B Brian Foster 提交者: Dave Chinner

xfs: debug mode forced buffered write failure

Add a DEBUG mode-only sysfs knob to enable forced buffered write
failure. An additional side effect of this mode is brute force killing
of delayed allocation blocks in the range of the write. The latter is
the prime motivation behind this patch, as userspace test
infrastructure requires a reliable mechanism to create and split
delalloc extents without causing extent conversion.

Certain fallocate operations (i.e., zero range) were used for this in
the past, but the implementations have changed such that delalloc
extents are flushed and converted to real blocks, rendering the test
useless.
Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <david@fromorbit.com>
上级 36f90b0a
...@@ -1752,6 +1752,7 @@ xfs_vm_write_failed( ...@@ -1752,6 +1752,7 @@ xfs_vm_write_failed(
loff_t from = pos & (PAGE_CACHE_SIZE - 1); loff_t from = pos & (PAGE_CACHE_SIZE - 1);
loff_t to = from + len; loff_t to = from + len;
struct buffer_head *bh, *head; struct buffer_head *bh, *head;
struct xfs_mount *mp = XFS_I(inode)->i_mount;
/* /*
* The request pos offset might be 32 or 64 bit, this is all fine * The request pos offset might be 32 or 64 bit, this is all fine
...@@ -1786,7 +1787,8 @@ xfs_vm_write_failed( ...@@ -1786,7 +1787,8 @@ xfs_vm_write_failed(
if (!buffer_delay(bh)) if (!buffer_delay(bh))
continue; continue;
if (!buffer_new(bh) && block_offset < i_size_read(inode)) if (!xfs_mp_fail_writes(mp) && !buffer_new(bh) &&
block_offset < i_size_read(inode))
continue; continue;
xfs_vm_kill_delalloc_range(inode, block_offset, xfs_vm_kill_delalloc_range(inode, block_offset,
...@@ -1824,6 +1826,7 @@ xfs_vm_write_begin( ...@@ -1824,6 +1826,7 @@ xfs_vm_write_begin(
pgoff_t index = pos >> PAGE_CACHE_SHIFT; pgoff_t index = pos >> PAGE_CACHE_SHIFT;
struct page *page; struct page *page;
int status; int status;
struct xfs_mount *mp = XFS_I(mapping->host)->i_mount;
ASSERT(len <= PAGE_CACHE_SIZE); ASSERT(len <= PAGE_CACHE_SIZE);
...@@ -1832,6 +1835,8 @@ xfs_vm_write_begin( ...@@ -1832,6 +1835,8 @@ xfs_vm_write_begin(
return -ENOMEM; return -ENOMEM;
status = __block_write_begin(page, pos, len, xfs_get_blocks); status = __block_write_begin(page, pos, len, xfs_get_blocks);
if (xfs_mp_fail_writes(mp))
status = -EIO;
if (unlikely(status)) { if (unlikely(status)) {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
size_t isize = i_size_read(inode); size_t isize = i_size_read(inode);
...@@ -1844,6 +1849,8 @@ xfs_vm_write_begin( ...@@ -1844,6 +1849,8 @@ xfs_vm_write_begin(
* allocated in this write, not blocks that were previously * allocated in this write, not blocks that were previously
* written successfully. * written successfully.
*/ */
if (xfs_mp_fail_writes(mp))
isize = 0;
if (pos + len > isize) { if (pos + len > isize) {
ssize_t start = max_t(ssize_t, pos, isize); ssize_t start = max_t(ssize_t, pos, isize);
......
...@@ -147,6 +147,17 @@ typedef struct xfs_mount { ...@@ -147,6 +147,17 @@ typedef struct xfs_mount {
* to various other kinds of pain inflicted on the pNFS server. * to various other kinds of pain inflicted on the pNFS server.
*/ */
__uint32_t m_generation; __uint32_t m_generation;
#ifdef DEBUG
/*
* DEBUG mode instrumentation to test and/or trigger delayed allocation
* block killing in the event of failed writes. When enabled, all
* buffered writes are forced to fail. All delalloc blocks in the range
* of the write (including pre-existing delalloc blocks!) are tossed as
* part of the write failure error handling sequence.
*/
bool m_fail_writes;
#endif
} xfs_mount_t; } xfs_mount_t;
/* /*
...@@ -264,6 +275,20 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d) ...@@ -264,6 +275,20 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks); return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
} }
#ifdef DEBUG
/*
 * Report whether forced buffered write failure is enabled on this mount.
 * Toggled via the per-mount "fail_writes" sysfs knob (DEBUG builds only).
 */
static inline bool
xfs_mp_fail_writes(struct xfs_mount *mp)
{
	return mp->m_fail_writes;
}
#else
/* Non-DEBUG builds never force buffered write failures. */
static inline bool
xfs_mp_fail_writes(struct xfs_mount *mp)
{
	/* was "return 0" — return the bool literal from a bool function */
	return false;
}
#endif
/* /*
* Per-ag incore structure, copies of information in agf and agi, to improve the * Per-ag incore structure, copies of information in agf and agi, to improve the
* performance of allocation group selection. * performance of allocation group selection.
......
...@@ -18,10 +18,13 @@ ...@@ -18,10 +18,13 @@
#include "xfs.h" #include "xfs.h"
#include "xfs_sysfs.h" #include "xfs_sysfs.h"
#include "xfs_format.h"
#include "xfs_log_format.h" #include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_log_priv.h" #include "xfs_log_priv.h"
#include "xfs_stats.h" #include "xfs_stats.h"
#include "xfs_mount.h"
struct xfs_sysfs_attr { struct xfs_sysfs_attr {
struct attribute attr; struct attribute attr;
...@@ -45,16 +48,6 @@ to_attr(struct attribute *attr) ...@@ -45,16 +48,6 @@ to_attr(struct attribute *attr)
#define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr #define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr
/*
* xfs_mount kobject. This currently has no attributes and thus no need for show
* and store helpers. The mp kobject serves as the per-mount parent object that
* is identified by the fsname under sysfs.
*/
struct kobj_type xfs_mp_ktype = {
.release = xfs_sysfs_release,
};
STATIC ssize_t STATIC ssize_t
xfs_sysfs_object_show( xfs_sysfs_object_show(
struct kobject *kobject, struct kobject *kobject,
...@@ -83,6 +76,71 @@ static const struct sysfs_ops xfs_sysfs_ops = { ...@@ -83,6 +76,71 @@ static const struct sysfs_ops xfs_sysfs_ops = {
.store = xfs_sysfs_object_store, .store = xfs_sysfs_object_store,
}; };
/*
* xfs_mount kobject. The mp kobject also serves as the per-mount parent object
* that is identified by the fsname under sysfs.
*/
/*
 * Map a generic kobject back to the xfs_mount that embeds it: resolve the
 * wrapping xfs_kobj first, then the mount via its m_kobj member.
 */
static inline struct xfs_mount *
to_mp(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_mount, m_kobj);
}
#ifdef DEBUG
/*
 * sysfs store handler for the DEBUG-only "fail_writes" knob.  Accepts
 * exactly "0" (disable) or "1" (enable forced buffered write failure);
 * any other value is rejected with -EINVAL.
 */
STATIC ssize_t
fail_writes_store(
	struct kobject		*kobject,
	const char		*buf,
	size_t			count)
{
	struct xfs_mount	*mp = to_mp(kobject);
	int			error;
	int			val;

	error = kstrtoint(buf, 0, &val);
	if (error)
		return error;

	switch (val) {
	case 0:
		mp->m_fail_writes = false;
		break;
	case 1:
		mp->m_fail_writes = true;
		break;
	default:
		return -EINVAL;
	}

	return count;
}
/*
 * sysfs show handler for the DEBUG-only "fail_writes" knob: prints "1\n"
 * when forced buffered write failure is enabled, "0\n" otherwise.
 */
STATIC ssize_t
fail_writes_show(
	struct kobject		*kobject,
	char			*buf)
{
	struct xfs_mount	*mp = to_mp(kobject);
	int			val = mp->m_fail_writes ? 1 : 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
XFS_SYSFS_ATTR_RW(fail_writes);
#endif /* DEBUG */
/*
 * Attributes exposed on the per-mount kobject.  fail_writes is only
 * built (and thus only visible in sysfs) on DEBUG kernels.
 */
static struct attribute *xfs_mp_attrs[] = {
#ifdef DEBUG
	ATTR_LIST(fail_writes),
#endif
	NULL,
};
/*
 * xfs_mount kobject type.  The mp kobject serves as the per-mount parent
 * object identified by the fsname under sysfs; it now also carries the
 * attribute list above, so it needs sysfs_ops for show/store dispatch.
 */
struct kobj_type xfs_mp_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_mp_attrs,
};
#ifdef DEBUG #ifdef DEBUG
/* debug */ /* debug */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册