    Btrfs: turbo charge fsync · 5dc562c5
    Committed by Josef Bacik
    At least for the VM workload.  Currently on fsync we will
    
    1) Truncate all items in the log tree for the given inode if they exist
    
    and
    
    2) Copy all items for a given inode into the log
    
    The problem with this is that for things like VMs you can have lots of
    extents from the fragmented writing behavior, and worse yet you may have
    only modified a few extents, not the entire thing.  This patch fixes the
    problem by tracking which transid modified each extent, and then when we do
    the tree logging we find all of the extents we've modified in the current
    transaction, sort them and commit them.  We also only truncate up to the
    xattrs of the inode and copy that stuff in normally, and then just drop any
    extents in the range we have that already exist in the log.  Here are some
    numbers from a 50MB fio job that does random writes and fsync()s after every
    write
    
    		Original	Patched
    SATA drive	82KB/s		140KB/s
    Fusion drive	431KB/s		2532KB/s
    
    So around 2-6 times faster depending on your hardware.  There are a few
    corner cases; for example, if you truncate at all we have to do it the old
    way, since there is no way to be sure that what is in the log is ok.  This
    could probably be done smarter, but if you write-fsync-truncate-write-fsync
    you deserve what you get.  All this tracking is in RAM of course, so if your
    inode gets evicted from cache and you read it back in and fsync it, we'll do
    it the slow way if we are still in the same transaction that last modified
    the inode.
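
    The collect-sort-log pass described above can be sketched in a few lines
    of C.  This is a minimal illustration, not the patch's actual code:
    log_one_extent() is a hypothetical helper, the transid argument stands in
    for the running transaction's id, and the locking and error handling are
    simplified.

#include <linux/list.h>
#include <linux/list_sort.h>
#include "extent_map.h"

static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct extent_map *em1 = list_entry(a, struct extent_map, list);
	struct extent_map *em2 = list_entry(b, struct extent_map, list);

	if (em1->start < em2->start)
		return -1;
	if (em1->start > em2->start)
		return 1;
	return 0;
}

static int log_modified_extents(struct extent_map_tree *tree, u64 transid)
{
	struct extent_map *em, *n;
	LIST_HEAD(extents);
	int ret = 0;

	/* pull out only the extents touched in this transaction */
	write_lock(&tree->lock);
	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
		if (em->generation != transid)
			continue;
		atomic_inc(&em->refs);
		list_move_tail(&em->list, &extents);
	}
	write_unlock(&tree->lock);

	/* sort by file offset so log items are copied in order */
	list_sort(NULL, &extents, extent_cmp);

	list_for_each_entry_safe(em, n, &extents, list) {
		list_del_init(&em->list);
		if (!ret)
			ret = log_one_extent(em);	/* hypothetical */
		free_extent_map(em);
	}
	return ret;
}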
    
    The coolest part of this is that it requires no changes to the recovery
    code, so if you fsync with this patch, crash, and boot an old kernel, it
    will run the recovery and be a-ok.  I have tested this pretty thoroughly
    with an fsync tester and everything comes back fine, as well as with
    xfstests.  Thanks,
    Signed-off-by: Josef Bacik <jbacik@fusionio.com>
extent_map.h
#ifndef __EXTENTMAP__
#define __EXTENTMAP__

#include <linux/rbtree.h>

#define EXTENT_MAP_LAST_BYTE (u64)-4
#define EXTENT_MAP_HOLE (u64)-3
#define EXTENT_MAP_INLINE (u64)-2
#define EXTENT_MAP_DELALLOC (u64)-1

/* bits for the flags field */
#define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */
#define EXTENT_FLAG_COMPRESSED 1
#define EXTENT_FLAG_VACANCY 2 /* no file extent item found */
#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */

struct extent_map {
	struct rb_node rb_node;

	/* all of these are in bytes */
	u64 start;
	u64 len;
	u64 orig_start;
	u64 block_start;
	u64 block_len;
	/* transid of the transaction that last modified this extent */
	u64 generation;
	unsigned long flags;
	struct block_device *bdev;
	atomic_t refs;
	unsigned int in_tree;
	unsigned int compress_type;
	/* entry on the tree's modified_extents list, for fsync logging */
	struct list_head list;
};

struct extent_map_tree {
	struct rb_root map;
	/* extents modified in the current transaction */
	struct list_head modified_extents;
	rwlock_t lock;
};

static inline u64 extent_map_end(struct extent_map *em)
{
	if (em->start + em->len < em->start)
		return (u64)-1;
	return em->start + em->len;
}

static inline u64 extent_map_block_end(struct extent_map *em)
{
	if (em->block_start + em->block_len < em->block_start)
		return (u64)-1;
	return em->block_start + em->block_len;
}

void extent_map_tree_init(struct extent_map_tree *tree);
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len);
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em);
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);

struct extent_map *alloc_extent_map(void);
void free_extent_map(struct extent_map *em);
int __init extent_map_init(void);
void extent_map_exit(void);
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen);
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len);
#endif
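
For reference, here is a minimal sketch of how callers typically drive this
interface under the tree's rwlock.  It is illustrative only, and it assumes
lookup_extent_mapping() takes a reference on the returned map that the
caller must drop.

#include <linux/spinlock.h>
#include "extent_map.h"

static struct extent_map *get_mapping(struct extent_map_tree *tree,
				      u64 start, u64 len)
{
	struct extent_map *em;

	read_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);	/* takes a ref */
	read_unlock(&tree->lock);

	return em;	/* caller releases with free_extent_map() */
}

Insertion is the mirror image: take write_lock(&tree->lock), call
add_extent_mapping(), then write_unlock().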