/*
 * extent_map.c -- in-memory extent map tracking (temporary home in btrfs,
 * see the note above btrfs_cache_create below).
 */
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/hardirq.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				       unsigned long extra_flags,
				       void (*ctor)(void *, struct kmem_cache *,
						    unsigned long));

16
static struct kmem_cache *extent_map_cache;
17

18
int __init extent_map_init(void)
19
{
20
	extent_map_cache = btrfs_cache_create("extent_map",
21
					    sizeof(struct extent_map), 0,
22
					    NULL);
23 24 25
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
26 27
}

28
void extent_map_exit(void)
29 30 31 32 33
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

34
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
35 36
{
	tree->map.rb_node = NULL;
37 38
	tree->last = NULL;
	spin_lock_init(&tree->lock);
39 40 41 42 43 44 45 46 47 48
}
EXPORT_SYMBOL(extent_map_tree_init);

/*
 * alloc_extent_map - allocate a new extent map.
 * @mask: gfp flags for the slab allocation
 *
 * Returns the new map with one reference held and in_tree/flags cleared,
 * or NULL if the allocation failed.  kmem_cache_alloc() returns NULL on
 * failure, never an ERR_PTR, so the former IS_ERR() test was dead code
 * and has been removed.
 */
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;

	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em)
		return NULL;
	em->in_tree = 0;
	em->flags = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
C
Chris Mason 已提交
57 58
	if (!em)
		return;
59
	WARN_ON(atomic_read(&em->refs) == 0);
60 61 62 63 64 65 66 67 68 69 70 71
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

/*
 * tree_insert - link @node into @root keyed by @offset.
 *
 * Returns NULL on success.  If an existing extent already covers
 * @offset, nothing is inserted and that extent's rb_node is returned.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **link = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *em;

	while (*link) {
		parent = *link;
		em = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!em->in_tree);

		if (offset < em->start) {
			link = &(*link)->rb_left;
		} else if (offset >= extent_map_end(em)) {
			link = &(*link)->rb_right;
		} else {
			/* offset falls inside an existing extent */
			return parent;
		}
	}

	em = rb_entry(node, struct extent_map, rb_node);
	em->in_tree = 1;
	rb_link_node(node, parent, link);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * __tree_search - look up the extent covering @offset in @root.
 *
 * Returns the covering rb_node, or NULL on a miss.  On a miss, when the
 * caller asked for them: *@prev_ret is set to the first extent whose end
 * is beyond @offset, and *@next_ret to the last extent whose start is at
 * or before @offset (either may be NULL at the tree edges).
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *node = root->rb_node;
	struct rb_node *last = NULL;
	struct rb_node *saved = NULL;
	struct extent_map *em;
	struct extent_map *last_em = NULL;

	/* standard rb-tree descent, remembering the last node visited */
	while (node) {
		em = rb_entry(node, struct extent_map, rb_node);
		last = node;
		last_em = em;

		WARN_ON(!em->in_tree);

		if (offset < em->start)
			node = node->rb_left;
		else if (offset >= extent_map_end(em))
			node = node->rb_right;
		else
			return node;	/* exact containment */
	}

	if (prev_ret) {
		/* advance to the first extent ending beyond offset */
		saved = last;
		while (last && offset >= extent_map_end(last_em)) {
			last = rb_next(last);
			last_em = rb_entry(last, struct extent_map, rb_node);
		}
		*prev_ret = last;
		last = saved;
	}

	if (next_ret) {
		/* back up to the last extent starting at/before offset */
		last_em = rb_entry(last, struct extent_map, rb_node);
		while (last && offset < last_em->start) {
			last = rb_prev(last);
			last_em = rb_entry(last, struct extent_map, rb_node);
		}
		*next_ret = last;
	}
	return NULL;
}

/*
 * tree_search - like __tree_search(), but on a miss return the
 * neighbouring node supplied through prev_ret instead of NULL.
 */
static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *neighbour;
	struct rb_node *found;

	found = __tree_search(root, offset, &neighbour, NULL);
	return found ? found : neighbour;
}

151
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
152
{
153 154 155 156 157 158 159 160 161 162 163 164 165
	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
166 167 168 169
	return 0;
}

/*
170
 * add_extent_mapping tries a simple forward/backward merge with existing
171 172 173 174 175 176 177
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
178
	struct extent_map *merge = NULL;
179 180
	struct rb_node *rb;

181
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
182
	if (rb) {
183
		merge = rb_entry(rb, struct extent_map, rb_node);
184 185 186 187 188 189 190
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
191 192 193 194 195 196 197 198
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
199 200
		}
	 }
201 202 203 204 205 206 207 208 209 210
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
	tree->last = em;
211 212 213 214 215
out:
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

216 217 218 219 220 221 222
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

223 224
/*
 * lookup_extent_mapping returns the first extent_map struct in the
225
 * tree that intersects the [start, len] range.  There may
226 227 228 229
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
230
					 u64 start, u64 len)
231 232 233
{
	struct extent_map *em;
	struct rb_node *rb_node;
234
	struct rb_node *prev = NULL; struct rb_node *next = NULL; u64 end = range_end(start, len); em = tree->last; if (em && end > em->start && start < extent_map_end(em)) goto found;
235

236 237 238
	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
239
		if (end > em->start && start < extent_map_end(em))
240 241 242 243
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
244
		if (end > em->start && start < extent_map_end(em))
245 246
			goto found;
	}
247 248 249 250 251 252 253 254 255
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
256 257 258 259 260 261
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

262
found:
263
	atomic_inc(&em->refs);
264
	tree->last = em;
265 266 267 268 269 270 271 272 273 274 275
out:
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	/* invalidate the lookup cache if it pointed at this extent */
	if (tree->last == em)
		tree->last = NULL;
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);