/* extent_map.c - extent map tree for btrfs */
1
#include <linux/err.h>
2
#include <linux/gfp.h>
3
#include <linux/slab.h>
4 5
#include <linux/module.h>
#include <linux/spinlock.h>
6
#include <linux/hardirq.h>
7 8
#include "extent_map.h"

9

10
static struct kmem_cache *extent_map_cache;
11

12
int __init extent_map_init(void)
13
{
14 15 16
	extent_map_cache = kmem_cache_create("extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
17 18 19
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
20 21
}

22
void extent_map_exit(void)
23 24 25 26 27
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

28 29 30 31 32 33 34 35
/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:		tree to initialize
 * @mask:		flags for memory allocations during tree operations
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
36
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
37 38
{
	tree->map.rb_node = NULL;
39
	rwlock_init(&tree->lock);
40 41
}

42 43 44 45 46 47 48 49
/**
 * alloc_extent_map - allocate new extent map structure
 * @mask:	memory allocation flags
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map()
 */
50 51 52 53 54 55 56
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
57
	em->flags = 0;
58 59 60 61
	atomic_set(&em->refs, 1);
	return em;
}

62 63 64 65 66 67 68
/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map beeing releasead
 *
 * Drops the reference out on @em by one and free the structure
 * if the reference count hits zero.
 */
69 70
void free_extent_map(struct extent_map *em)
{
C
Chris Mason 已提交
71 72
	if (!em)
		return;
73
	WARN_ON(atomic_read(&em->refs) == 0);
74 75 76 77 78 79 80 81 82
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}

/*
 * Link @node into @root keyed by @offset.  Returns NULL on success, or
 * the existing node whose [start, end) range already covers @offset
 * (in which case nothing is inserted).
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **link = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *em;

	while (*link) {
		parent = *link;
		em = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!em->in_tree);

		if (offset < em->start)
			link = &parent->rb_left;
		else if (offset >= extent_map_end(em))
			link = &parent->rb_right;
		else
			return parent;
	}

	em = rb_entry(node, struct extent_map, rb_node);
	em->in_tree = 1;
	rb_link_node(node, parent, link);
	rb_insert_color(node, root);
	return NULL;
}

C
Chris Mason 已提交
108 109 110 111
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
112
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
113 114
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
115
{
C
Chris Mason 已提交
116
	struct rb_node *n = root->rb_node;
117
	struct rb_node *prev = NULL;
118
	struct rb_node *orig_prev = NULL;
119 120
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;
121

C
Chris Mason 已提交
122
	while (n) {
123
		entry = rb_entry(n, struct extent_map, rb_node);
124 125 126
		prev = n;
		prev_entry = entry;

127 128
		WARN_ON(!entry->in_tree);

129 130
		if (offset < entry->start)
			n = n->rb_left;
131
		else if (offset >= extent_map_end(entry))
132 133 134 135
			n = n->rb_right;
		else
			return n;
	}
136 137 138

	if (prev_ret) {
		orig_prev = prev;
C
Chris Mason 已提交
139
		while (prev && offset >= extent_map_end(prev_entry)) {
140
			prev = rb_next(prev);
141
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
142 143 144 145 146 147
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
148
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
C
Chris Mason 已提交
149
		while (prev && offset < prev_entry->start) {
150
			prev = rb_prev(prev);
151
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
152 153
		}
		*next_ret = prev;
154 155 156 157
	}
	return NULL;
}

C
Chris Mason 已提交
158
/* check to see if two extent_map structs are adjacent and safe to merge */
159
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
160
{
161 162 163
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

C
Chris Mason 已提交
164 165 166 167 168 169 170
	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

171 172 173 174 175 176 177 178 179 180 181 182 183
	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
184 185 186
	return 0;
}

C
Chris Mason 已提交
187 188 189 190 191 192 193 194 195 196
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *em;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

197
	WARN_ON(!em || em->start != start);
C
Chris Mason 已提交
198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236

	if (!em)
		goto out;

	clear_bit(EXTENT_FLAG_PINNED, &em->flags);

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;

}

237 238 239 240 241 242 243 244 245
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was sucessfull.
246 247 248 249 250
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
251
	struct extent_map *merge = NULL;
252
	struct rb_node *rb;
253
	struct extent_map *exist;
254

255 256 257 258 259 260
	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
261
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
262 263 264 265 266 267 268 269
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
270 271 272 273
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
C
Chris Mason 已提交
274
			em->block_len += merge->block_len;
275 276 277 278
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
279 280
		}
	 }
281 282 283 284 285
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
C
Chris Mason 已提交
286
		em->block_len += merge->len;
287 288 289 290
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
291 292 293 294
out:
	return ret;
}

C
Chris Mason 已提交
295
/* simple helper to do math around the end of an extent, handling wrap */
296 297 298 299 300 301 302
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

303 304 305 306 307 308 309 310 311 312
/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
313 314
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
315
					 u64 start, u64 len)
316 317 318
{
	struct extent_map *em;
	struct rb_node *rb_node;
319 320 321 322
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

323 324 325
	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
326
		if (end > em->start && start < extent_map_end(em))
327 328 329 330
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
331
		if (end > em->start && start < extent_map_end(em))
332 333
			goto found;
	}
334 335 336 337 338 339 340 341 342
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
343 344 345 346 347 348
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

349
found:
350 351 352 353 354
	atomic_inc(&em->refs);
out:
	return em;
}

355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402
/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	goto found;

	em = NULL;
	goto out;

found:
	atomic_inc(&em->refs);
out:
	return em;
}

403 404 405 406 407 408 409
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map beeing removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use
410 411 412
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
413
	int ret = 0;
414

415
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
416 417
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
418 419
	return ret;
}