extent_map.c
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"


static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:		tree to initialize
 * @mask:		flags for memory allocations during tree operations
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
	tree->map = RB_ROOT;
	rwlock_init(&tree->lock);
}
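
/*
 * Illustrative usage sketch (not from the original file): a user of this
 * interface typically embeds an extent_map_tree in its own state and
 * initializes it once at setup time.  "my_inode" here is hypothetical:
 *
 *	struct my_inode {
 *		struct extent_map_tree extent_tree;
 *	};
 *
 *	extent_map_tree_init(&inode->extent_tree, GFP_NOFS);
 */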

/**
 * alloc_extent_map - allocate new extent map structure
 * @mask:	memory allocation flags
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em)
		return NULL;
	em->in_tree = 0;
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	atomic_set(&em->refs, 1);
	return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
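
/*
 * Illustrative lifecycle sketch (hypothetical caller, not part of this
 * file); the field values are made up for the example:
 *
 *	struct extent_map *em;
 *
 *	em = alloc_extent_map(GFP_NOFS);
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = 0;
 *	em->len = 4096;
 *	em->block_start = EXTENT_MAP_HOLE;
 *	...
 *	free_extent_map(em);	// drop the initial reference
 */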

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
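
/*
 * Worked example (illustrative numbers): an unpinned, uncompressed prev
 * covering [0, 4096) with block_start 8192 and block_len 4096 merges
 * with a next covering [4096, 8192) at block_start 12288, since
 * extent_map_end(prev) == next->start and
 * next->block_start == extent_map_block_end(prev).
 */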

int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *em;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	clear_bit(EXTENT_FLAG_PINNED, &em->flags);

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
out:
	return ret;
}

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
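
/*
 * For example, range_end(10, 5) == 15, while a range whose end would
 * overflow a u64 saturates to (u64)-1.
 */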

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_CAST(rb_node);
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

found:
	atomic_inc(&em->refs);
out:
	return em;
}
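
/*
 * Illustrative lookup sketch (hypothetical caller): lookups run under
 * the read side of the tree lock, and the reference this function takes
 * must be dropped with free_extent_map():
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *	if (em) {
 *		...use em...
 *		free_extent_map(em);
 *	}
 */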

/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_CAST(rb_node);
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);

found:
	atomic_inc(&em->refs);
out:
	return em;
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	return ret;
}