#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"

/* slab cache backing all struct extent_map allocations in this file */
static struct kmem_cache *extent_map_cache;
int __init extent_map_init(void)
13
{
14 15 16
	extent_map_cache = kmem_cache_create("extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
17 18 19
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
20 21
}

22
void extent_map_exit(void)
23 24 25 26 27
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

28 29 30 31 32 33 34
/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:		tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
35
void extent_map_tree_init(struct extent_map_tree *tree)
36
{
37
	tree->map = RB_ROOT;
38
	rwlock_init(&tree->lock);
39 40
}

41 42 43 44 45 46 47
/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map()
 */
48
struct extent_map *alloc_extent_map(void)
49 50
{
	struct extent_map *em;
51
	em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
52 53
	if (!em)
		return NULL;
54
	em->in_tree = 0;
55
	em->flags = 0;
56
	em->compress_type = BTRFS_COMPRESS_NONE;
57 58 59 60
	atomic_set(&em->refs, 1);
	return em;
}

61 62 63 64 65 66 67
/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map beeing releasead
 *
 * Drops the reference out on @em by one and free the structure
 * if the reference count hits zero.
 */
68 69
void free_extent_map(struct extent_map *em)
{
C
Chris Mason 已提交
70 71
	if (!em)
		return;
72
	WARN_ON(atomic_read(&em->refs) == 0);
73 74 75 76 77 78 79 80 81
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}

/*
 * tree_insert - link @node into @root keyed by @offset
 *
 * Walk down the rbtree to find the insertion point.  If an existing
 * extent_map already covers @offset, nothing is inserted and the
 * conflicting node is returned; otherwise @node is linked, rebalanced
 * and NULL is returned.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **link = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *em;

	while (*link) {
		parent = *link;
		em = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!em->in_tree);

		if (offset < em->start)
			link = &(*link)->rb_left;
		else if (offset >= extent_map_end(em))
			link = &(*link)->rb_right;
		else
			return parent;	/* overlap: refuse to insert */
	}

	em = rb_entry(node, struct extent_map, rb_node);
	em->in_tree = 1;
	rb_link_node(node, parent, link);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	/*
	 * standard rbtree descent; remember the last node visited so the
	 * neighbour searches below have a starting point when there is no
	 * exact hit
	 */
	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	/*
	 * no exact match: walk forward from the last visited node to the
	 * first extent ending beyond offset (may run off the tree, leaving
	 * *prev_ret NULL)
	 */
	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	/* symmetric backward walk for the next-neighbour result */
	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

157
/* check to see if two extent_map structs are adjacent and safe to merge */
158
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
159
{
160 161 162
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

163 164 165 166 167 168 169
	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

170 171 172 173 174 175 176 177 178 179 180 181 182
	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
183 184 185
	return 0;
}

186 187 188 189 190 191 192 193 194 195
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *em;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

196
	WARN_ON(!em || em->start != start);
197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235

	if (!em)
		goto out;

	clear_bit(EXTENT_FLAG_PINNED, &em->flags);

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;

}

236 237 238 239 240 241 242 243
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
L
Lucas De Marchi 已提交
244
 * reference dropped if the merge attempt was successful.
245 246 247 248 249
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
250
	struct extent_map *merge = NULL;
251
	struct rb_node *rb;
252
	struct extent_map *exist;
253

254 255 256 257 258 259
	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
260
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
261 262 263 264 265 266 267 268
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
269 270 271 272
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
273
			em->block_len += merge->block_len;
274 275 276 277
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
278 279
		}
	 }
280 281 282 283 284
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
285
		em->block_len += merge->len;
286 287 288 289
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
290 291 292 293
out:
	return ret;
}

294
/* simple helper to do math around the end of an extent, handling wrap */
295 296 297 298 299 300 301
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

302 303
struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
					   u64 start, u64 len, int strict)
304 305 306
{
	struct extent_map *em;
	struct rb_node *rb_node;
307 308 309 310
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

311
	rb_node = __tree_search(&tree->map, start, &prev, &next);
312
	if (!rb_node) {
313 314 315 316 317 318
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
319
	}
320

321
	em = rb_entry(rb_node, struct extent_map, rb_node);
322

323 324
	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;
325

326 327 328 329
	atomic_inc(&em->refs);
	return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 *
 * A reference is taken on the returned map; drop it with free_extent_map().
 * Returns NULL when nothing in the tree intersects the range.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}

/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned (non-strict
 * lookup).  A reference is taken on the returned map.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

364 365 366 367 368 369 370
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map beeing removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use
371 372 373
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
374
	int ret = 0;
375

376
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
377 378
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
379 380
	return ret;
}