/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#ifndef BTRFS_BACKREF_H
#define BTRFS_BACKREF_H

#include <linux/btrfs.h>
#include "ulist.h"
#include "disk-io.h"
#include "extent_io.h"

struct inode_fs_paths {
	struct btrfs_path		*btrfs_path;
	struct btrfs_root		*fs_root;
	struct btrfs_data_container	*fspath;
};

struct btrfs_backref_shared_cache_entry {
	u64 bytenr;
	u64 gen;
	bool is_shared;
};

#define BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE 8

struct btrfs_backref_share_check_ctx {
	/* Ulists used during backref walking. */
	struct ulist refs;
	/*
	 * The current leaf the caller of btrfs_is_data_extent_shared() is at.
	 * Typically the caller (at the moment only fiemap) tries to determine
	 * the sharedness of data extents pointed to by file extent items from
	 * entire leaves.
	 */
	u64 curr_leaf_bytenr;
	/*
	 * The previous leaf the caller was at in the previous call to
	 * btrfs_is_data_extent_shared(). This may be the same as the current
	 * leaf. On the first call it must be 0.
	 */
	u64 prev_leaf_bytenr;
	/*
	 * A path from a root to a leaf that has a file extent item pointing to
	 * a given data extent should never exceed the maximum b+tree height.
	 */
	struct btrfs_backref_shared_cache_entry path_cache_entries[BTRFS_MAX_LEVEL];
	bool use_path_cache;
	/*
	 * Cache the sharedness result for the last few extents we have found,
	 * but only for extents for which we have multiple file extent items
	 * that point to them.
	 * It's very common to have several file extent items that point to the
	 * same extent (bytenr) but with different offsets and lengths. This
	 * typically happens for COW writes, partial writes into prealloc
	 * extents, NOCOW writes after snapshotting a root, hole punching or
	 * reflinking within the same file (less common perhaps).
	 * So keep a small cache with the lookup results for the extent pointed
	 * by the last few file extent items. This cache is checked, with a
	 * linear scan, whenever btrfs_is_data_extent_shared() is called, so
	 * it must be small so that it does not negatively affect performance in
	 * case we don't have multiple file extent items that point to the same
	 * data extent.
	 */
	struct {
		u64 bytenr;
		bool is_shared;
	} prev_extents_cache[BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE];
	/*
	 * The slot in the prev_extents_cache array that will be used for
	 * storing the sharedness result of a new data extent.
	 */
	int prev_extents_cache_slot;
};
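
/*
 * Illustrative sketch only (not a kernel helper): the linear scan over
 * prev_extents_cache described above amounts to something like this, with
 * the small fixed size keeping misses cheap.
 *
 *	for (i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
 *		if (ctx->prev_extents_cache[i].bytenr == bytenr)
 *			return ctx->prev_extents_cache[i].is_shared;
 *	}
 *	// On a miss: do the full backref walk, then store the result in the
 *	// slot at ctx->prev_extents_cache_slot and advance it round-robin.
 */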

typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
		void *ctx);

struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void);
void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx);

int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags);

int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level);

int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid,
				u64 extent_offset, int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset);
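
/*
 * Sketch of a hypothetical iterate_extent_inodes_t callback (not part of
 * the kernel): it is invoked once per (inode, offset, root) reference of
 * the extent, and a non-zero return value stops the iteration.
 *
 *	static int count_extent_refs(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		u64 *nr_refs = ctx;
 *
 *		(*nr_refs)++;
 *		return 0;
 *	}
 *
 * It would be passed as the 'iterate' argument of iterate_extent_inodes()
 * (or through iterate_inodes_from_logical()) together with &nr_refs as 'ctx'.
 */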

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, void *ctx,
				bool ignore_offset);

int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);

int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset);
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool skip_commit_root_sem);
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size);

struct btrfs_data_container *init_data_container(u32 total_bytes);
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path);
void free_ipath(struct inode_fs_paths *ipath);
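
/*
 * Minimal usage sketch (roughly what the BTRFS_IOC_INO_PATHS ioctl does,
 * error handling elided): resolve all filesystem paths of an inode into a
 * data container.
 *
 *	ipath = init_ipath(size, fs_root, path);
 *	ret = paths_from_inode(inum, ipath);
 *	// On success, ipath->fspath->elem_cnt path strings are stored,
 *	// reachable through ipath->fspath->val[i].
 *	free_ipath(ipath);
 */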

int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off);
int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
				u64 extent_gen,
				struct btrfs_backref_share_check_ctx *ctx);
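
/*
 * Minimal usage sketch (roughly how fiemap drives the sharedness check,
 * details elided): one context is allocated per caller and reused across
 * many extents so the path and previous-extents caches stay warm.
 *
 *	ctx = btrfs_alloc_backref_share_check_ctx();
 *	if (!ctx)
 *		return -ENOMEM;
 *	// For each file extent item of interest:
 *	ret = btrfs_is_data_extent_shared(inode, disk_bytenr, extent_gen, ctx);
 *	// ret > 0: shared, ret == 0: not shared, ret < 0: error.
 *	btrfs_free_backref_share_ctx(ctx);
 */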

int __init btrfs_prelim_ref_init(void);
void __cold btrfs_prelim_ref_exit(void);

struct prelim_ref {
	struct rb_node rbnode;
	u64 root_id;
	struct btrfs_key key_for_search;
	int level;
	int count;
	struct extent_inode_elem *inode_list;
	u64 parent;
	u64 wanted_disk_byte;
};

/*
 * Iterate backrefs of one extent.
 *
 * Currently it only supports iterating tree blocks in the commit root.
 */
struct btrfs_backref_iter {
	u64 bytenr;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info;
	struct btrfs_key cur_key;
	u32 item_ptr;
	u32 cur_ptr;
	u32 end_ptr;
};

struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag);

static inline void btrfs_backref_iter_free(struct btrfs_backref_iter *iter)
{
	if (!iter)
		return;
	btrfs_free_path(iter->path);
	kfree(iter);
}

static inline struct extent_buffer *btrfs_backref_get_eb(
		struct btrfs_backref_iter *iter)
{
	if (!iter)
		return NULL;
	return iter->path->nodes[0];
}

/*
 * For the metadata EXTENT_ITEM key (non-skinny) case, the first inline item
 * is a btrfs_tree_block_info, without a btrfs_extent_inline_ref header.
 *
 * This helper determines if that's the case.
 */
static inline bool btrfs_backref_has_tree_block_info(
		struct btrfs_backref_iter *iter)
{
	if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    iter->cur_ptr - iter->item_ptr == sizeof(struct btrfs_extent_item))
		return true;
	return false;
}
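
/*
 * Layout being tested above, for a non-skinny metadata extent item:
 *
 *	| btrfs_extent_item | btrfs_tree_block_info | inline refs ... |
 *	^ item_ptr          ^ cur_ptr (right after the extent item)
 *
 * When cur_ptr sits exactly sizeof(struct btrfs_extent_item) past item_ptr,
 * what follows is the tree block info rather than an inline ref header, and
 * the iterator user has to account for that before reading inline refs.
 */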

int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr);

int btrfs_backref_iter_next(struct btrfs_backref_iter *iter);

static inline bool btrfs_backref_iter_is_inline_ref(
		struct btrfs_backref_iter *iter)
{
	if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
	    iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
		return true;
	return false;
}

static inline void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
{
	iter->bytenr = 0;
	iter->item_ptr = 0;
	iter->cur_ptr = 0;
	iter->end_ptr = 0;
	btrfs_release_path(iter->path);
	memset(&iter->cur_key, 0, sizeof(iter->cur_key));
}
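
/*
 * Typical shape of a walk with the iterator (a sketch; see
 * btrfs_backref_add_tree_node() for a real consumer). _start() positions
 * the iterator at the first backref, _next() returns 0 while there are
 * more backrefs, > 0 when they are exhausted and < 0 on error.
 *
 *	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	while (ret == 0) {
 *		// Examine iter->cur_key and the data at iter->cur_ptr.
 *		ret = btrfs_backref_iter_next(iter);
 *	}
 *	btrfs_backref_iter_release(iter);
 *	btrfs_backref_iter_free(iter);
 */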

/*
 * Backref cache related structures
 *
 * The whole objective of backref_cache is to build a bi-directional map
 * of tree blocks (represented by backref_node) and all their parents.
 */

/*
 * Represent a tree block in the backref cache
 */
struct btrfs_backref_node {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */

	u64 new_bytenr;
	/* Objectid of the tree block owner; may not be up to date */
	u64 owner;
	/* Link to pending, changed or detached list */
	struct list_head list;

	/* List of upper level edges, which link this node to its parents */
	struct list_head upper;
	/* List of lower level edges, which link this node to its children */
	struct list_head lower;

	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* Extent buffer got by COWing the block */
	struct extent_buffer *eb;
	/* Level of the tree block */
	unsigned int level:8;
	/* Is the block in a non-shareable tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* Is the extent buffer locked */
	unsigned int locked:1;
	/* Has the block been processed */
	unsigned int processed:1;
	/* Have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if corresponding block has been COWed but some upper level block
	 * pointers may not point to the new location
	 */
	unsigned int pending:1;
	/* 1 if the backref node isn't connected to any other backref node */
	unsigned int detached:1;

	/*
	 * For the generic-purpose backref cache, where we only care whether it
	 * is a reloc root, not about the source subvolid.
	 */
	unsigned int is_reloc_root:1;
};

#define LOWER	0
#define UPPER	1

/*
 * Represent an edge connecting upper and lower backref nodes.
 */
struct btrfs_backref_edge {
	/*
	 * list[LOWER] is linked to btrfs_backref_node::upper of lower level
	 * node, and list[UPPER] is linked to btrfs_backref_node::lower of
	 * upper level node.
	 *
	 * Also, build_backref_tree() uses list[UPPER] for pending edges, before
	 * linking list[UPPER] to its upper level nodes.
	 */
	struct list_head list[2];

	/* Two related nodes */
	struct btrfs_backref_node *node[2];
};
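
/*
 * Traversal sketch: edges hanging off a node's 'upper' list are linked
 * through their list[LOWER] head (this node being the lower end of those
 * edges), and the parent is then reached via node[UPPER]:
 *
 *	list_for_each_entry(edge, &node->upper, list[LOWER]) {
 *		struct btrfs_backref_node *parent = edge->node[UPPER];
 *		...
 *	}
 */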

struct btrfs_backref_cache {
	/* Red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* For passing backref nodes to btrfs_reloc_cow_block */
	struct btrfs_backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * List of blocks that have been COWed but some block pointers in upper
	 * level blocks may not reflect the new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* List of backref nodes with no child node */
	struct list_head leaves;
	/* List of blocks that have been COWed in current transaction */
	struct list_head changed;
	/* List of detached backref nodes. */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;

	/* List of unchecked backref edges during backref cache build */
	struct list_head pending_edge;

	/* List of useless backref nodes during backref cache build */
	struct list_head useless_node;

	struct btrfs_fs_info *fs_info;

	/*
	 * Whether this cache is for relocation
	 *
	 * The relocation backref cache requires more info for reloc roots
	 * compared to the generic backref cache.
	 */
	unsigned int is_reloc;
};

void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc);
struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level);
struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache);

#define		LINK_LOWER	(1 << 0)
#define		LINK_UPPER	(1 << 1)
static inline void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
					   struct btrfs_backref_node *lower,
					   struct btrfs_backref_node *upper,
					   int link_which)
{
	ASSERT(upper && lower && upper->level == lower->level + 1);
	edge->node[LOWER] = lower;
	edge->node[UPPER] = upper;
	if (link_which & LINK_LOWER)
		list_add_tail(&edge->list[LOWER], &lower->upper);
	if (link_which & LINK_UPPER)
		list_add_tail(&edge->list[UPPER], &upper->lower);
}
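
/*
 * A minimal sketch of wiring one child/parent pair into the cache with the
 * helpers above (error handling elided):
 *
 *	edge = btrfs_backref_alloc_edge(cache);
 *	if (!edge)
 *		return -ENOMEM;
 *	btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER | LINK_UPPER);
 *
 * During cache build the LOWER side is often linked first (LINK_LOWER only)
 * and the edge queued on cache->pending_edge via its list[UPPER] head until
 * the parent node is known (see the comment on struct btrfs_backref_edge).
 */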

static inline void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
					   struct btrfs_backref_node *node)
{
	if (node) {
		ASSERT(list_empty(&node->list));
		ASSERT(list_empty(&node->lower));
		ASSERT(node->eb == NULL);
		cache->nr_nodes--;
		btrfs_put_root(node->root);
		kfree(node);
	}
}

static inline void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
					   struct btrfs_backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}

static inline void btrfs_backref_unlock_node_buffer(
		struct btrfs_backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}

static inline void btrfs_backref_drop_node_buffer(
		struct btrfs_backref_node *node)
{
	if (node->eb) {
		btrfs_backref_unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}

/*
 * Drop the backref node from the cache without cleaning up its child
 * edges.
 *
 * This can only be called on a node without parent edges.
 * The child edges are still kept as is.
 */
static inline void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
					   struct btrfs_backref_node *node)
{
	ASSERT(list_empty(&node->upper));

	btrfs_backref_drop_node_buffer(node);
	list_del_init(&node->list);
	list_del_init(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	btrfs_backref_free_node(tree, node);
}

void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node);

void btrfs_backref_release_cache(struct btrfs_backref_cache *cache);

static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info,
				       u64 bytenr, int errno)
{
	btrfs_panic(fs_info, errno,
		    "Inconsistency in backref cache found at offset %llu",
		    bytenr);
}

int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur);

int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start);

void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
				 struct btrfs_backref_node *node);
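
/*
 * These three calls form the generic cache-build sequence (a sketch of how
 * relocation's build_backref_tree() uses them, iteration details elided):
 *
 *	ret = btrfs_backref_add_tree_node(cache, path, iter, &node_key, cur);
 *	if (ret < 0)
 *		goto error;
 *	ret = btrfs_backref_finish_upper_links(cache, node);
 *	if (ret < 0)
 *		goto error;
 *	...
 * error:
 *	btrfs_backref_error_cleanup(cache, node);
 */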

#endif