/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
#define EXTENT_DELALLOC_NEW	(1U << 14)
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)

/*
 * Redefined bits above which are used only in the device allocation tree.
 * These shouldn't reuse EXTENT_LOCKED / EXTENT_BOUNDARY /
 * EXTENT_CLEAR_META_RESV / EXTENT_CLEAR_DATA_RESV, because those bits have
 * special meaning to the bit manipulation functions.
 */
#define CHUNK_ALLOCATED EXTENT_DIRTY
#define CHUNK_TRIMMED   EXTENT_DEFRAG

/*
 * flags for bio submission. The high bits indicate the compression
 * type for this bio
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_FLAG_SHIFT 16

enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* this got triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* write IO error */
	EXTENT_BUFFER_WRITE_ERR,
};

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)
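
/*
 * Illustrative sketch (an assumption, not lifted from the kernel sources):
 * callers pass a mask of the PAGE_* bits above as the page_ops argument of
 * helpers like extent_clear_unlock_delalloc(), e.g. to finish writeback on
 * a delalloc range:
 *
 *	unsigned long page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
 *				 PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK;
 */
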
/*
 * page->private values.  Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1
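
/*
 * A minimal sketch of how a page gets tagged (an assumption mirroring what
 * set_page_extent_mapped(), declared below, is expected to do):
 *
 *	if (!PagePrivate(page)) {
 *		SetPagePrivate(page);
 *		get_page(page);
 *		set_page_private(page, EXTENT_PAGE_PRIVATE);
 *	}
 */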

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
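
/*
 * Worked example: with BITS_PER_BYTE == 8, BITMAP_FIRST_BYTE_MASK(5) is
 * (0xff << 5) & 0xff == 0xe0, i.e. bits 5-7 of the first byte, and
 * BITMAP_LAST_BYTE_MASK(5) is 0xff >> 3 == 0x1f, i.e. bits 0-4 of the
 * last byte.
 */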

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;


typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
		struct bio *bio, u64 bio_offset);

struct extent_io_ops {
	/*
	 * The following callbacks must always be defined, the function
	 * pointers will be called unconditionally.
	 */
	blk_status_t (*submit_bio_hook)(struct inode *inode, struct bio *bio,
					int mirror_num, unsigned long bio_flags);
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
};

enum {
	IO_TREE_FS_INFO_FREED_EXTENTS0,
	IO_TREE_FS_INFO_FREED_EXTENTS1,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_SELFTEST,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	atomic_t blocking_writers;
	atomic_t blocking_readers;
	bool lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/* readers use write_lock_wq while they wait for the write
	 * lock holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/* writers use read_lock_wq while they wait for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	atomic_t spinning_writers;
	atomic_t spinning_readers;
	atomic_t read_locks;
	atomic_t write_locks;
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}
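
/*
 * Typical changeset lifecycle, as a sketch (error handling elided;
 * set_record_extent_bits() is declared further down in this header):
 *
 *	struct extent_changeset *changeset = extent_changeset_alloc();
 *
 *	if (changeset) {
 *		set_record_extent_bits(tree, start, end, bits, changeset);
 *		... consume changeset->bytes_changed ...
 *		extent_changeset_free(changeset);
 *	}
 */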

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
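
/*
 * Example of the bio_flags layout (illustrative only): the low 16 bits
 * carry the EXTENT_BIO_* flags, everything above EXTENT_BIO_FLAG_SHIFT
 * carries the compression type.
 *
 *	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *	extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_ZLIB);
 *	... extent_compress_type(bio_flags) now returns BTRFS_COMPRESS_ZLIB ...
 */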

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					  struct page *page,
					  size_t pg_offset,
					  u64 start, u64 len,
					  int create);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
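
/*
 * Usage sketch (an assumption modelled on the selftests): an io tree is
 * set up with its owner and an optional private pointer, and torn down
 * with extent_io_tree_release():
 *
 *	struct extent_io_tree tree;
 *
 *	extent_io_tree_init(fs_info, &tree, IO_TREE_SELFTEST, NULL);
 *	...
 *	extent_io_tree_release(&tree);
 */
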
void extent_io_tree_release(struct extent_io_tree *tree);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask,
		     struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}
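
/*
 * Range endpoints are inclusive byte offsets, so locking a single sector
 * typically pairs the helpers like this (sketch only):
 *
 *	lock_extent(tree, start, start + sectorsize - 1);
 *	... operate on the locked range ...
 *	unlock_extent(tree, start, start + sectorsize - 1);
 */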

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_ATOMIC, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, unsigned bits);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct address_space *mapping, struct list_head *pages,
		     unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
			     int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (eb->start >> PAGE_SHIFT);
}
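
/*
 * Worked example (4K pages): an eb with start == 0x8000 and len == 0x4000
 * yields (round_up(0xc000, PAGE_SIZE) >> PAGE_SHIFT) - (0x8000 >> PAGE_SHIFT)
 * == 12 - 8 == 4 pages; a subpage-sized eb still counts as one page.
 */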

static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user(const struct extent_buffer *eb,
			       void __user *dst, unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
		const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(const struct extent_buffer *eb,
			      unsigned long offset, unsigned long min_len,
			      char **map, unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				 u64 delalloc_end, struct page *locked_page,
				 unsigned bits_to_clear,
				 unsigned long page_ops);
struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or because csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};


void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
		u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
			    struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode, struct extent_io_tree *tree,
			     struct page *locked_page, u64 *start,
			     u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);

#endif