/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
#define EXTENT_DELALLOC_NEW	(1U << 14)
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)

/*
 * Bits redefined from the set above, used only in the device allocation
 * tree. They must not reuse EXTENT_LOCKED / EXTENT_BOUNDARY /
 * EXTENT_CLEAR_META_RESV / EXTENT_CLEAR_DATA_RESV because those bits have
 * special meaning to the bit manipulation functions.
 */
#define CHUNK_ALLOCATED EXTENT_DIRTY
#define CHUNK_TRIMMED   EXTENT_DEFRAG

/*
 * flags for bio submission. The high bits indicate the compression
 * type for this bio
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_FLAG_SHIFT 16

enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* this got triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* write IO error */
	EXTENT_BUFFER_WRITE_ERR,
};

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)

/*
 * page->private values.  Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
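
/*
 * Worked example (illustrative, not from the original header), with
 * BITS_PER_BYTE == 8:
 *
 *	BIT_BYTE(19)              == 2    (bit 19 lives in byte 2)
 *	BITMAP_FIRST_BYTE_MASK(3) == 0xf8 (keep bits 3..7 of the first byte)
 *	BITMAP_LAST_BYTE_MASK(11) == 0x07 (keep bits 0..2 of the last byte,
 *					   since 11 % 8 == 3)
 */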

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
		struct bio *bio, u64 bio_offset);

struct extent_io_ops {
	/*
	 * The following callbacks must always be defined, the function
	 * pointer will be called unconditionally.
	 */
	blk_status_t (*submit_bio_hook)(struct inode *inode, struct bio *bio,
					int mirror_num, unsigned long bio_flags,
					u64 bio_offset);
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
};

enum {
	IO_TREE_FS_INFO_FREED_EXTENTS0,
	IO_TREE_FS_INFO_FREED_EXTENTS1,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_SELFTEST,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)

struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	atomic_t blocking_writers;
	atomic_t blocking_readers;
	bool lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/*
	 * readers use write_lock_wq while they wait for the write
	 * lock holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/*
	 * writers use read_lock_wq while they wait for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	atomic_t spinning_writers;
	atomic_t spinning_readers;
	atomic_t read_locks;
	atomic_t write_locks;
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}
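
/*
 * Typical changeset lifecycle (illustrative sketch, not from the original
 * header; error handling abbreviated):
 *
 *	struct extent_changeset *changeset = extent_changeset_alloc();
 *
 *	if (!changeset)
 *		return -ENOMEM;
 *	ret = set_record_extent_bits(tree, start, end, bits, changeset);
 *	(changeset->bytes_changed and ->range_changed now describe what
 *	 the call actually changed)
 *	extent_changeset_free(changeset);
 */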

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
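
/*
 * Illustrative use of the two helpers above (sketch only;
 * BTRFS_COMPRESS_ZLIB comes from compression.h):
 *
 *	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *	extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_ZLIB);
 *	...
 *	if (bio_flags & EXTENT_BIO_COMPRESSED)
 *		compress_type = extent_compress_type(bio_flags);
 */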

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					  struct page *page,
					  size_t pg_offset,
					  u64 start, u64 len,
					  int create);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}
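
/*
 * Typical pairing (illustrative sketch, not from the original header):
 * lock an inclusive byte range, operate on it, then unlock the same
 * range:
 *
 *	lock_extent(&inode->io_tree, start, end);
 *	... read or modify [start, end] ...
 *	unlock_extent(&inode->io_tree, start, end);
 */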

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask,
		     struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_ATOMIC, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, unsigned bits);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct address_space *mapping, struct list_head *pages,
		     unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, int wait,
			     int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (eb->start >> PAGE_SHIFT);
}
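
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4K): an eb with
 * start == 0x3000 and len == 0x4000 ends at 0x7000, so it covers pages
 * 3..6 and num_extent_pages() returns (0x7000 >> 12) - (0x3000 >> 12) == 4.
 */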

static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user(const struct extent_buffer *eb,
			       void __user *dst, unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
		const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(const struct extent_buffer *eb,
			      unsigned long offset, unsigned long min_len,
			      char **map, unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				 u64 delalloc_end, struct page *locked_page,
				 unsigned bits_to_clear,
				 unsigned long page_ops);
struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or because csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is marked uptodate
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};
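
/*
 * Sketch of the repair flow built on the helpers declared below
 * (illustrative only; error handling elided):
 *
 *	struct io_failure_record *failrec;
 *
 *	btrfs_get_io_failure_record(inode, start, end, &failrec);
 *	if (btrfs_check_repairable(inode, failed_bio_pages, failrec,
 *				   failed_mirror)) {
 *		bio = btrfs_create_repair_bio(inode, failed_bio, failrec,
 *					      page, pg_offset, icsum,
 *					      endio_func, data);
 *		... submit bio against failrec->this_mirror ...
 *	}
 */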

void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
		u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
			    struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode, struct extent_io_tree *tree,
			     struct page *locked_page, u64 *start,
			     u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);

#endif