/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_WRITEBACK	(1U << 1)
#define EXTENT_UPTODATE		(1U << 2)
#define EXTENT_LOCKED		(1U << 3)
#define EXTENT_NEW		(1U << 4)
#define EXTENT_DELALLOC		(1U << 5)
#define EXTENT_DEFRAG		(1U << 6)
#define EXTENT_BOUNDARY		(1U << 9)
#define EXTENT_NODATASUM	(1U << 10)
#define EXTENT_CLEAR_META_RESV	(1U << 11)
#define EXTENT_NEED_WAIT	(1U << 12)
#define EXTENT_DAMAGED		(1U << 13)
#define EXTENT_NORESERVE	(1U << 14)
#define EXTENT_QGROUP_RESERVED	(1U << 15)
#define EXTENT_CLEAR_DATA_RESV	(1U << 16)
#define EXTENT_DELALLOC_NEW	(1U << 17)
#define EXTENT_IOBITS		(EXTENT_LOCKED | EXTENT_WRITEBACK)
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)

/*
 * flags for bio submission. The high bits indicate the compression
 * type for this bio
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_FLAG_SHIFT 16

enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* this got triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* write IO error */
	EXTENT_BUFFER_WRITE_ERR,
};

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)

/*
 * page->private values.  Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
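
/*
 * Illustrative example (not part of the original header): for a bitmap run
 * starting at bit 3, the helpers above evaluate to
 *
 *	BIT_BYTE(3)               == 0    (bit 3 lives in byte 0)
 *	BITMAP_FIRST_BYTE_MASK(3) == 0xf8 (bits 3-7 of the first byte)
 *	BITMAP_LAST_BYTE_MASK(13) == 0x1f (a 13 bit run ends on bits 0-4
 *					   of its last byte)
 *	BITMAP_LAST_BYTE_MASK(16) == 0xff (a run ending on a byte boundary
 *					   keeps every bit)
 */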

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef	blk_status_t (extent_submit_bio_hook_t)(void *private_data, struct bio *bio,
				       int mirror_num, unsigned long bio_flags,
				       u64 bio_offset);

typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
		struct bio *bio, u64 bio_offset);

struct extent_io_ops {
	/*
	 * The following callbacks must always be defined, the function
	 * pointer will be called unconditionally.
	 */
	extent_submit_bio_hook_t *submit_bio_hook;
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
};

enum {
	IO_TREE_FS_INFO_FREED_EXTENTS0,
	IO_TREE_FS_INFO_FREED_EXTENTS1,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_SELFTEST,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	/* count of read lock holders on the extent buffer */
	atomic_t write_locks;
	atomic_t read_locks;
	atomic_t blocking_writers;
	atomic_t blocking_readers;
	atomic_t spinning_readers;
	atomic_t spinning_writers;
	short lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/* readers use write_lock_wq while they wait for the write
	 * lock holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/* writers use read_lock_wq while they wait for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}
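
/*
 * Illustrative usage (not part of the original header): callers that want
 * the changed ranges reported back typically do
 *
 *	struct extent_changeset *cs = extent_changeset_alloc();
 *
 *	if (!cs)
 *		return -ENOMEM;
 *	ret = set_record_extent_bits(tree, start, end, bits, cs);
 *	(inspect cs->bytes_changed / cs->range_changed)
 *	extent_changeset_free(cs);
 */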

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
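
/*
 * Illustrative example (not part of the original header): the compression
 * type rides in the bits above EXTENT_BIO_FLAG_SHIFT, so for a compression
 * type of 1
 *
 *	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *	extent_set_compress_type(&bio_flags, 1);
 *	bio_flags is now (1 << EXTENT_BIO_FLAG_SHIFT) | EXTENT_BIO_COMPRESSED
 *	and extent_compress_type(bio_flags) returns 1 again.
 */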

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					  struct page *page,
					  size_t pg_offset,
					  u64 start, u64 len,
					  int create);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask,
		     struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_ATOMIC, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct address_space *mapping, struct list_head *pages,
		     unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, int wait,
			     int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (eb->start >> PAGE_SHIFT);
}
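
/*
 * Illustrative example (not part of the original header), assuming 4K pages:
 * an extent buffer with start == 8K and len == 16K spans pages 2-5, so
 * num_extent_pages() returns (24K / 4K) - (8K / 4K) = 6 - 2 = 4.
 */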

static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user(const struct extent_buffer *eb,
			       void __user *dst, unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
		const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(const struct extent_buffer *eb,
			      unsigned long offset, unsigned long min_len,
			      char **map, unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				 u64 delalloc_end, struct page *locked_page,
				 unsigned bits_to_clear,
				 unsigned long page_ops);
struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or a csum verification failure, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};
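
/*
 * Illustrative flow (not part of the original header): the read repair path
 * in extent_io.c uses the record roughly like this (local variable names
 * here are hypothetical):
 *
 *	struct io_failure_record *failrec;
 *
 *	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
 *	if (ret)
 *		return ret;
 *	if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
 *				    failed_mirror)) {
 *		free_io_failure(failure_tree, io_tree, failrec);
 *		return -EIO;
 *	}
 *	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
 *				      pg_offset, csum_pos, endio_func, data);
 *	(the repair bio is then submitted to failrec->this_mirror)
 */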

void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
		u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
			    struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode, struct extent_io_tree *tree,
			     struct page *locked_page, u64 *start,
			     u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);

#endif