/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_WRITEBACK	(1U << 1)
#define EXTENT_UPTODATE		(1U << 2)
#define EXTENT_LOCKED		(1U << 3)
#define EXTENT_NEW		(1U << 4)
#define EXTENT_DELALLOC		(1U << 5)
#define EXTENT_DEFRAG		(1U << 6)
#define EXTENT_BOUNDARY		(1U << 9)
#define EXTENT_NODATASUM	(1U << 10)
#define EXTENT_CLEAR_META_RESV	(1U << 11)
#define EXTENT_FIRST_DELALLOC	(1U << 12)
#define EXTENT_NEED_WAIT	(1U << 13)
#define EXTENT_DAMAGED		(1U << 14)
#define EXTENT_NORESERVE	(1U << 15)
#define EXTENT_QGROUP_RESERVED	(1U << 16)
#define EXTENT_CLEAR_DATA_RESV	(1U << 17)
#define EXTENT_DELALLOC_NEW	(1U << 18)
#define EXTENT_IOBITS		(EXTENT_LOCKED | EXTENT_WRITEBACK)
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)

/*
 * flags for bio submission. The high bits indicate the compression
 * type for this bio
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_FLAG_SHIFT 16

enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* this got triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* write IO error */
	EXTENT_BUFFER_WRITE_ERR,
};

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)

/*
 * page->private values.  Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
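/*
 * Worked example (editor's illustration): for a bitmap region starting
 * at bit 10, BIT_BYTE(10) == 1 (bit 10 lives in byte 1) and
 * BITMAP_FIRST_BYTE_MASK(10) == 0xfc (bits 2..7 of that byte); for a
 * bitmap that ends after nbits == 20 total bits,
 * BITMAP_LAST_BYTE_MASK(20) == 0x0f (bits 0..3 of the final byte).
 */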

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef blk_status_t (extent_submit_bio_hook_t)(void *private_data, struct bio *bio,
				       int mirror_num, unsigned long bio_flags,
				       u64 bio_offset);

typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
		struct bio *bio, u64 bio_offset);

struct extent_io_ops {
	/*
	 * The following callbacks must always be defined, the function
	 * pointer will be called unconditionally.
	 */
	extent_submit_bio_hook_t *submit_bio_hook;
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
};

struct extent_io_tree {
	struct rb_root state;
	void *private_data;
	u64 dirty_bytes;
	int track_uptodate;
	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	/* counts of extent buffer lock holders */
	atomic_t write_locks;
	atomic_t read_locks;
	atomic_t blocking_writers;
	atomic_t blocking_readers;
	atomic_t spinning_readers;
	atomic_t spinning_writers;
	short lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/* readers use write_lock_wq while they wait for the write
	 * lock holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/* writers use read_lock_wq while they wait for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}
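
/*
 * Typical lifecycle sketch (editor's illustration, not taken from this
 * header; error handling abbreviated):
 *
 *	struct extent_changeset *changeset;
 *
 *	changeset = extent_changeset_alloc();
 *	if (!changeset)
 *		return -ENOMEM;
 *	ret = set_record_extent_bits(tree, start, end, bits, changeset);
 *	... inspect changeset->bytes_changed / changeset->range_changed ...
 *	extent_changeset_free(changeset);
 */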

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
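
/*
 * Packing sketch (editor's illustration): the low bits of bio_flags hold
 * flags such as EXTENT_BIO_COMPRESSED, the bits above
 * EXTENT_BIO_FLAG_SHIFT hold the compression type. Assuming
 * BTRFS_COMPRESS_ZLIB == 1 (per compression.h):
 *
 *	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *	extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_ZLIB);
 *	bio_flags is now 0x10001
 *	extent_compress_type(bio_flags) returns 1
 */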

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					  struct page *page,
					  size_t pg_offset,
					  u64 start, u64 len,
					  int create);

void extent_io_tree_init(struct extent_io_tree *tree, void *private_data);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask,
		     struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_ATOMIC, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}
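
/*
 * Pairing sketch (editor's illustration; the io_tree field name is per
 * struct btrfs_inode): lock and unlock take the same byte range, and
 * ranges are end-inclusive (see extent_state), so a len-byte range ends
 * at start + len - 1:
 *
 *	lock_extent(&inode->io_tree, start, start + len - 1);
 *	... operate on the locked range ...
 *	unlock_extent(&inode->io_tree, start, start + len - 1);
 */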

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct address_space *mapping, struct list_head *pages,
		     unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, int wait,
			     int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (eb->start >> PAGE_SHIFT);
}
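
/*
 * Worked example (editor's illustration): with 4KiB pages, an extent
 * buffer at start == 4096 with len == 16384 covers pages 1..4, so
 * num_extent_pages() returns
 * (round_up(20480, 4096) >> 12) - (4096 >> 12) == 5 - 1 == 4.
 */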

static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user(const struct extent_buffer *eb,
			       void __user *dst, unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
		const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(const struct extent_buffer *eb,
			      unsigned long offset, unsigned long min_len,
			      char **map, unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				 u64 delalloc_end, struct page *locked_page,
				 unsigned bits_to_clear,
				 unsigned long page_ops);
struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or a csum verification failure, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};
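
/*
 * Field sketch (editor's reading, not authoritative): failed_mirror
 * records the mirror that returned bad data, this_mirror is the next
 * candidate copy to try on each retry, and in_validation marks a read
 * that re-checks a presumed-good copy before the range is accepted.
 */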

void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
		u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
			    struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode, struct extent_io_tree *tree,
			     struct page *locked_page, u64 *start,
			     u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);

#endif