/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
#define EXTENT_DELALLOC_NEW	(1U << 14)
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)

/*
 * flags for bio submission. The high bits indicate the compression
 * type for this bio
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_FLAG_SHIFT 16

enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* this got triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* write IO error */
	EXTENT_BUFFER_WRITE_ERR,
};

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)

/*
 * page->private values.  Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
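
/*
 * A quick worked example of the byte-granular masks (with BITS_PER_BYTE == 8,
 * BYTE_MASK is 0xff):
 *
 *	BIT_BYTE(19)              == 2    (bit 19 lives in byte 2)
 *	BITMAP_FIRST_BYTE_MASK(3) == 0xf8 (bits 3..7 of the first byte)
 *	BITMAP_LAST_BYTE_MASK(5)  == 0x1f (bits 0..4 of the last byte)
 */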

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef	blk_status_t (extent_submit_bio_hook_t)(void *private_data, struct bio *bio,
				       int mirror_num, unsigned long bio_flags,
				       u64 bio_offset);

typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
		struct bio *bio, u64 bio_offset);

struct extent_io_ops {
	/*
	 * The following callbacks must always be defined, the function
	 * pointer will be called unconditionally.
	 */
	extent_submit_bio_hook_t *submit_bio_hook;
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
};

enum {
	IO_TREE_FS_INFO_FREED_EXTENTS0,
	IO_TREE_FS_INFO_FREED_EXTENTS1,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_SELFTEST,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	atomic_t blocking_writers;
	atomic_t blocking_readers;
	short lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/* readers use write_lock_wq while they wait for the write
	 * lock holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/* writers use read_lock_wq while they wait for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	atomic_t spinning_writers;
	atomic_t spinning_readers;
	atomic_t read_locks;
	atomic_t write_locks;
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}
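
/*
 * Rough lifecycle of a changeset (illustrative sketch, error handling
 * trimmed); a changeset records which ranges actually changed state when
 * passed to set_record_extent_bits()/clear_record_extent_bits():
 *
 *	struct extent_changeset *changeset = extent_changeset_alloc();
 *
 *	if (!changeset)
 *		return -ENOMEM;
 *	ret = set_record_extent_bits(tree, start, end, bits, changeset);
 *	... inspect changeset->bytes_changed and changeset->range_changed ...
 *	extent_changeset_free(changeset);
 *
 * A changeset embedded in another structure can instead be set up with
 * extent_changeset_init() and torn down with extent_changeset_release().
 */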

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
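
/*
 * Example of how a compressed bio ends up tagged (illustrative): a caller
 * that sets EXTENT_BIO_COMPRESSED and then calls
 * extent_set_compress_type(&bio_flags, 1) (1 being zlib in the btrfs
 * compression enum) produces bio_flags == 0x10001 with
 * EXTENT_BIO_FLAG_SHIFT == 16, and extent_compress_type(bio_flags)
 * recovers the value 1.
 */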

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					  struct page *page,
					  size_t pg_offset,
					  u64 start, u64 len,
					  int create);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask,
		     struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}
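
/*
 * Illustrative locking pattern for an io tree (ranges are byte granular and
 * the end offset is inclusive, as in struct extent_state):
 *
 *	lock_extent(tree, start, start + len - 1);
 *	... operate on the range, e.g. start I/O against it ...
 *	unlock_extent(tree, start, start + len - 1);
 *
 * The _cached variants below reuse a cached extent_state to avoid another
 * tree search on unlock.
 */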

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_ATOMIC, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct address_space *mapping, struct list_head *pages,
		     unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, int wait,
			     int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (eb->start >> PAGE_SHIFT);
}
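
/*
 * Example: with 4K pages, a 16K extent buffer starting at 0x4000 covers
 * (round_up(0x4000 + 0x4000, 0x1000) >> 12) - (0x4000 >> 12) = 8 - 4 = 4
 * pages, while on a 64K page machine the same buffer fits in a single page.
 */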

static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user(const struct extent_buffer *eb,
			       void __user *dst, unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
		const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(const struct extent_buffer *eb,
			      unsigned long offset, unsigned long min_len,
			      char **map, unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				 u64 delalloc_end, struct page *locked_page,
				 unsigned bits_to_clear,
				 unsigned long page_ops);
struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};


void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
		u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
			    struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
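
/*
 * Sketch of how the repair helpers above typically fit together on a failed
 * read (simplified and illustrative; the real flow lives in extent_io.c):
 *
 *	struct io_failure_record *failrec;
 *
 *	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
 *	if (ret)
 *		return ret;
 *	if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
 *				    fail_mirror)) {
 *		free_io_failure(failure_tree, io_tree, failrec);
 *		return -EIO;
 *	}
 *	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
 *				      pg_offset, icsum, endio_func, data);
 *	... submit bio to failrec->this_mirror ...
 *
 * clean_io_failure() later clears the record once a good copy has been
 * read back.
 */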
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode, struct extent_io_tree *tree,
			     struct page *locked_page, u64 *start,
			     u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);

#endif