/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_VOLUMES_H
#define BTRFS_VOLUMES_H

#include <linux/bio.h>
#include <linux/sort.h>
#include <linux/btrfs.h>
#include "async-thread.h"

#define BTRFS_MAX_DATA_CHUNK_SIZE	(10ULL * SZ_1G)

extern struct mutex uuid_mutex;

#define BTRFS_STRIPE_LEN	SZ_64K

/* Used by sanity check for btrfs_raid_types. */
#define const_ffs(n) (__builtin_ctzll(n) + 1)

/*
 * The conversion from BTRFS_BLOCK_GROUP_* bits to btrfs_raid_type requires
 * RAID0 always to be the lowest profile bit.
 * Although it's part of on-disk format and should never change, do extra
 * compile-time sanity checks.
 */
static_assert(const_ffs(BTRFS_BLOCK_GROUP_RAID0) <
	      const_ffs(BTRFS_BLOCK_GROUP_PROFILE_MASK & ~BTRFS_BLOCK_GROUP_RAID0));
static_assert(const_ilog2(BTRFS_BLOCK_GROUP_RAID0) >
	      ilog2(BTRFS_BLOCK_GROUP_TYPE_MASK));

/* ilog2() can handle both constants and variables */
/* Map a single BTRFS_BLOCK_GROUP_* profile bit to its btrfs_raid_types index. */
#define BTRFS_BG_FLAG_TO_INDEX(profile)					\
	ilog2((profile) >> (ilog2(BTRFS_BLOCK_GROUP_RAID0) - 1))

38
enum btrfs_raid_types {
39 40 41 42 43 44 45 46 47 48 49 50
	/* SINGLE is the special one as it doesn't have on-disk bit. */
	BTRFS_RAID_SINGLE  = 0,

	BTRFS_RAID_RAID0   = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID0),
	BTRFS_RAID_RAID1   = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1),
	BTRFS_RAID_DUP	   = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_DUP),
	BTRFS_RAID_RAID10  = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID10),
	BTRFS_RAID_RAID5   = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID5),
	BTRFS_RAID_RAID6   = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID6),
	BTRFS_RAID_RAID1C3 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1C3),
	BTRFS_RAID_RAID1C4 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1C4),

51 52 53
	BTRFS_NR_RAID_TYPES
};

54 55 56 57 58 59
struct btrfs_io_geometry {
	/* remaining bytes before crossing a stripe */
	u64 len;
	/* offset of logical address in chunk */
	u64 offset;
	/* length of single IO stripe */
60 61 62
	u32 stripe_len;
	/* offset of address in stripe */
	u32 stripe_offset;
63 64 65 66 67 68
	/* number of stripe where address falls */
	u64 stripe_nr;
	/* offset of raid56 stripe into the chunk */
	u64 raid56_stripe_offset;
};

69 70 71 72 73 74 75
/*
 * Use sequence counter to get consistent device stat data on
 * 32-bit processors.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __BTRFS_NEED_DEVICE_DATA_ORDERED
#define btrfs_device_data_ordered_init(device)	\
	seqcount_init(&device->data_seqcount)
#else
#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif

/* Bit numbers for btrfs_device::dev_state. */
#define BTRFS_DEV_STATE_WRITEABLE	(0)
#define BTRFS_DEV_STATE_IN_FS_METADATA	(1)
#define BTRFS_DEV_STATE_MISSING		(2)
#define BTRFS_DEV_STATE_REPLACE_TGT	(3)
#define BTRFS_DEV_STATE_FLUSH_SENT	(4)
#define BTRFS_DEV_STATE_NO_READA	(5)

struct btrfs_zoned_device_info;

91
struct btrfs_device {
92 93
	struct list_head dev_list; /* device_list_mutex */
	struct list_head dev_alloc_list; /* chunk mutex */
94
	struct list_head post_commit_list; /* chunk mutex */
Y
Yan Zheng 已提交
95
	struct btrfs_fs_devices *fs_devices;
96
	struct btrfs_fs_info *fs_info;
97

98
	struct rcu_string __rcu *name;
99 100 101 102 103

	u64 generation;

	struct block_device *bdev;

104 105
	struct btrfs_zoned_device_info *zone_info;

106 107 108
	/* the mode sent to blkdev_get */
	fmode_t mode;

109 110 111 112 113
	/*
	 * Device's major-minor number. Must be set even if the device is not
	 * opened (bdev == NULL), unless the device is missing.
	 */
	dev_t devt;
114
	unsigned long dev_state;
115
	blk_status_t last_flush_error;
116

117
#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
118
	seqcount_t data_seqcount;
119 120
#endif

121 122 123
	/* the internal btrfs device id */
	u64 devid;

124
	/* size of the device in memory */
125 126
	u64 total_bytes;

127
	/* size of the device on disk */
128 129
	u64 disk_total_bytes;

130 131 132 133 134 135 136 137
	/* bytes used */
	u64 bytes_used;

	/* optimal io alignment for this device */
	u32 io_align;

	/* optimal io width for this device */
	u32 io_width;
138 139
	/* type and info about this device */
	u64 type;
140 141 142 143 144

	/* minimal io size for this device */
	u32 sector_size;

	/* physical drive uuid (or lvm uuid) */
145
	u8 uuid[BTRFS_UUID_SIZE];
146

147 148 149 150
	/*
	 * size of the device on the current transaction
	 *
	 * This variant is update when committing the transaction,
151
	 * and protected by chunk mutex
152 153 154
	 */
	u64 commit_total_bytes;

155 156
	/* bytes used on the current transaction */
	u64 commit_bytes_used;
157

158 159 160 161
	/* for sending down flush barriers */
	struct bio *flush_bio;
	struct completion flush_wait;

A
Arne Jansen 已提交
162
	/* per-device scrub information */
163
	struct scrub_ctx *scrub_ctx;
A
Arne Jansen 已提交
164

165 166
	/* disk I/O failure stats. For detailed description refer to
	 * enum btrfs_dev_stat_values in ioctl.h */
167
	int dev_stats_valid;
168 169 170

	/* Counter to record the change of device stats */
	atomic_t dev_stats_ccnt;
171
	atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
172 173

	struct extent_io_tree alloc_state;
174 175 176 177

	struct completion kobj_unregister;
	/* For sysfs/FSID/devinfo/devid/ */
	struct kobject devid_kobj;
178 179 180

	/* Bandwidth limit for scrub, in bytes */
	u64 scrub_speed_max;
181 182
};

183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204
/*
 * If we read those variants at the context of their own lock, we needn't
 * use the following helpers, reading them directly is safe.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)			\
{									\
	u64 size;							\
	unsigned int seq;						\
									\
	do {								\
		seq = read_seqcount_begin(&dev->data_seqcount);		\
		size = dev->name;					\
	} while (read_seqcount_retry(&dev->data_seqcount, seq));	\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	write_seqcount_begin(&dev->data_seqcount);			\
	dev->name = size;						\
	write_seqcount_end(&dev->data_seqcount);			\
	preempt_enable();						\
}
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)			\
{									\
	u64 size;							\
									\
	preempt_disable();						\
	size = dev->name;						\
	preempt_enable();						\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	dev->name = size;						\
	preempt_enable();						\
}
#else
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)			\
{									\
	return dev->name;						\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	dev->name = size;						\
}
#endif

BTRFS_DEVICE_GETSET_FUNCS(total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(bytes_used);

enum btrfs_chunk_allocation_policy {
	BTRFS_CHUNK_ALLOC_REGULAR,
	BTRFS_CHUNK_ALLOC_ZONED,
};

/*
 * Read policies for mirrored block group profiles, read picks the stripe based
 * on these policies.
 */
enum btrfs_read_policy {
	/* Use process PID to choose the stripe */
	BTRFS_READ_POLICY_PID,
	/* Number of policies, keep last. */
	BTRFS_NR_READ_POLICY,
};

265 266
struct btrfs_fs_devices {
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
267
	u8 metadata_uuid[BTRFS_FSID_SIZE];
268
	bool fsid_change;
269
	struct list_head fs_list;
270

271 272 273 274
	/*
	 * Number of devices under this fsid including missing and
	 * replace-target device and excludes seed devices.
	 */
275
	u64 num_devices;
276 277 278 279 280

	/*
	 * The number of devices that successfully opened, including
	 * replace-target, excludes seed devices.
	 */
281
	u64 open_devices;
282 283

	/* The number of devices that are under the chunk allocation list. */
Y
Yan Zheng 已提交
284
	u64 rw_devices;
285 286

	/* Count of missing devices under this fsid excluding seed device. */
287
	u64 missing_devices;
Y
Yan Zheng 已提交
288
	u64 total_rw_bytes;
289 290 291 292 293 294

	/*
	 * Count of devices from btrfs_super_block::num_devices for this fsid,
	 * which includes the seed device, excludes the transient replace-target
	 * device.
	 */
J
Josef Bacik 已提交
295
	u64 total_devices;
296 297 298 299

	/* Highest generation number of seen devices */
	u64 latest_generation;

300 301 302 303 304
	/*
	 * The mount device or a device with highest generation after removal
	 * or replace.
	 */
	struct btrfs_device *latest_dev;
305 306 307

	/* all of the devices in the FS, protected by a mutex
	 * so we can safely walk it to write out the supers without
308 309 310
	 * worrying about add/remove by the multi-device code.
	 * Scrubbing super can kick off supers writing by holding
	 * this mutex lock.
311 312
	 */
	struct mutex device_list_mutex;
313 314

	/* List of all devices, protected by device_list_mutex */
315
	struct list_head devices;
316

317 318 319 320
	/*
	 * Devices which can satisfy space allocation. Protected by
	 * chunk_mutex
	 */
321
	struct list_head alloc_list;
Y
Yan Zheng 已提交
322

323
	struct list_head seed_list;
324
	bool seeding;
Y
Yan Zheng 已提交
325 326

	int opened;
C
Chris Mason 已提交
327 328 329 330

	/* set when we find or add a device that doesn't have the
	 * nonrot flag set
	 */
331
	bool rotating;
332

333
	struct btrfs_fs_info *fs_info;
334
	/* sysfs kobjects */
335
	struct kobject fsid_kobj;
336
	struct kobject *devices_kobj;
337
	struct kobject *devinfo_kobj;
338
	struct completion kobj_unregister;
339 340

	enum btrfs_chunk_allocation_policy chunk_alloc_policy;
A
Anand Jain 已提交
341 342 343

	/* Policy used to read the mirrored stripes */
	enum btrfs_read_policy read_policy;
344 345
};

346 347
#define BTRFS_BIO_INLINE_CSUM_SIZE	64

#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)	\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)

/*
358 359 360
 * Additional info to pass along bio.
 *
 * Mostly for btrfs specific features like csum and mirror_num.
361
 */
362
struct btrfs_bio {
363
	unsigned int mirror_num;
364

365 366 367
	/* for direct I/O */
	u64 file_offset;

368
	/* @device is for stripe IO submission. */
369
	struct btrfs_device *device;
370 371
	u8 *csum;
	u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
372
	struct bvec_iter iter;
373

374 375
	/*
	 * This member must come last, bio_alloc_bioset will allocate enough
376
	 * bytes for entire btrfs_bio but relies on bio being last.
377
	 */
378 379 380
	struct bio bio;
};

381
static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
382
{
383
	return container_of(bio, struct btrfs_bio, bio);
384 385
}

386
static inline void btrfs_bio_free_csum(struct btrfs_bio *bbio)
387
{
388 389 390
	if (bbio->csum != bbio->csum_inline) {
		kfree(bbio->csum);
		bbio->csum = NULL;
391 392 393
	}
}

394
struct btrfs_io_stripe {
395 396
	struct btrfs_device *dev;
	u64 physical;
397
	u64 length; /* only used for discard mappings */
398 399
};

400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416
/*
 * Context for IO subsmission for device stripe.
 *
 * - Track the unfinished mirrors for mirror based profiles
 *   Mirror based profiles are SINGLE/DUP/RAID1/RAID10.
 *
 * - Contain the logical -> physical mapping info
 *   Used by submit_stripe_bio() for mapping logical bio
 *   into physical device address.
 *
 * - Contain device replace info
 *   Used by handle_ops_on_dev_replace() to copy logical bios
 *   into the new device.
 *
 * - Contain RAID56 full stripe logical bytenrs
 */
struct btrfs_io_context {
417
	refcount_t refs;
418
	atomic_t stripes_pending;
419
	struct btrfs_fs_info *fs_info;
Z
Zhao Lei 已提交
420
	u64 map_type; /* get from map_lookup->type */
421
	bio_end_io_t *end_io;
422
	struct bio *orig_bio;
423
	void *private;
424 425
	atomic_t error;
	int max_errors;
426
	int num_stripes;
427
	int mirror_num;
428 429
	int num_tgtdevs;
	int *tgtdev_map;
430 431 432 433 434 435
	/*
	 * logical block numbers for the start of each stripe
	 * The last one or two are p/q.  These are sorted,
	 * so raid_map[0] is the start of our full stripe
	 */
	u64 *raid_map;
436
	struct btrfs_io_stripe stripes[];
437 438
};

439 440 441 442
struct btrfs_device_info {
	struct btrfs_device *dev;
	u64 dev_offset;
	u64 max_avail;
	u64 total_avail;
};

struct btrfs_raid_attr {
447 448 449 450 451 452 453 454
	u8 sub_stripes;		/* sub_stripes info for map */
	u8 dev_stripes;		/* stripes per dev */
	u8 devs_max;		/* max devs to use */
	u8 devs_min;		/* min devs needed */
	u8 tolerated_failures;	/* max tolerated fail devs */
	u8 devs_increment;	/* ndevs has to be a multiple of this */
	u8 ncopies;		/* how many copies to data has */
	u8 nparity;		/* number of stripes worth of bytes to store
455
				 * parity information */
456
	u8 mindev_error;	/* error code if min devs requisite is unmet */
457
	const char raid_name[8]; /* name of the raid */
458
	u64 bg_flag;		/* block group flag of the raid */
459 460
};

Z
Zhao Lei 已提交
461 462
extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];

463 464 465 466
struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
467
	u32 stripe_len;
468 469
	int num_stripes;
	int sub_stripes;
470
	int verified_stripes; /* For mount time dev extent verification */
471
	struct btrfs_io_stripe stripes[];
472 473
};

A
Arne Jansen 已提交
474
/* Allocation size of a map_lookup with n trailing stripes. */
#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_io_stripe) * (n)))

struct btrfs_balance_args;
478
struct btrfs_balance_progress;
479 480 481 482 483 484
struct btrfs_balance_control {
	struct btrfs_balance_args data;
	struct btrfs_balance_args meta;
	struct btrfs_balance_args sys;

	u64 flags;
485 486

	struct btrfs_balance_progress stat;
487 488
};

489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504
/*
 * Search for a given device by the set parameters
 */
struct btrfs_dev_lookup_args {
	u64 devid;
	u8 *uuid;
	u8 *fsid;
	/* NOTE(review): presumably restricts the match to missing devices — verify in lookup code. */
	bool missing;
};

/* We have to initialize to -1 because BTRFS_DEV_REPLACE_DEVID is 0 */
#define BTRFS_DEV_LOOKUP_ARGS_INIT { .devid = (u64)-1 }

/* Declare and initialize a lookup-args variable in one statement. */
#define BTRFS_DEV_LOOKUP_ARGS(name) \
	struct btrfs_dev_lookup_args name = BTRFS_DEV_LOOKUP_ARGS_INIT

505 506 507 508 509 510 511 512 513 514 515 516 517
enum btrfs_map_op {
	BTRFS_MAP_READ,
	BTRFS_MAP_WRITE,
	BTRFS_MAP_DISCARD,
	BTRFS_MAP_GET_READ_MIRRORS,
};

static inline enum btrfs_map_op btrfs_op(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		return BTRFS_MAP_DISCARD;
	case REQ_OP_WRITE:
518
	case REQ_OP_ZONE_APPEND:
519 520 521
		return BTRFS_MAP_WRITE;
	default:
		WARN_ON_ONCE(1);
522
		fallthrough;
523 524 525 526 527
	case REQ_OP_READ:
		return BTRFS_MAP_READ;
	}
}

528 529
void btrfs_get_bioc(struct btrfs_io_context *bioc);
void btrfs_put_bioc(struct btrfs_io_context *bioc);
530
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
531
		    u64 logical, u64 *length,
532
		    struct btrfs_io_context **bioc_ret, int mirror_num);
533
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
534
		     u64 logical, u64 *length,
535
		     struct btrfs_io_context **bioc_ret);
536
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
537
			  enum btrfs_map_op op, u64 logical,
538
			  struct btrfs_io_geometry *io_geom);
539
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
540
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
541
struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
542
					    u64 type);
543
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
544
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
545
			   int mirror_num);
546
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
547
		       fmode_t flags, void *holder);
548 549
struct btrfs_device *btrfs_scan_one_device(const char *path,
					   fmode_t flags, void *holder);
A
Anand Jain 已提交
550
int btrfs_forget_devices(dev_t devt);
551
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
552
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices);
553 554
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev);
555 556 557
struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
						  u64 devid,
						  const char *devpath);
558 559 560
int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
				 struct btrfs_dev_lookup_args *args,
				 const char *path);
561 562 563
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid);
564
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args);
565
void btrfs_free_device(struct btrfs_device *device);
566
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
567
		    struct btrfs_dev_lookup_args *args,
568
		    struct block_device **bdev, fmode_t *mode);
569
void __exit btrfs_cleanup_fs_uuids(void);
570
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
571 572
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size);
573 574
struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
				       const struct btrfs_dev_lookup_args *args);
575
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
576
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
577 578
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
579
		  struct btrfs_ioctl_balance_args *bargs);
580
void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
581
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
582
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
583
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
584
int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset);
585
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
586
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
587
int btrfs_uuid_scan_kthread(void *data);
588
bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset);
589
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
590
			 u64 *start, u64 *max_avail);
591
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
592
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
593
			struct btrfs_ioctl_get_dev_stats *stats);
594
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
595
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
596
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
597
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
598
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev);
599
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
600
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
601
			   u64 logical, u64 len);
602
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
D
David Woodhouse 已提交
603
				    u64 logical);
604 605
int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
				     struct btrfs_block_group *bg);
606
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
607 608
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length);
609
void btrfs_release_disk_super(struct btrfs_super_block *super);
610

611 612 613 614
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
				      int index)
{
	atomic_inc(dev->dev_stat_values + index);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
				      int index)
{
	return atomic_read(dev->dev_stat_values + index);
}

static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
						int index)
{
	int ret;

	ret = atomic_xchg(dev->dev_stat_values + index, 0);
	/*
	 * atomic_xchg implies a full memory barrier as per atomic_t.txt:
	 * - RMW operations that have a return value are fully ordered;
	 *
	 * This implicit memory barrier is paired with the smp_rmb in
	 * btrfs_run_dev_stats().
	 */
	atomic_inc(&dev->dev_stats_ccnt);
	return ret;
}

static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
				      int index, unsigned long val)
{
	atomic_set(dev->dev_stat_values + index, val);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

void btrfs_commit_device_sizes(struct btrfs_transaction *trans);
663

D
David Sterba 已提交
664
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void);
665 666
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
					struct btrfs_device *failing_dev);
667 668 669
void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
			       struct block_device *bdev,
			       const char *device_path);
670

671
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags);
672
int btrfs_bg_type_to_factor(u64 flags);
673
const char *btrfs_bg_type_to_raid_name(u64 flags);
674
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
675
bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);
676

677
#endif