Commit b28cf572 authored by Chris Mason

Merge branch 'misc-cleanups-4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux into for-linus-4.5

Signed-off-by: Chris Mason <clm@fb.com>
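
Most of the hunks in this merge follow a few recurring cleanup patterns: open-coded list_for_each()/list_entry() walks become list_for_each_entry() and its _safe variants, hand-written byte-size arithmetic becomes the SZ_* macros from <linux/sizes.h>, redundant trailing return statements in void functions are dropped, and a few call sites swap magic numbers for named constants. As a minimal sketch of why the list conversion helps, here is a userspace re-implementation of the container_of() mechanics behind the kernel's list macros; the struct and variable names are invented for this example:

```c
/* Userspace sketch of list_entry()/list_for_each_entry() mechanics.
 * Build with: gcc -std=gnu11 sketch.c (typeof is a GNU extension). */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Recover the containing struct from a pointer to its embedded member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Old style: iterate raw nodes, convert each one by hand. */
#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* New style: the macro performs the conversion, so the loop body
 * works directly with the typed entry. */
#define list_for_each_entry(pos, head, member) \
	for ((pos) = container_of((head)->next, typeof(*(pos)), member); \
	     &(pos)->member != (head); \
	     (pos) = container_of((pos)->member.next, typeof(*(pos)), member))

struct item { int value; struct list_head list; };

int main(void)
{
	struct list_head head = { &head, &head };
	struct item a = { .value = 42 };

	/* splice the single item into the circular list */
	a.list.next = a.list.prev = &head;
	head.next = head.prev = &a.list;

	struct item *it;
	list_for_each_entry(it, &head, list)
		printf("%d\n", it->value);
	return 0;
}
```

The conversion removes the separate struct list_head cursor and the per-iteration list_entry() call, which is exactly the shape of the backref.c and check-integrity.c hunks below.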
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
@@ -520,13 +520,10 @@ static inline int ref_for_same_block(struct __prelim_ref *ref1,
 static int __add_missing_keys(struct btrfs_fs_info *fs_info,
 			      struct list_head *head)
 {
-	struct list_head *pos;
+	struct __prelim_ref *ref;
 	struct extent_buffer *eb;
 
-	list_for_each(pos, head) {
-		struct __prelim_ref *ref;
-
-		ref = list_entry(pos, struct __prelim_ref, list);
+	list_for_each_entry(ref, head, list) {
 		if (ref->parent)
 			continue;
 		if (ref->key_for_search.type)
@@ -563,23 +560,15 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
  */
 static void __merge_refs(struct list_head *head, int mode)
 {
-	struct list_head *pos1;
+	struct __prelim_ref *ref1;
 
-	list_for_each(pos1, head) {
-		struct list_head *n2;
-		struct list_head *pos2;
-		struct __prelim_ref *ref1;
+	list_for_each_entry(ref1, head, list) {
+		struct __prelim_ref *ref2 = ref1, *tmp;
 
-		ref1 = list_entry(pos1, struct __prelim_ref, list);
-
-		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
-		     pos2 = n2, n2 = pos2->next) {
-			struct __prelim_ref *ref2;
+		list_for_each_entry_safe_continue(ref2, tmp, head, list) {
 			struct __prelim_ref *xchg;
 			struct extent_inode_elem *eie;
 
-			ref2 = list_entry(pos2, struct __prelim_ref, list);
-
 			if (!ref_for_same_block(ref1, ref2))
 				continue;
 			if (mode == 1) {
......
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
@@ -531,13 +531,9 @@ static struct btrfsic_block *btrfsic_block_hashtable_lookup(
 		(((unsigned int)(dev_bytenr >> 16)) ^
 		 ((unsigned int)((uintptr_t)bdev))) &
 		(BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
-	struct list_head *elem;
+	struct btrfsic_block *b;
 
-	list_for_each(elem, h->table + hashval) {
-		struct btrfsic_block *const b =
-			list_entry(elem, struct btrfsic_block,
-				   collision_resolving_node);
-
+	list_for_each_entry(b, h->table + hashval, collision_resolving_node) {
 		if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
 			return b;
 	}
@@ -588,13 +584,9 @@ static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
 		 ((unsigned int)((uintptr_t)bdev_ref_to)) ^
 		 ((unsigned int)((uintptr_t)bdev_ref_from))) &
 		(BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
-	struct list_head *elem;
+	struct btrfsic_block_link *l;
 
-	list_for_each(elem, h->table + hashval) {
-		struct btrfsic_block_link *const l =
-			list_entry(elem, struct btrfsic_block_link,
-				   collision_resolving_node);
-
+	list_for_each_entry(l, h->table + hashval, collision_resolving_node) {
 		BUG_ON(NULL == l->block_ref_to);
 		BUG_ON(NULL == l->block_ref_from);
 		if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
@@ -639,13 +631,9 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
 	const unsigned int hashval =
 		(((unsigned int)((uintptr_t)bdev)) &
 		 (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
-	struct list_head *elem;
+	struct btrfsic_dev_state *ds;
 
-	list_for_each(elem, h->table + hashval) {
-		struct btrfsic_dev_state *const ds =
-			list_entry(elem, struct btrfsic_dev_state,
-				   collision_resolving_node);
-
+	list_for_each_entry(ds, h->table + hashval, collision_resolving_node) {
 		if (ds->bdev == bdev)
 			return ds;
 	}
@@ -1720,29 +1708,20 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 
 static void btrfsic_dump_database(struct btrfsic_state *state)
 {
-	struct list_head *elem_all;
+	const struct btrfsic_block *b_all;
 
 	BUG_ON(NULL == state);
 
 	printk(KERN_INFO "all_blocks_list:\n");
-	list_for_each(elem_all, &state->all_blocks_list) {
-		const struct btrfsic_block *const b_all =
-			list_entry(elem_all, struct btrfsic_block,
-				   all_blocks_node);
-		struct list_head *elem_ref_to;
-		struct list_head *elem_ref_from;
+	list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) {
+		const struct btrfsic_block_link *l;
 
 		printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
 		       btrfsic_get_block_type(state, b_all),
 		       b_all->logical_bytenr, b_all->dev_state->name,
 		       b_all->dev_bytenr, b_all->mirror_num);
 
-		list_for_each(elem_ref_to, &b_all->ref_to_list) {
-			const struct btrfsic_block_link *const l =
-				list_entry(elem_ref_to,
-					   struct btrfsic_block_link,
-					   node_ref_to);
-
+		list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) {
 			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
 			       " refers %u* to"
 			       " %c @%llu (%s/%llu/%d)\n",
@@ -1757,12 +1736,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
 			       l->block_ref_to->mirror_num);
 		}
 
-		list_for_each(elem_ref_from, &b_all->ref_from_list) {
-			const struct btrfsic_block_link *const l =
-				list_entry(elem_ref_from,
-					   struct btrfsic_block_link,
-					   node_ref_from);
-
+		list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) {
 			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
 			       " is ref %u* from"
 			       " %c @%llu (%s/%llu/%d)\n",
@@ -1845,8 +1819,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
 					       &state->block_hashtable);
 	if (NULL != block) {
 		u64 bytenr = 0;
-		struct list_head *elem_ref_to;
-		struct list_head *tmp_ref_to;
+		struct btrfsic_block_link *l, *tmp;
 
 		if (block->is_superblock) {
 			bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
@@ -1967,13 +1940,8 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
 			 * because it still carries valueable information
 			 * like whether it was ever written and IO completed.
 			 */
-			list_for_each_safe(elem_ref_to, tmp_ref_to,
-					   &block->ref_to_list) {
-				struct btrfsic_block_link *const l =
-					list_entry(elem_ref_to,
-						   struct btrfsic_block_link,
-						   node_ref_to);
-
+			list_for_each_entry_safe(l, tmp, &block->ref_to_list,
+						 node_ref_to) {
 				if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
 					btrfsic_print_rem_link(state, l);
 				l->ref_cnt--;
@@ -2436,7 +2404,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
 				  struct btrfsic_block *const block,
 				  int recursion_level)
 {
-	struct list_head *elem_ref_to;
+	const struct btrfsic_block_link *l;
 	int ret = 0;
 
 	if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
@@ -2464,11 +2432,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
 	 * This algorithm is recursive because the amount of used stack
 	 * space is very small and the max recursion depth is limited.
 	 */
-	list_for_each(elem_ref_to, &block->ref_to_list) {
-		const struct btrfsic_block_link *const l =
-			list_entry(elem_ref_to, struct btrfsic_block_link,
-				   node_ref_to);
-
+	list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
 			printk(KERN_INFO
 			       "rl=%d, %c @%llu (%s/%llu/%d)"
@@ -2561,7 +2525,7 @@ static int btrfsic_is_block_ref_by_superblock(
 		const struct btrfsic_block *block,
 		int recursion_level)
 {
-	struct list_head *elem_ref_from;
+	const struct btrfsic_block_link *l;
 
 	if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
 		/* refer to comment at "abort cyclic linkage (case 1)" */
@@ -2576,11 +2540,7 @@ static int btrfsic_is_block_ref_by_superblock(
 	 * This algorithm is recursive because the amount of used stack space
 	 * is very small and the max recursion depth is limited.
 	 */
-	list_for_each(elem_ref_from, &block->ref_from_list) {
-		const struct btrfsic_block_link *const l =
-			list_entry(elem_ref_from, struct btrfsic_block_link,
-				   node_ref_from);
-
+	list_for_each_entry(l, &block->ref_from_list, node_ref_from) {
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
 			printk(KERN_INFO
 			       "rl=%d, %c @%llu (%s/%llu/%d)"
@@ -2669,7 +2629,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
 				  const struct btrfsic_block *block,
 				  int indent_level)
 {
-	struct list_head *elem_ref_to;
+	const struct btrfsic_block_link *l;
 	int indent_add;
 	static char buf[80];
 	int cursor_position;
@@ -2704,11 +2664,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
 	}
 	cursor_position = indent_level;
 
-	list_for_each(elem_ref_to, &block->ref_to_list) {
-		const struct btrfsic_block_link *const l =
-			list_entry(elem_ref_to, struct btrfsic_block_link,
-				   node_ref_to);
-
+	list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
 		while (cursor_position < indent_level) {
 			printk(" ");
 			cursor_position++;
@@ -3165,8 +3121,7 @@ int btrfsic_mount(struct btrfs_root *root,
 void btrfsic_unmount(struct btrfs_root *root,
 		     struct btrfs_fs_devices *fs_devices)
 {
-	struct list_head *elem_all;
-	struct list_head *tmp_all;
+	struct btrfsic_block *b_all, *tmp_all;
 	struct btrfsic_state *state;
 	struct list_head *dev_head = &fs_devices->devices;
 	struct btrfs_device *device;
@@ -3206,20 +3161,12 @@ void btrfsic_unmount(struct btrfs_root *root,
 	 * just free all memory that was allocated dynamically.
 	 * Free the blocks and the block_links.
 	 */
-	list_for_each_safe(elem_all, tmp_all, &state->all_blocks_list) {
-		struct btrfsic_block *const b_all =
-			list_entry(elem_all, struct btrfsic_block,
-				   all_blocks_node);
-		struct list_head *elem_ref_to;
-		struct list_head *tmp_ref_to;
-
-		list_for_each_safe(elem_ref_to, tmp_ref_to,
-				   &b_all->ref_to_list) {
-			struct btrfsic_block_link *const l =
-				list_entry(elem_ref_to,
-					   struct btrfsic_block_link,
-					   node_ref_to);
-
+	list_for_each_entry_safe(b_all, tmp_all, &state->all_blocks_list,
+				 all_blocks_node) {
+		struct btrfsic_block_link *l, *tmp;
+
+		list_for_each_entry_safe(l, tmp, &b_all->ref_to_list,
+					 node_ref_to) {
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
 				btrfsic_print_rem_link(state, l);
......
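
Several of the check-integrity.c loops above unlink or free the current element while walking, so they use the _safe macro variants, which keep a second cursor pointing at the next element before the loop body runs. A hedged userspace sketch of the same idea on a plain singly-linked list (names invented for the example):

```c
/* Why "safe" iteration matters: grab the next pointer before freeing
 * the current node, as list_for_each_entry_safe() does internally. */
#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->v = i;
		n->next = head;
		head = n;
	}

	struct node *cur = head, *tmp;
	while (cur) {
		tmp = cur->next;   /* cache the successor first ... */
		free(cur);         /* ... so freeing cur is safe */
		cur = tmp;
	}
	return 0;
}
```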
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
@@ -1555,7 +1555,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
 		return 0;
 	}
 
-	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
+	search_start = buf->start & ~((u64)SZ_1G - 1);
 
 	if (parent)
 		btrfs_set_lock_blocking(parent);
......
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
@@ -35,6 +35,7 @@
 #include <linux/btrfs.h>
 #include <linux/workqueue.h>
 #include <linux/security.h>
+#include <linux/sizes.h>
 #include "extent_io.h"
 #include "extent_map.h"
 #include "async-thread.h"
@@ -199,9 +200,9 @@ static const int btrfs_csum_sizes[] = { 4 };
 /* ioprio of readahead is set to idle */
 #define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
 
-#define BTRFS_DIRTY_METADATA_THRESH	(32 * 1024 * 1024)
+#define BTRFS_DIRTY_METADATA_THRESH	SZ_32M
 
-#define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024)
+#define BTRFS_MAX_EXTENT_SIZE	SZ_128M
 
 /*
  * The key defines the order in the tree, and so it also defines (optimal)
@@ -4347,7 +4348,7 @@ static inline void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info,
 #define btrfs_fs_incompat(fs_info, opt) \
 	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
 
-static inline int __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
+static inline bool __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
 {
 	struct btrfs_super_block *disk_super;
 	disk_super = fs_info->super_copy;
......
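
The SZ_* names used above, and throughout the rest of this merge, come from <linux/sizes.h>, which defines them as plain hex constants, so the change is purely cosmetic: the generated code is identical, the source just says SZ_32M instead of 32 * 1024 * 1024. Only round values exist in that header, which is why send.h below spells 48 KiB as 48 * SZ_1K. A quick compile-time check of the equivalence (the three values are copied from sizes.h; _Static_assert is C11):

```c
/* Sanity-check that the SZ_* constants equal the literals they replace. */
#define SZ_16K  0x00004000
#define SZ_32M  0x02000000
#define SZ_128M 0x08000000

_Static_assert(SZ_16K == 16 * 1024, "SZ_16K mismatch");
_Static_assert(SZ_32M == 32 * 1024 * 1024, "SZ_32M mismatch");
_Static_assert(SZ_128M == 128 * 1024 * 1024, "SZ_128M mismatch");

int main(void) { return 0; }
```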
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
@@ -54,16 +54,11 @@ static inline void btrfs_init_delayed_node(
 	delayed_node->root = root;
 	delayed_node->inode_id = inode_id;
 	atomic_set(&delayed_node->refs, 0);
-	delayed_node->count = 0;
-	delayed_node->flags = 0;
 	delayed_node->ins_root = RB_ROOT;
 	delayed_node->del_root = RB_ROOT;
 	mutex_init(&delayed_node->mutex);
-	delayed_node->index_cnt = 0;
 	INIT_LIST_HEAD(&delayed_node->n_list);
 	INIT_LIST_HEAD(&delayed_node->p_list);
-	delayed_node->bytes_reserved = 0;
-	memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
 }
 
 static inline int btrfs_is_continuous_delayed_item(
@@ -132,7 +127,7 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 	if (node)
 		return node;
 
-	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
+	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
 	if (!node)
 		return ERR_PTR(-ENOMEM);
 	btrfs_init_delayed_node(node, root, ino);
......
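
The two delayed-inode.c hunks pair up: once the node is allocated with kmem_cache_zalloc(), which returns zeroed memory, btrfs_init_delayed_node() no longer needs to clear count, flags, index_cnt, bytes_reserved, and inode_item one by one. A userspace analogy using calloc() (the struct here is a stand-in, not the real btrfs_delayed_node):

```c
/* Zero-at-allocation replaces field-by-field clearing. */
#include <stdlib.h>

struct fake_delayed_node {
	int count;
	unsigned long flags;
	unsigned long long bytes_reserved;
};

static struct fake_delayed_node *node_alloc(void)
{
	/* calloc() zeroes the whole object, like kmem_cache_zalloc();
	 * with plain malloc() every field above would need an explicit
	 * "= 0", and forgetting one leaves uninitialized memory. */
	return calloc(1, sizeof(struct fake_delayed_node));
}

int main(void)
{
	struct fake_delayed_node *n = node_alloc();
	int ret = (n && n->count == 0) ? 0 : 1;

	free(n);
	return ret;
}
```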
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
@@ -2824,7 +2824,7 @@ int open_ctree(struct super_block *sb,
 
 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
-				    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
+				    SZ_4M / PAGE_CACHE_SIZE);
 
 	tree_root->nodesize = nodesize;
 	tree_root->sectorsize = sectorsize;
@@ -3996,7 +3996,6 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
 		balance_dirty_pages_ratelimited(
 				   root->fs_info->btree_inode->i_mapping);
 	}
-	return;
 }
 
 void btrfs_btree_balance_dirty(struct btrfs_root *root)
......
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
@@ -19,7 +19,7 @@
 #ifndef __DISKIO__
 #define __DISKIO__
 
-#define BTRFS_SUPER_INFO_OFFSET (64 * 1024)
+#define BTRFS_SUPER_INFO_OFFSET SZ_64K
 #define BTRFS_SUPER_INFO_SIZE 4096
 
 #define BTRFS_SUPER_MIRROR_MAX 3
@@ -35,7 +35,7 @@ enum btrfs_wq_endio_type {
 
 static inline u64 btrfs_sb_offset(int mirror)
 {
-	u64 start = 16 * 1024;
+	u64 start = SZ_16K;
 	if (mirror)
 		return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
 	return BTRFS_SUPER_INFO_OFFSET;
......
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
@@ -3347,7 +3347,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 	 * If this block group is smaller than 100 megs don't bother caching the
 	 * block group.
 	 */
-	if (block_group->key.offset < (100 * 1024 * 1024)) {
+	if (block_group->key.offset < (100 * SZ_1M)) {
 		spin_lock(&block_group->lock);
 		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
 		spin_unlock(&block_group->lock);
@@ -3447,7 +3447,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 	 * taking up quite a bit since it's not folded into the other space
 	 * cache.
 	 */
-	num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
+	num_pages = div_u64(block_group->key.offset, SZ_256M);
 	if (!num_pages)
 		num_pages = 1;
@@ -4273,14 +4273,13 @@ static int should_alloc_chunk(struct btrfs_root *root,
 	 */
 	if (force == CHUNK_ALLOC_LIMITED) {
 		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
-		thresh = max_t(u64, 64 * 1024 * 1024,
-			       div_factor_fine(thresh, 1));
+		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
 
 		if (num_bytes - num_allocated < thresh)
 			return 1;
 	}
 
-	if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
+	if (num_allocated + SZ_2M < div_factor(num_bytes, 8))
 		return 0;
 	return 1;
 }
@@ -4480,7 +4479,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	 * transaction.
 	 */
 	if (trans->can_flush_pending_bgs &&
-	    trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
+	    trans->chunk_bytes_reserved >= (u64)SZ_2M) {
 		btrfs_create_pending_block_groups(trans, trans->root);
 		btrfs_trans_release_chunk_metadata(trans);
 	}
@@ -4578,7 +4577,7 @@ static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
 	return nr;
 }
 
-#define EXTENT_SIZE_PER_ITEM	(256 * 1024)
+#define EXTENT_SIZE_PER_ITEM	SZ_256K
 
 /*
  * shrink metadata reservation for delalloc
@@ -4783,8 +4782,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
 	u64 expected;
 	u64 to_reclaim;
 
-	to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
-				16 * 1024 * 1024);
+	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
 	spin_lock(&space_info->lock);
 	if (can_overcommit(root, space_info, to_reclaim,
 			   BTRFS_RESERVE_FLUSH_ALL)) {
@@ -4795,8 +4793,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
 	used = space_info->bytes_used + space_info->bytes_reserved +
 	       space_info->bytes_pinned + space_info->bytes_readonly +
 	       space_info->bytes_may_use;
-	if (can_overcommit(root, space_info, 1024 * 1024,
-			   BTRFS_RESERVE_FLUSH_ALL))
+	if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
 		expected = div_factor_fine(space_info->total_bytes, 95);
 	else
 		expected = div_factor_fine(space_info->total_bytes, 90);
@@ -5352,7 +5349,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
 	spin_lock(&sinfo->lock);
 	spin_lock(&block_rsv->lock);
 
-	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
+	block_rsv->size = min_t(u64, num_bytes, SZ_512M);
 
 	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
 		    sinfo->bytes_reserved + sinfo->bytes_readonly +
@@ -6256,11 +6253,11 @@ fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
 		return ret;
 
 	if (ssd)
-		*empty_cluster = 2 * 1024 * 1024;
+		*empty_cluster = SZ_2M;
 	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
 		ret = &root->fs_info->meta_alloc_cluster;
 		if (!ssd)
-			*empty_cluster = 64 * 1024;
+			*empty_cluster = SZ_64K;
 	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
 		ret = &root->fs_info->data_alloc_cluster;
 	}
@@ -9172,7 +9169,7 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
 	if ((sinfo->flags &
 	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
 	    !force)
-		min_allocable_bytes = 1 * 1024 * 1024;
+		min_allocable_bytes = SZ_1M;
 	else
 		min_allocable_bytes = 0;
......
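
For the should_alloc_chunk() hunk above, the constants read more clearly once the helpers are spelled out. A hedged sketch of the threshold arithmetic, assuming div_factor(x, n) == x * n / 10 and div_factor_fine(x, n) == x * n / 100 as in fs/btrfs/math.h:

```c
/* Sketch of should_alloc_chunk()'s decision logic. */
#include <stdint.h>
#include <stdio.h>

#define SZ_2M  0x00200000ULL
#define SZ_64M 0x04000000ULL

static uint64_t div_factor(uint64_t num, int f)      { return num * f / 10; }
static uint64_t div_factor_fine(uint64_t num, int f) { return num * f / 100; }

static int should_alloc(uint64_t total_fs_bytes, uint64_t num_bytes,
			uint64_t num_allocated)
{
	/* limited mode: allocate while less than max(64M, 1% of the fs)
	 * of this space-info remains unallocated */
	uint64_t thresh = div_factor_fine(total_fs_bytes, 1);

	if (thresh < SZ_64M)
		thresh = SZ_64M;
	if (num_bytes - num_allocated < thresh)
		return 1;

	/* otherwise: allocate once usage passes ~80% (with 2M slack) */
	if (num_allocated + SZ_2M < div_factor(num_bytes, 8))
		return 0;
	return 1;
}

int main(void)
{
	/* 100 GiB fs, 10 GiB chunk space, 1 GiB used: no new chunk yet */
	printf("%d\n", should_alloc(100ULL << 30, 10ULL << 30, 1ULL << 30));
	return 0;
}
```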
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
@@ -4292,7 +4292,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 
 	if (gfpflags_allow_blocking(mask) &&
-	    page->mapping->host->i_size > 16 * 1024 * 1024) {
+	    page->mapping->host->i_size > SZ_16M) {
 		u64 len;
 		while (start <= end) {
 			len = end - start + 1;
......
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
@@ -30,7 +30,7 @@
 #include "volumes.h"
 
 #define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
-#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
+#define MAX_CACHE_BYTES_PER_GIG	SZ_32K
 
 struct btrfs_trim_range {
 	u64 start;
@@ -1086,14 +1086,11 @@ write_pinned_extent_entries(struct btrfs_root *root,
 static noinline_for_stack int
 write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
 {
-	struct list_head *pos, *n;
+	struct btrfs_free_space *entry, *next;
 	int ret;
 
 	/* Write out the bitmaps */
-	list_for_each_safe(pos, n, bitmap_list) {
-		struct btrfs_free_space *entry =
-			list_entry(pos, struct btrfs_free_space, list);
-
+	list_for_each_entry_safe(entry, next, bitmap_list, list) {
 		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
 		if (ret)
 			return -ENOSPC;
@@ -1119,13 +1116,10 @@ static int flush_dirty_cache(struct inode *inode)
 static void noinline_for_stack
 cleanup_bitmap_list(struct list_head *bitmap_list)
 {
-	struct list_head *pos, *n;
+	struct btrfs_free_space *entry, *next;
 
-	list_for_each_safe(pos, n, bitmap_list) {
-		struct btrfs_free_space *entry =
-			list_entry(pos, struct btrfs_free_space, list);
-
+	list_for_each_entry_safe(entry, next, bitmap_list, list)
 		list_del_init(&entry->list);
-	}
 }
@@ -1656,11 +1650,10 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 	 * at or below 32k, so we need to adjust how much memory we allow to be
 	 * used by extent based free space tracking
 	 */
-	if (size < 1024 * 1024 * 1024)
+	if (size < SZ_1G)
 		max_bytes = MAX_CACHE_BYTES_PER_GIG;
 	else
-		max_bytes = MAX_CACHE_BYTES_PER_GIG *
-			div_u64(size, 1024 * 1024 * 1024);
+		max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);
 
 	/*
 	 * we want to account for 1 more bitmap than what we have so we can make
@@ -2489,8 +2482,7 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
 	 * track of free space, and if we pass 1/2 of that we want to
 	 * start converting things over to using bitmaps
 	 */
-	ctl->extents_thresh = ((1024 * 32) / 2) /
-				sizeof(struct btrfs_free_space);
+	ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
 }
 
 /*
......
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
@@ -282,7 +282,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
 	}
 }
 
-#define INIT_THRESHOLD	(((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
+#define INIT_THRESHOLD	((SZ_32K / 2) / sizeof(struct btrfs_free_space))
 #define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
 
 /*
......
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
@@ -420,15 +420,15 @@ static noinline void compress_file_range(struct inode *inode,
 	unsigned long nr_pages_ret = 0;
 	unsigned long total_compressed = 0;
 	unsigned long total_in = 0;
-	unsigned long max_compressed = 128 * 1024;
-	unsigned long max_uncompressed = 128 * 1024;
+	unsigned long max_compressed = SZ_128K;
+	unsigned long max_uncompressed = SZ_128K;
 	int i;
 	int will_compress;
 	int compress_type = root->fs_info->compress_type;
 	int redirty = 0;
 
 	/* if this is a small write inside eof, kick off a defrag */
-	if ((end - start + 1) < 16 * 1024 &&
+	if ((end - start + 1) < SZ_16K &&
 	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
 		btrfs_add_inode_defrag(NULL, inode);
@@ -436,7 +436,7 @@ static noinline void compress_file_range(struct inode *inode,
 again:
 	will_compress = 0;
 	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
-	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
+	nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_CACHE_SIZE);
 
 	/*
 	 * we don't want to send crud past the end of i_size through
@@ -950,7 +950,7 @@ static noinline int cow_file_range(struct inode *inode,
 	disk_num_bytes = num_bytes;
 
 	/* if this is a small write inside eof, kick off defrag */
-	if (num_bytes < 64 * 1024 &&
+	if (num_bytes < SZ_64K &&
 	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
 		btrfs_add_inode_defrag(NULL, inode);
@@ -1113,7 +1113,7 @@ static noinline void async_cow_submit(struct btrfs_work *work)
 	 * atomic_sub_return implies a barrier for waitqueue_active
 	 */
 	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
-	    5 * 1024 * 1024 &&
+	    5 * SZ_1M &&
 	    waitqueue_active(&root->fs_info->async_submit_wait))
 		wake_up(&root->fs_info->async_submit_wait);
@@ -1138,7 +1138,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	unsigned long nr_pages;
 	u64 cur_end;
-	int limit = 10 * 1024 * 1024;
+	int limit = 10 * SZ_1M;
 
 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
 			 1, 0, NULL, GFP_NOFS);
@@ -1154,7 +1154,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
 		    !btrfs_test_opt(root, FORCE_COMPRESS))
 			cur_end = end;
 		else
-			cur_end = min(end, start + 512 * 1024 - 1);
+			cur_end = min(end, start + SZ_512K - 1);
 
 		async_cow->end = cur_end;
 		INIT_LIST_HEAD(&async_cow->extents);
@@ -4346,7 +4346,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 	 * up a huge file in a single leaf. Most of the time that
 	 * bytes_deleted is > 0, it will be huge by the time we get here
 	 */
-	if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
+	if (be_nice && bytes_deleted > SZ_32M) {
 		if (btrfs_should_end_transaction(trans, root)) {
 			err = -EAGAIN;
 			goto error;
@@ -4589,7 +4589,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 
 	btrfs_free_path(path);
 
-	if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
+	if (be_nice && bytes_deleted > SZ_32M) {
 		unsigned long updates = trans->delayed_ref_updates;
 		if (updates) {
 			trans->delayed_ref_updates = 0;
@@ -5302,7 +5302,6 @@ void btrfs_evict_inode(struct inode *inode)
 no_delete:
 	btrfs_remove_delayed_node(inode);
 	clear_inode(inode);
-	return;
 }
 
 /*
@@ -6685,7 +6684,7 @@ static int merge_extent_mapping(struct extent_map_tree *em_tree,
 }
 
 static noinline int uncompress_inline(struct btrfs_path *path,
-				      struct inode *inode, struct page *page,
+				      struct page *page,
 				      size_t pg_offset, u64 extent_offset,
 				      struct btrfs_file_extent_item *item)
 {
@@ -6881,8 +6880,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 		if (create == 0 && !PageUptodate(page)) {
 			if (btrfs_file_extent_compression(leaf, item) !=
 			    BTRFS_COMPRESS_NONE) {
-				ret = uncompress_inline(path, inode, page,
-							pg_offset,
+				ret = uncompress_inline(path, page, pg_offset,
 							extent_offset, item);
 				if (ret) {
 					err = ret;
@@ -9780,7 +9778,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 			}
 		}
 
-		cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
+		cur_bytes = min_t(u64, num_bytes, SZ_256M);
 		cur_bytes = max(cur_bytes, min_size);
 		/*
 		 * If we are severely fragmented we could end up with really
......
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
@@ -1025,7 +1025,7 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
 	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
 		ret = false;
 	else if ((em->block_start + em->block_len == next->block_start) &&
-		 (em->block_len > 128 * 1024 && next->block_len > 128 * 1024))
+		 (em->block_len > SZ_128K && next->block_len > SZ_128K))
 		ret = false;
 
 	free_extent_map(next);
@@ -1271,9 +1271,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 	int defrag_count = 0;
 	int compress_type = BTRFS_COMPRESS_ZLIB;
 	u32 extent_thresh = range->extent_thresh;
-	unsigned long max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
+	unsigned long max_cluster = SZ_256K >> PAGE_CACHE_SHIFT;
 	unsigned long cluster = max_cluster;
-	u64 new_align = ~((u64)128 * 1024 - 1);
+	u64 new_align = ~((u64)SZ_128K - 1);
 	struct page **pages = NULL;
 
 	if (isize == 0)
@@ -1290,7 +1290,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 	}
 
 	if (extent_thresh == 0)
-		extent_thresh = 256 * 1024;
+		extent_thresh = SZ_256K;
 
 	/*
 	 * if we were not given a file, allocate a readahead
@@ -1322,7 +1322,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 	if (newer_than) {
 		ret = find_new_extents(root, inode, newer_than,
-				       &newer_off, 64 * 1024);
+				       &newer_off, SZ_64K);
 		if (!ret) {
 			range->start = newer_off;
 			/*
@@ -1412,9 +1412,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 			newer_off = max(newer_off + 1,
 					(u64)i << PAGE_CACHE_SHIFT);
 
-			ret = find_new_extents(root, inode,
-					       newer_than, &newer_off,
-					       64 * 1024);
+			ret = find_new_extents(root, inode, newer_than,
					       &newer_off, SZ_64K);
 			if (!ret) {
 				range->start = newer_off;
 				i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
@@ -1580,7 +1579,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
 		new_size = old_size + new_size;
 	}
 
-	if (new_size < 256 * 1024 * 1024) {
+	if (new_size < SZ_256M) {
 		ret = -EINVAL;
 		goto out_free;
 	}
@@ -2169,7 +2168,7 @@ static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
 	struct inode *inode;
 	int ret;
 	size_t buf_size;
-	const size_t buf_limit = 16 * 1024 * 1024;
+	const size_t buf_limit = SZ_16M;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -3105,7 +3104,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
 	return ret;
 }
 
-#define BTRFS_MAX_DEDUPE_LEN	(16 * 1024 * 1024)
+#define BTRFS_MAX_DEDUPE_LEN	SZ_16M
 
 static long btrfs_ioctl_file_extent_same(struct file *file,
 			struct btrfs_ioctl_same_args __user *argp)
@@ -4533,7 +4532,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
 		goto out;
 	}
 
-	size = min_t(u32, loi->size, 64 * 1024);
+	size = min_t(u32, loi->size, SZ_64K);
 	inodes = init_data_container(size);
 	if (IS_ERR(inodes)) {
 		ret = PTR_ERR(inodes);
......
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
@@ -56,7 +56,6 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 			atomic_dec(&eb->spinning_readers);
 		read_unlock(&eb->lock);
 	}
-	return;
 }
 
 /*
@@ -96,7 +95,6 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 		    waitqueue_active(&eb->read_lock_wq))
 			wake_up(&eb->read_lock_wq);
 	}
-	return;
 }
 
 /*
......
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
@@ -503,7 +503,6 @@ static void cache_rbio(struct btrfs_raid_bio *rbio)
 	}
 
 	spin_unlock_irqrestore(&table->cache_lock, flags);
-	return;
 }
 
 /*
@@ -906,7 +905,6 @@ static void raid_write_end_io(struct bio *bio)
 		err = -EIO;
 
 	rbio_orig_end_io(rbio, err);
-	return;
 }
 
 /*
......
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
@@ -1514,8 +1514,6 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 
 	if (sblock->no_io_error_seen)
 		scrub_recheck_block_checksum(sblock);
-
-	return;
 }
 
 static inline int scrub_check_fsid(u8 fsid[],
......
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h
@@ -22,8 +22,8 @@
 #define BTRFS_SEND_STREAM_MAGIC "btrfs-stream"
 #define BTRFS_SEND_STREAM_VERSION 1
 
-#define BTRFS_SEND_BUF_SIZE (1024 * 64)
-#define BTRFS_SEND_READ_SIZE (1024 * 48)
+#define BTRFS_SEND_BUF_SIZE SZ_64K
+#define BTRFS_SEND_READ_SIZE (48 * SZ_1K)
 
 enum btrfs_tlv_type {
 	BTRFS_TLV_U8,
......
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
@@ -1900,7 +1900,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 	 * btrfs starts at an offset of at least 1MB when doing chunk
 	 * allocation.
 	 */
-	skip_space = 1024 * 1024;
+	skip_space = SZ_1M;
 
 	/* user can set the offset in fs_info->alloc_start. */
 	if (fs_info->alloc_start &&
......
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
@@ -19,6 +19,7 @@
 #include <linux/pagemap.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/sizes.h>
 #include "btrfs-tests.h"
 #include "../extent_io.h"
@@ -71,8 +72,8 @@ static int test_find_delalloc(void)
 	struct page *page;
 	struct page *locked_page = NULL;
 	unsigned long index = 0;
-	u64 total_dirty = 256 * 1024 * 1024;
-	u64 max_bytes = 128 * 1024 * 1024;
+	u64 total_dirty = SZ_256M;
+	u64 max_bytes = SZ_128M;
 	u64 start, end, test_start;
 	u64 found;
 	int ret = -EINVAL;
@@ -136,7 +137,7 @@ static int test_find_delalloc(void)
 	 * |--- delalloc ---|
 	 *         |--- search ---|
 	 */
-	test_start = 64 * 1024 * 1024;
+	test_start = SZ_64M;
 	locked_page = find_lock_page(inode->i_mapping,
 				     test_start >> PAGE_CACHE_SHIFT);
 	if (!locked_page) {
@@ -223,8 +224,8 @@ static int test_find_delalloc(void)
 	 * Now to test where we run into a page that is no longer dirty in the
 	 * range we want to find.
 	 */
-	page = find_get_page(inode->i_mapping, (max_bytes + (1 * 1024 * 1024))
-			     >> PAGE_CACHE_SHIFT);
+	page = find_get_page(inode->i_mapping,
+			     (max_bytes + SZ_1M) >> PAGE_CACHE_SHIFT);
 	if (!page) {
 		test_msg("Couldn't find our page\n");
 		goto out_bits;
......
This diff has been collapsed.
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
@@ -100,7 +100,7 @@ static void insert_inode_item_key(struct btrfs_root *root)
 static void setup_file_extents(struct btrfs_root *root)
 {
 	int slot = 0;
-	u64 disk_bytenr = 1 * 1024 * 1024;
+	u64 disk_bytenr = SZ_1M;
 	u64 offset = 0;
 
 	/* First we want a hole */
......
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
@@ -651,17 +651,20 @@ struct btrfs_trans_handle *btrfs_start_transaction_lflush(
 
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
 {
-	return start_transaction(root, 0, TRANS_JOIN, 0);
+	return start_transaction(root, 0, TRANS_JOIN,
+				 BTRFS_RESERVE_NO_FLUSH);
 }
 
 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
 {
-	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
+	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
+				 BTRFS_RESERVE_NO_FLUSH);
 }
 
 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
 {
-	return start_transaction(root, 0, TRANS_USERSPACE, 0);
+	return start_transaction(root, 0, TRANS_USERSPACE,
+				 BTRFS_RESERVE_NO_FLUSH);
 }
 
 /*
@@ -679,7 +682,8 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root
 */
 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
 {
-	return start_transaction(root, 0, TRANS_ATTACH, 0);
+	return start_transaction(root, 0, TRANS_ATTACH,
+				 BTRFS_RESERVE_NO_FLUSH);
 }
 
 /*
@@ -694,7 +698,8 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
 {
 	struct btrfs_trans_handle *trans;
 
-	trans = start_transaction(root, 0, TRANS_ATTACH, 0);
+	trans = start_transaction(root, 0, TRANS_ATTACH,
+				  BTRFS_RESERVE_NO_FLUSH);
 	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
 		btrfs_wait_for_commit(root, 0);
......
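
The transaction.c hunks replace a literal 0 in the flush argument of start_transaction() with the enum value BTRFS_RESERVE_NO_FLUSH that the parameter actually expects; behavior is unchanged, but the call sites now read as intent and match the parameter's enum type. A generic sketch of the pattern (the enumerator names and function below are illustrative, not the kernel's definitions):

```c
/* Named enumerators instead of magic numbers at call sites. */
enum reserve_flush {
	RESERVE_NO_FLUSH,	/* == 0, what the old bare literal meant */
	RESERVE_FLUSH_LIMIT,
	RESERVE_FLUSH_ALL,
};

static int start_transaction_like(int num_items, enum reserve_flush flush)
{
	return (num_items == 0 && flush == RESERVE_NO_FLUSH) ? 0 : -1;
}

int main(void)
{
	/* Before: start_transaction_like(0, 0); -- which 0 means what? */
	return start_transaction_like(0, RESERVE_NO_FLUSH);
}
```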
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
@@ -1407,7 +1407,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
 	 * we don't want to overwrite the superblock on the drive,
 	 * so we make sure to start at an offset of at least 1MB
 	 */
-	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
+	search_start = max_t(u64, root->fs_info->alloc_start, SZ_1M);
 	return find_free_dev_extent_start(trans->transaction, device,
 					  num_bytes, search_start, start, len);
 }
@@ -1643,7 +1643,6 @@ static void update_dev_time(char *path_name)
 		return;
 	file_update_time(filp);
 	filp_close(filp, NULL);
-	return;
 }
 
 static int btrfs_rm_dev_item(struct btrfs_root *root,
@@ -3407,7 +3406,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
 	list_for_each_entry(device, devices, dev_list) {
 		old_size = btrfs_device_get_total_bytes(device);
 		size_to_free = div_factor(old_size, 1);
-		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
+		size_to_free = min_t(u64, size_to_free, SZ_1M);
 		if (!device->writeable ||
 		    btrfs_device_get_total_bytes(device) -
 		    btrfs_device_get_bytes_used(device) > size_to_free ||
@@ -4460,7 +4459,7 @@ static int btrfs_cmp_device_info(const void *a, const void *b)
 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
 {
 	/* TODO allow them to set a preferred stripe size */
-	return 64 * 1024;
+	return SZ_64K;
 }
 
 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
@@ -4528,21 +4527,21 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	ncopies = btrfs_raid_array[index].ncopies;
 
 	if (type & BTRFS_BLOCK_GROUP_DATA) {
-		max_stripe_size = 1024 * 1024 * 1024;
+		max_stripe_size = SZ_1G;
 		max_chunk_size = 10 * max_stripe_size;
 		if (!devs_max)
 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
 		/* for larger filesystems, use larger metadata chunks */
-		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
-			max_stripe_size = 1024 * 1024 * 1024;
+		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
+			max_stripe_size = SZ_1G;
 		else
-			max_stripe_size = 256 * 1024 * 1024;
+			max_stripe_size = SZ_256M;
 		max_chunk_size = max_stripe_size;
 		if (!devs_max)
 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
-		max_stripe_size = 32 * 1024 * 1024;
+		max_stripe_size = SZ_32M;
 		max_chunk_size = 2 * max_stripe_size;
 		if (!devs_max)
 			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
......
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
@@ -26,7 +26,7 @@
 
 extern struct mutex uuid_mutex;
 
-#define BTRFS_STRIPE_LEN	(64 * 1024)
+#define BTRFS_STRIPE_LEN	SZ_64K
 
 struct buffer_head;
 struct btrfs_pending_bios {
......