Commit 8c27cb35 authored by Linus Torvalds

Merge branch 'for-4.13-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs updates from David Sterba:
 "The core updates improve error handling (mostly related to bios), with
  the usual incremental work on the GFP_NOFS (mis)use removal,
  refactoring or cleanups. Except the two top patches, all have been in
  for-next for an extensive amount of time.

  User visible changes:

   - statx support

   - quota override tunable

   - improved compression thresholds

   - obsoleted mount option alloc_start

  Core updates:

   - bio-related updates:
       - faster bio cloning
       - no allocation failures
       - preallocated flush bios

   - more kvzalloc use, memalloc_nofs protections, GFP_NOFS updates
     (see the sketch after this message)

   - prep work for btree_inode removal

   - dir-item validation

   - qgroup fixes and updates

   - cleanups:
       - removed unused struct members, unused code, refactoring
       - argument refactoring (fs_info/root, caller -> callee sink)
       - SEARCH_TREE ioctl docs"
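The kvzalloc and memalloc_nofs items in the core-updates list above share one pattern, which the diffs below apply call site by call site. A minimal sketch of that pattern, assuming a hypothetical helper btrfs_example_alloc() that is not part of this patchset:

	#include <linux/mm.h>		/* kvzalloc(), kvfree() */
	#include <linux/sched/mm.h>	/* memalloc_nofs_save()/restore() */

	/* hypothetical helper, illustrative only */
	static void *btrfs_example_alloc(size_t size)
	{
		unsigned int nofs_flag;
		void *p;

		/*
		 * Inside a NOFS scope every allocation implicitly behaves
		 * as GFP_NOFS, so the vmalloc fallback of kvzalloc() is
		 * safe even though vmalloc cannot take GFP_NOFS directly.
		 */
		nofs_flag = memalloc_nofs_save();
		p = kvzalloc(size, GFP_KERNEL);	/* kmalloc first, vmalloc fallback */
		memalloc_nofs_restore(nofs_flag);

		return p;	/* free with kvfree(), never kfree()/vfree() */
	}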

* 'for-4.13-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (115 commits)
  btrfs: Remove false alert when fiemap range is smaller than on-disk extent
  btrfs: Don't clear SGID when inheriting ACLs
  btrfs: fix integer overflow in calc_reclaim_items_nr
  btrfs: scrub: fix target device initialization while setting up scrub context
  btrfs: qgroup: Fix qgroup reserved space underflow by only freeing reserved ranges
  btrfs: qgroup: Introduce extent changeset for qgroup reserve functions
  btrfs: qgroup: Fix qgroup reserved space underflow caused by buffered write and quotas being enabled
  btrfs: qgroup: Return actually freed bytes for qgroup release or free data
  btrfs: qgroup: Cleanup btrfs_qgroup_prepare_account_extents function
  btrfs: qgroup: Add quick exit for non-fs extents
  Btrfs: rework delayed ref total_bytes_pinned accounting
  Btrfs: return old and new total ref mods when adding delayed refs
  Btrfs: always account pinned bytes when dropping a tree block ref
  Btrfs: update total_bytes_pinned when pinning down extents
  Btrfs: make BUG_ON() in add_pinned_bytes() an ASSERT()
  Btrfs: make add_pinned_bytes() take an s64 num_bytes instead of u64
  btrfs: fix validation of XATTR_ITEM dir items
  btrfs: Verify dir_item in iterate_object_props
  btrfs: Check name_len before in btrfs_del_root_ref
  btrfs: Check name_len before reading btrfs_get_name
  ...
@@ -78,12 +78,6 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
 	switch (type) {
 	case ACL_TYPE_ACCESS:
 		name = XATTR_NAME_POSIX_ACL_ACCESS;
-		if (acl) {
-			ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
-			if (ret)
-				return ret;
-		}
-		ret = 0;
 		break;
 	case ACL_TYPE_DEFAULT:
 		if (!S_ISDIR(inode->i_mode))
@@ -119,6 +113,13 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
 int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
+	int ret;
+
+	if (type == ACL_TYPE_ACCESS && acl) {
+		ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+		if (ret)
+			return ret;
+	}
 	return __btrfs_set_acl(NULL, inode, acl, type);
 }
...
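The two hunks above move the mode rewrite out of __btrfs_set_acl() and into btrfs_set_acl(), the entry point for user-initiated ACL changes. ACL inheritance at file creation calls the double-underscore helper directly and therefore no longer clears the SGID bit that posix_acl_create() decided to keep. A hedged sketch of that creation path, simplified from the real acl.c rather than copied from this diff:

	static int btrfs_init_acl_sketch(struct btrfs_trans_handle *trans,
					 struct inode *inode, struct inode *dir)
	{
		struct posix_acl *default_acl, *acl;
		int ret;

		/* posix_acl_create() has already computed inode->i_mode,
		 * including any SGID bit inherited from the parent dir */
		ret = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
		if (ret)
			return ret;

		if (default_acl) {
			/* direct call: no posix_acl_update_mode(), mode untouched */
			ret = __btrfs_set_acl(trans, inode, default_acl,
					      ACL_TYPE_DEFAULT);
			posix_acl_release(default_acl);
		}
		if (acl) {
			if (!ret)
				ret = __btrfs_set_acl(trans, inode, acl,
						      ACL_TYPE_ACCESS);
			posix_acl_release(acl);
		}
		return ret;
	}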
@@ -16,7 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/rbtree.h>
 #include "ctree.h"
 #include "disk-io.h"
@@ -2305,7 +2305,7 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
 	size_t alloc_bytes;
 
 	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
-	data = vmalloc(alloc_bytes);
+	data = kvmalloc(alloc_bytes, GFP_KERNEL);
 	if (!data)
 		return ERR_PTR(-ENOMEM);
@@ -2339,9 +2339,9 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
 	if (IS_ERR(fspath))
 		return (void *)fspath;
 
-	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
+	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
 	if (!ifp) {
-		vfree(fspath);
+		kvfree(fspath);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -2356,6 +2356,6 @@ void free_ipath(struct inode_fs_paths *ipath)
 {
 	if (!ipath)
 		return;
-	vfree(ipath->fspath);
+	kvfree(ipath->fspath);
 	kfree(ipath);
 }
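These hunks are the kvmalloc pairing rule in miniature: a buffer that may come from either kmalloc() or vmalloc() must always be released with kvfree(). A minimal sketch of the same shape as init_data_container() above, assuming only the ERR_PTR convention of the surrounding code:

	#include <linux/mm.h>
	#include <linux/err.h>

	/* illustrative only; mirrors the init_data_container() pattern */
	static void *example_alloc_container(u32 total_bytes, size_t min_size)
	{
		size_t alloc_bytes = max_t(size_t, total_bytes, min_size);
		void *data;

		data = kvmalloc(alloc_bytes, GFP_KERNEL);	/* may be vmalloc-backed */
		if (!data)
			return ERR_PTR(-ENOMEM);
		return data;	/* the owner must call kvfree(data) */
	}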
@@ -94,7 +94,7 @@
 #include <linux/mutex.h>
 #include <linux/genhd.h>
 #include <linux/blkdev.h>
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/string.h>
 #include "ctree.h"
 #include "disk-io.h"
@@ -1638,12 +1638,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 		struct bio *bio;
 		unsigned int j;
 
-		bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
-		if (!bio) {
-			pr_info("btrfsic: bio_alloc() for %u pages failed!\n",
-				num_pages - i);
-			return -1;
-		}
+		bio = btrfs_io_bio_alloc(num_pages - i);
 		bio->bi_bdev = block_ctx->dev->bdev;
 		bio->bi_iter.bi_sector = dev_bytenr >> 9;
 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
@@ -1668,14 +1663,8 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 		dev_bytenr += (j - i) * PAGE_SIZE;
 		i = j;
 	}
-	for (i = 0; i < num_pages; i++) {
+	for (i = 0; i < num_pages; i++)
 		block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
-		if (!block_ctx->datav[i]) {
-			pr_info("btrfsic: kmap() failed (dev %s)!\n",
-				block_ctx->dev->name);
-			return -1;
-		}
-	}
 	return block_ctx->len;
 }
@@ -2822,44 +2811,47 @@ static void __btrfsic_submit_bio(struct bio *bio)
 	dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
 	if (NULL != dev_state &&
 	    (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
-		unsigned int i;
+		unsigned int i = 0;
 		u64 dev_bytenr;
 		u64 cur_bytenr;
-		struct bio_vec *bvec;
+		struct bio_vec bvec;
+		struct bvec_iter iter;
 		int bio_is_patched;
 		char **mapped_datav;
+		unsigned int segs = bio_segments(bio);
 
 		dev_bytenr = 512 * bio->bi_iter.bi_sector;
 		bio_is_patched = 0;
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
 			pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
-			       bio_op(bio), bio->bi_opf, bio->bi_vcnt,
+			       bio_op(bio), bio->bi_opf, segs,
 			       (unsigned long long)bio->bi_iter.bi_sector,
 			       dev_bytenr, bio->bi_bdev);
 
-		mapped_datav = kmalloc_array(bio->bi_vcnt,
+		mapped_datav = kmalloc_array(segs,
 					     sizeof(*mapped_datav), GFP_NOFS);
 		if (!mapped_datav)
 			goto leave;
 		cur_bytenr = dev_bytenr;
-		bio_for_each_segment_all(bvec, bio, i) {
-			BUG_ON(bvec->bv_len != PAGE_SIZE);
-			mapped_datav[i] = kmap(bvec->bv_page);
+
+		bio_for_each_segment(bvec, bio, iter) {
+			BUG_ON(bvec.bv_len != PAGE_SIZE);
+			mapped_datav[i] = kmap(bvec.bv_page);
+			i++;
 
 			if (dev_state->state->print_mask &
 			    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
 				pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
-				       i, cur_bytenr, bvec->bv_len, bvec->bv_offset);
-			cur_bytenr += bvec->bv_len;
+					i, cur_bytenr, bvec.bv_len, bvec.bv_offset);
+			cur_bytenr += bvec.bv_len;
 		}
 		btrfsic_process_written_block(dev_state, dev_bytenr,
-					      mapped_datav, bio->bi_vcnt,
+					      mapped_datav, segs,
 					      bio, &bio_is_patched,
 					      NULL, bio->bi_opf);
-		bio_for_each_segment_all(bvec, bio, i)
-			kunmap(bvec->bv_page);
+		bio_for_each_segment(bvec, bio, iter)
+			kunmap(bvec.bv_page);
 		kfree(mapped_datav);
 	} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
 		if (dev_state->state->print_mask &
@@ -2923,13 +2915,10 @@ int btrfsic_mount(struct btrfs_fs_info *fs_info,
 		       fs_info->sectorsize, PAGE_SIZE);
 		return -1;
 	}
-	state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+	state = kvzalloc(sizeof(*state), GFP_KERNEL);
 	if (!state) {
-		state = vzalloc(sizeof(*state));
-		if (!state) {
-			pr_info("btrfs check-integrity: vzalloc() failed!\n");
-			return -1;
-		}
+		pr_info("btrfs check-integrity: allocation failed!\n");
+		return -1;
 	}
 
 	if (!btrfsic_is_initialized) {
...
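The __btrfsic_submit_bio() hunk above swaps bio_for_each_segment_all() for bio_for_each_segment(). With the faster bio cloning elsewhere in this series, a submitted bio may be a clone that shares its bio_vec array, so only the iterator-based walk, which honors bio->bi_iter, is safe. A hedged sketch of the clone-safe walk, with the actual inspection work elided:

	static void example_walk_segments(struct bio *bio)
	{
		struct bio_vec bvec;	/* a copy, not a pointer into bi_io_vec */
		struct bvec_iter iter;

		bio_for_each_segment(bvec, bio, iter) {	/* honors bio->bi_iter */
			void *addr = kmap(bvec.bv_page);

			/* inspect bvec.bv_len bytes at addr + bvec.bv_offset */
			(void)addr;
			kunmap(bvec.bv_page);
		}
	}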
@@ -32,6 +32,7 @@
 #include <linux/writeback.h>
 #include <linux/bit_spinlock.h>
 #include <linux/slab.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -42,48 +43,7 @@
 #include "extent_io.h"
 #include "extent_map.h"
 
-struct compressed_bio {
-	/* number of bios pending for this compressed extent */
-	refcount_t pending_bios;
-
-	/* the pages with the compressed data on them */
-	struct page **compressed_pages;
-
-	/* inode that owns this data */
-	struct inode *inode;
-
-	/* starting offset in the inode for our pages */
-	u64 start;
-
-	/* number of bytes in the inode we're working on */
-	unsigned long len;
-
-	/* number of bytes on disk */
-	unsigned long compressed_len;
-
-	/* the compression algorithm for this bio */
-	int compress_type;
-
-	/* number of compressed pages in the array */
-	unsigned long nr_pages;
-
-	/* IO errors */
-	int errors;
-	int mirror_num;
-
-	/* for reads, this is the bio we are copying the data into */
-	struct bio *orig_bio;
-
-	/*
-	 * the start of a variable length array of checksums only
-	 * used by reads
-	 */
-	u32 sums;
-};
-
-static int btrfs_decompress_bio(int type, struct page **pages_in,
-				u64 disk_start, struct bio *orig_bio,
-				size_t srclen);
+static int btrfs_decompress_bio(struct compressed_bio *cb);
 
 static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
 				      unsigned long disk_size)
@@ -94,12 +54,6 @@ static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
 		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
 }
 
-static struct bio *compressed_bio_alloc(struct block_device *bdev,
-					u64 first_byte, gfp_t gfp_flags)
-{
-	return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags);
-}
-
 static int check_compressed_csum(struct btrfs_inode *inode,
 				 struct compressed_bio *cb,
 				 u64 disk_start)
@@ -173,11 +127,8 @@ static void end_compressed_bio_read(struct bio *bio)
 	/* ok, we're the last bio for this extent, lets start
 	 * the decompression.
 	 */
-	ret = btrfs_decompress_bio(cb->compress_type,
-				   cb->compressed_pages,
-				   cb->start,
-				   cb->orig_bio,
-				   cb->compressed_len);
+	ret = btrfs_decompress_bio(cb);
+
 csum_failed:
 	if (ret)
 		cb->errors = 1;
@@ -355,11 +306,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	bdev = fs_info->fs_devices->latest_bdev;
 
-	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
-	if (!bio) {
-		kfree(cb);
-		return BLK_STS_RESOURCE;
-	}
+	bio = btrfs_bio_alloc(bdev, first_byte);
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 	bio->bi_private = cb;
 	bio->bi_end_io = end_compressed_bio_write;
@@ -406,8 +353,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 			bio_put(bio);
 
-			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
-			BUG_ON(!bio);
+			bio = btrfs_bio_alloc(bdev, first_byte);
 			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 			bio->bi_private = cb;
 			bio->bi_end_io = end_compressed_bio_write;
@@ -650,9 +596,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	/* include any pages we added in add_ra-bio_pages */
 	cb->len = bio->bi_iter.bi_size;
 
-	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
-	if (!comp_bio)
-		goto fail2;
+	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
 	bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
 	comp_bio->bi_private = cb;
 	comp_bio->bi_end_io = end_compressed_bio_read;
@@ -703,9 +647,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 			bio_put(comp_bio);
 
-			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
-							GFP_NOFS);
-			BUG_ON(!comp_bio);
+			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
 			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
 			comp_bio->bi_private = cb;
 			comp_bio->bi_end_io = end_compressed_bio_read;
@@ -801,6 +743,7 @@ static struct list_head *find_workspace(int type)
 	struct list_head *workspace;
 	int cpus = num_online_cpus();
 	int idx = type - 1;
+	unsigned nofs_flag;
 	struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
 	spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
@@ -830,7 +773,15 @@ static struct list_head *find_workspace(int type)
 	atomic_inc(total_ws);
 	spin_unlock(ws_lock);
 
+	/*
+	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
+	 * to turn it off here because we might get called from the restricted
+	 * context of btrfs_compress_bio/btrfs_compress_pages
+	 */
+	nofs_flag = memalloc_nofs_save();
 	workspace = btrfs_compress_op[idx]->alloc_workspace();
+	memalloc_nofs_restore(nofs_flag);
+
 	if (IS_ERR(workspace)) {
 		atomic_dec(total_ws);
 		wake_up(ws_wait);
@@ -961,19 +912,16 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
  * be contiguous.  They all correspond to the range of bytes covered by
  * the compressed extent.
  */
-static int btrfs_decompress_bio(int type, struct page **pages_in,
-				u64 disk_start, struct bio *orig_bio,
-				size_t srclen)
+static int btrfs_decompress_bio(struct compressed_bio *cb)
 {
 	struct list_head *workspace;
 	int ret;
+	int type = cb->compress_type;
 
 	workspace = find_workspace(type);
-
-	ret = btrfs_compress_op[type-1]->decompress_bio(workspace, pages_in,
-							 disk_start, orig_bio,
-							 srclen);
+	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
 	free_workspace(type, workspace);
+
 	return ret;
 }
...
@@ -34,6 +34,45 @@
 /* Maximum size of data before compression */
 #define BTRFS_MAX_UNCOMPRESSED		(SZ_128K)
 
+struct compressed_bio {
+	/* number of bios pending for this compressed extent */
+	refcount_t pending_bios;
+
+	/* the pages with the compressed data on them */
+	struct page **compressed_pages;
+
+	/* inode that owns this data */
+	struct inode *inode;
+
+	/* starting offset in the inode for our pages */
+	u64 start;
+
+	/* number of bytes in the inode we're working on */
+	unsigned long len;
+
+	/* number of bytes on disk */
+	unsigned long compressed_len;
+
+	/* the compression algorithm for this bio */
+	int compress_type;
+
+	/* number of compressed pages in the array */
+	unsigned long nr_pages;
+
+	/* IO errors */
+	int errors;
+	int mirror_num;
+
+	/* for reads, this is the bio we are copying the data into */
+	struct bio *orig_bio;
+
+	/*
+	 * the start of a variable length array of checksums only
+	 * used by reads
+	 */
+	u32 sums;
+};
+
 void btrfs_init_compress(void);
 void btrfs_exit_compress(void);
@@ -78,10 +117,7 @@ struct btrfs_compress_op {
 			      unsigned long *total_out);
 
 	int (*decompress_bio)(struct list_head *workspace,
-			      struct page **pages_in,
-			      u64 disk_start,
-			      struct bio *orig_bio,
-			      size_t srclen);
+			      struct compressed_bio *cb);
 
 	int (*decompress)(struct list_head *workspace,
 			  unsigned char *data_in,
...
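With struct compressed_bio now visible in the header, a compression backend's decompress_bio hook shrinks to two arguments. A hypothetical backend stub, showing how each of the old five parameters is recovered from the cb that replaces them:

	static int example_decompress_bio(struct list_head *ws,
					  struct compressed_bio *cb)
	{
		struct page **pages_in = cb->compressed_pages;	/* was pages_in */
		u64 disk_start = cb->start;			/* was disk_start */
		struct bio *orig_bio = cb->orig_bio;		/* was orig_bio */
		size_t srclen = cb->compressed_len;		/* was srclen */

		/* ... inflate pages_in into the pages of orig_bio ... */
		(void)ws; (void)pages_in; (void)disk_start;
		(void)orig_bio; (void)srclen;
		return 0;
	}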
@@ -19,7 +19,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -3667,14 +3667,14 @@ static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
 	/* make room in the right data area */
 	data_end = leaf_data_end(fs_info, right);
 	memmove_extent_buffer(right,
-			      btrfs_leaf_data(right) + data_end - push_space,
-			      btrfs_leaf_data(right) + data_end,
+			      BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
+			      BTRFS_LEAF_DATA_OFFSET + data_end,
 			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
 
 	/* copy from the left data area */
-	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
+	copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
 		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
-		     btrfs_leaf_data(left) + leaf_data_end(fs_info, left),
+		     BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, left),
 		     push_space);
 
 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
@@ -3888,9 +3888,9 @@ static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
 		     btrfs_item_offset_nr(right, push_items - 1);
 
-	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
+	copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
 		     leaf_data_end(fs_info, left) - push_space,
-		   btrfs_leaf_data(right) +
+		   BTRFS_LEAF_DATA_OFFSET +
 		   btrfs_item_offset_nr(right, push_items - 1),
 		   push_space);
 	old_left_nritems = btrfs_header_nritems(left);
@@ -3917,9 +3917,9 @@ static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
 	if (push_items < right_nritems) {
 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
 						  leaf_data_end(fs_info, right);
-		memmove_extent_buffer(right, btrfs_leaf_data(right) +
+		memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
 				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
-				      btrfs_leaf_data(right) +
+				      BTRFS_LEAF_DATA_OFFSET +
 				      leaf_data_end(fs_info, right), push_space);
 
 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
@@ -4069,8 +4069,8 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
 		   nritems * sizeof(struct btrfs_item));
 
 	copy_extent_buffer(right, l,
-		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(fs_info) -
-		     data_copy_size, btrfs_leaf_data(l) +
+		     BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
+		     data_copy_size, BTRFS_LEAF_DATA_OFFSET +
 		     leaf_data_end(fs_info, l), data_copy_size);
 
 	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
@@ -4607,8 +4607,8 @@ void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
 	/* shift the data */
 	if (from_end) {
-		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
-			      data_end + size_diff, btrfs_leaf_data(leaf) +
+		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
+			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
 			      data_end, old_data_start + new_size - data_end);
 	} else {
 		struct btrfs_disk_key disk_key;
@@ -4634,8 +4634,8 @@ void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
 			}
 		}
 
-		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
-			      data_end + size_diff, btrfs_leaf_data(leaf) +
+		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
+			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
 			      data_end, old_data_start - data_end);
 
 		offset = btrfs_disk_key_offset(&disk_key);
@@ -4707,8 +4707,8 @@ void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 	}
 
 	/* shift the data */
-	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
-		      data_end - data_size, btrfs_leaf_data(leaf) +
+	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
+		      data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
 		      data_end, old_data - data_end);
 
 	data_end = old_data;
@@ -4790,8 +4790,8 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
 			      (nritems - slot) * sizeof(struct btrfs_item));
 
 		/* shift the data */
-		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
-			      data_end - total_data, btrfs_leaf_data(leaf) +
+		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
+			      data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
 			      data_end, old_data - data_end);
 		data_end = old_data;
 	}
@@ -4983,9 +4983,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	if (slot + nr != nritems) {
 		int data_end = leaf_data_end(fs_info, leaf);
 
-		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
+		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
 			      data_end + dsize,
-			      btrfs_leaf_data(leaf) + data_end,
+			      BTRFS_LEAF_DATA_OFFSET + data_end,
 			      last_off - data_end);
 
 		for (i = slot + nr; i < nritems; i++) {
...
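Every ctree.c hunk above is one mechanical substitution, justified by the btrfs_leaf_data() removal in the header below: the helper ignored its argument and always returned offsetof(struct btrfs_leaf, items). A sketch of the leaf layout those offsets index into; the item array grows forward from the header while item data grows backward from the end of the block:

	/*
	 *  0              BTRFS_LEAF_DATA_OFFSET                    nodesize
	 *  +--------------+-------+-------+-...-+------------+-...-+-------+
	 *  | btrfs_header | item0 | item1 | ... | free space | dat1 | dat0 |
	 *  +--------------+-------+-------+-...-+------------+-...-+-------+
	 *
	 * btrfs_item_offset_nr() is relative to BTRFS_LEAF_DATA_OFFSET,
	 * which is why the casting macros below add the two together.
	 */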
@@ -48,7 +48,6 @@ struct btrfs_trans_handle;
 struct btrfs_transaction;
 struct btrfs_pending_snapshot;
 extern struct kmem_cache *btrfs_trans_handle_cachep;
-extern struct kmem_cache *btrfs_transaction_cachep;
 extern struct kmem_cache *btrfs_bit_radix_cachep;
 extern struct kmem_cache *btrfs_path_cachep;
 extern struct kmem_cache *btrfs_free_space_cachep;
@@ -716,6 +715,10 @@ struct btrfs_delayed_root;
 #define BTRFS_FS_BTREE_ERR			11
 #define BTRFS_FS_LOG1_ERR			12
 #define BTRFS_FS_LOG2_ERR			13
+#define BTRFS_FS_QUOTA_OVERRIDE			14
+/* Used to record internally whether fs has been frozen */
+#define BTRFS_FS_FROZEN				15
+
 /*
  * Indicate that a whole-filesystem exclusive operation is running
  * (device replace, resize, device add/delete, balance)
@@ -748,8 +751,7 @@ struct btrfs_fs_info {
 	struct rb_root block_group_cache_tree;
 
 	/* keep track of unallocated space */
-	spinlock_t free_chunk_lock;
-	u64 free_chunk_space;
+	atomic64_t free_chunk_space;
 
 	struct extent_io_tree freed_extents[2];
 	struct extent_io_tree *pinned_extents;
@@ -797,17 +799,7 @@ struct btrfs_fs_info {
 	 * so it is also safe.
 	 */
 	u64 max_inline;
-	/*
-	 * Protected by ->chunk_mutex and sb->s_umount.
-	 *
-	 * The reason that we use two lock to protect it is because only
-	 * remount and mount operations can change it and these two operations
-	 * are under sb->s_umount, but the read side (chunk allocation) can not
-	 * acquire sb->s_umount or the deadlock would happen. So we use two
-	 * locks to protect it. On the write side, we must acquire two locks,
-	 * and on the read side, we just need acquire one of them.
-	 */
-	u64 alloc_start;
+
 	struct btrfs_transaction *running_transaction;
 	wait_queue_head_t transaction_throttle;
 	wait_queue_head_t transaction_wait;
@@ -1107,9 +1099,6 @@ struct btrfs_fs_info {
 	 */
 	struct list_head pinned_chunks;
 
-	/* Used to record internally whether fs has been frozen */
-	int fs_frozen;
-
 	/* Cached block sizes */
 	u32 nodesize;
 	u32 sectorsize;
@@ -1277,21 +1266,20 @@ struct btrfs_root {
 	/* For qgroup metadata space reserve */
 	atomic64_t qgroup_meta_rsv;
 };
 
 static inline u32 btrfs_inode_sectorsize(const struct inode *inode)
 {
 	return btrfs_sb(inode->i_sb)->sectorsize;
 }
 
-static inline u32 __BTRFS_LEAF_DATA_SIZE(u32 blocksize)
-{
-	return blocksize - sizeof(struct btrfs_header);
-}
-
 static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
 {
-	return __BTRFS_LEAF_DATA_SIZE(info->nodesize);
+	return info->nodesize - sizeof(struct btrfs_header);
 }
 
+#define BTRFS_LEAF_DATA_OFFSET		offsetof(struct btrfs_leaf, items)
+
 static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
 {
 	return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
@@ -1553,8 +1541,27 @@ static inline void btrfs_set_##name(type *s, u##bits val) \
 	s->member = cpu_to_le##bits(val);			\
 }
 
+static inline u64 btrfs_device_total_bytes(struct extent_buffer *eb,
+					   struct btrfs_dev_item *s)
+{
+	BUILD_BUG_ON(sizeof(u64) !=
+		     sizeof(((struct btrfs_dev_item *)0))->total_bytes);
+	return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item,
+					    total_bytes));
+}
+static inline void btrfs_set_device_total_bytes(struct extent_buffer *eb,
+						struct btrfs_dev_item *s,
+						u64 val)
+{
+	BUILD_BUG_ON(sizeof(u64) !=
+		     sizeof(((struct btrfs_dev_item *)0))->total_bytes);
+	WARN_ON(!IS_ALIGNED(val, eb->fs_info->sectorsize));
+	btrfs_set_64(eb, s, offsetof(struct btrfs_dev_item, total_bytes), val);
+}
+
 BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64);
-BTRFS_SETGET_FUNCS(device_total_bytes, struct btrfs_dev_item, total_bytes, 64);
 BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64);
 BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32);
 BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32);
@@ -2324,10 +2331,6 @@ static inline int btrfs_super_csum_size(struct btrfs_super_block *s)
 	return btrfs_csum_sizes[t];
 }
 
-static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
-{
-	return offsetof(struct btrfs_leaf, items);
-}
-
 /*
  * The leaf data grows from end-to-front in the node.
@@ -2538,11 +2541,11 @@ BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right,
 
 /* helper function to cast into the data area of the leaf. */
 #define btrfs_item_ptr(leaf, slot, type) \
-	((type *)(btrfs_leaf_data(leaf) + \
+	((type *)(BTRFS_LEAF_DATA_OFFSET + \
 	btrfs_item_offset_nr(leaf, slot)))
 
 #define btrfs_item_ptr_offset(leaf, slot) \
-	((unsigned long)(btrfs_leaf_data(leaf) + \
+	((unsigned long)(BTRFS_LEAF_DATA_OFFSET + \
 	btrfs_item_offset_nr(leaf, slot)))
 
 static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
@@ -2680,7 +2683,9 @@ void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 				       struct btrfs_fs_info *fs_info);
-u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
+u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info);
+u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info);
+u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info);
 void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
 
 enum btrfs_reserve_flush_enum {
@@ -2703,9 +2708,13 @@ enum btrfs_flush_state {
 	COMMIT_TRANS		=	6,
 };
 
-int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len);
 int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes);
-void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
+int btrfs_check_data_free_space(struct inode *inode,
+			struct extent_changeset **reserved, u64 start, u64 len);
+void btrfs_free_reserved_data_space(struct inode *inode,
+			struct extent_changeset *reserved, u64 start, u64 len);
+void btrfs_delalloc_release_space(struct inode *inode,
+			struct extent_changeset *reserved, u64 start, u64 len);
 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
 					    u64 len);
 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
@@ -2722,8 +2731,8 @@ void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
 				      struct btrfs_block_rsv *rsv);
 int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
 void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes);
-int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len);
-void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len);
+int btrfs_delalloc_reserve_space(struct inode *inode,
+			struct extent_changeset **reserved, u64 start, u64 len);
 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
 					      unsigned short type);
@@ -3031,12 +3040,14 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
 					  const char *name, u16 name_len,
 					  int mod);
 int verify_dir_item(struct btrfs_fs_info *fs_info,
-		    struct extent_buffer *leaf,
+		    struct extent_buffer *leaf, int slot,
 		    struct btrfs_dir_item *dir_item);
 struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
						 struct btrfs_path *path,
						 const char *name,
						 int name_len);
+bool btrfs_is_name_len_valid(struct extent_buffer *leaf, int slot,
+			     unsigned long start, u16 name_len);
 
 /* orphan.c */
 int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
@@ -3171,6 +3182,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags);
+void btrfs_set_range_writeback(void *private_data, u64 start, u64 end);
 int btrfs_page_mkwrite(struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
...
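The reservation prototypes above all gain an extent_changeset, the qgroup series' record of which byte ranges were actually reserved, so that a later release returns exactly what was taken even when ranges overlap. A hedged sketch of the new calling convention, with the I/O itself elided and extent_changeset_free() assumed to be the companion helper this series adds:

	static int example_reserve_then_fail(struct inode *inode,
					     u64 start, u64 len)
	{
		struct extent_changeset *data_reserved = NULL;
		int ret;

		/* reserves data + qgroup space, recording the ranges in
		 * data_reserved (allocated on first use) */
		ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
						   start, len);
		if (ret < 0)
			goto out;

		/* ... attempt the buffered write; assume it failed ... */

		/* give back only what the changeset says was reserved */
		btrfs_delalloc_release_space(inode, data_reserved, start, len);
	out:
		extent_changeset_free(data_reserved);
		return ret;
	}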
@@ -470,7 +470,8 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
 static noinline void
 update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_node *existing,
-			 struct btrfs_delayed_ref_node *update)
+			 struct btrfs_delayed_ref_node *update,
+			 int *old_ref_mod_ret)
 {
 	struct btrfs_delayed_ref_head *existing_ref;
 	struct btrfs_delayed_ref_head *ref;
@@ -523,6 +524,8 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
 	 * currently, for refs we just added we know we're a-ok.
 	 */
 	old_ref_mod = existing_ref->total_ref_mod;
+	if (old_ref_mod_ret)
+		*old_ref_mod_ret = old_ref_mod;
 	existing->ref_mod += update->ref_mod;
 	existing_ref->total_ref_mod += update->ref_mod;
@@ -550,7 +553,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_delayed_ref_node *ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
-		     int action, int is_data, int *qrecord_inserted_ret)
+		     int action, int is_data, int *qrecord_inserted_ret,
+		     int *old_ref_mod, int *new_ref_mod)
 {
 	struct btrfs_delayed_ref_head *existing;
 	struct btrfs_delayed_ref_head *head_ref = NULL;
@@ -638,7 +642,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
-		update_existing_head_ref(delayed_refs, &existing->node, ref);
+		update_existing_head_ref(delayed_refs, &existing->node, ref,
+					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
@@ -646,6 +651,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
+		if (old_ref_mod)
+			*old_ref_mod = 0;
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
@@ -655,6 +662,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
+	if (new_ref_mod)
+		*new_ref_mod = head_ref->total_ref_mod;
	return head_ref;
 }
@@ -778,7 +787,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
-			       struct btrfs_delayed_extent_op *extent_op)
+			       struct btrfs_delayed_extent_op *extent_op,
+			       int *old_ref_mod, int *new_ref_mod)
 {
 	struct btrfs_delayed_tree_ref *ref;
 	struct btrfs_delayed_ref_head *head_ref;
@@ -813,7 +823,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, 0, 0, action, 0,
-					&qrecord_inserted);
+					&qrecord_inserted, old_ref_mod,
+					new_ref_mod);
 
	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
@@ -838,7 +849,8 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
-			       u64 owner, u64 offset, u64 reserved, int action)
+			       u64 owner, u64 offset, u64 reserved, int action,
+			       int *old_ref_mod, int *new_ref_mod)
 {
 	struct btrfs_delayed_data_ref *ref;
 	struct btrfs_delayed_ref_head *head_ref;
@@ -878,7 +890,8 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, ref_root, reserved,
-					action, 1, &qrecord_inserted);
+					action, 1, &qrecord_inserted,
+					old_ref_mod, new_ref_mod);
 
	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
@@ -909,7 +922,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
 
	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
-			     extent_op->is_data, NULL);
+			     extent_op->is_data, NULL, NULL, NULL);
 
	spin_unlock(&delayed_refs->lock);
	return 0;
...
@@ -247,12 +247,14 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
-			       struct btrfs_delayed_extent_op *extent_op);
+			       struct btrfs_delayed_extent_op *extent_op,
+			       int *old_ref_mod, int *new_ref_mod);
 int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
-			       u64 owner, u64 offset, u64 reserved, int action);
+			       u64 owner, u64 offset, u64 reserved, int action,
+			       int *old_ref_mod, int *new_ref_mod);
 int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
...
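The new out-parameters serve the total_bytes_pinned rework in the shortlog: a caller watches total_ref_mod cross zero to learn whether the extent just became scheduled for freeing. A hedged sketch of the caller side, with add_pinned_bytes() standing in for the file-local accounting helper:

	static void example_drop_tree_ref(struct btrfs_fs_info *fs_info,
					  struct btrfs_trans_handle *trans,
					  u64 bytenr, u64 num_bytes,
					  u64 parent, u64 root_objectid,
					  int level, u64 owner)
	{
		int old_ref_mod, new_ref_mod;
		int ret;

		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
						 num_bytes, parent,
						 root_objectid, level,
						 BTRFS_DROP_DELAYED_REF, NULL,
						 &old_ref_mod, &new_ref_mod);

		/* total_ref_mod went from >= 0 to < 0: the extent is now
		 * expected to be freed, so its bytes count as pinned */
		if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
			add_pinned_bytes(fs_info, num_bytes, owner,
					 root_objectid);
	}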
@@ -388,7 +388,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
 	if (ret)
 		btrfs_err(fs_info, "kobj add dev failed %d", ret);
 
-	btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 
 	/* force writing the updated state information to disk */
 	trans = btrfs_start_transaction(root, 0);
@@ -507,7 +507,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 		return ret;
 	}
-	btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 
 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
...
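Both hunks are the same readability fix: the count argument of btrfs_wait_ordered_roots() is u64, so the literal -1 only meant "wait on everything" via implicit integer conversion; U64_MAX says so directly. The shape of the call both sites now make:

	/* U64_MAX == (u64)-1: wait on all ordered extents in all roots */
	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);

As the diff shows, the range arguments still use (u64)-1; only the count was spelled out.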
@@ -395,8 +395,6 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
 	leaf = path->nodes[0];
 	dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
-	if (verify_dir_item(fs_info, leaf, dir_item))
-		return NULL;
 
 	total_len = btrfs_item_size_nr(leaf, path->slots[0]);
 	while (cur < total_len) {
@@ -405,6 +403,8 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
 			     btrfs_dir_data_len(leaf, dir_item);
 		name_ptr = (unsigned long)(dir_item + 1);
 
+		if (verify_dir_item(fs_info, leaf, path->slots[0], dir_item))
+			return NULL;
 		if (btrfs_dir_name_len(leaf, dir_item) == name_len &&
 		    memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
 			return dir_item;
@@ -453,9 +453,11 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
 
 int verify_dir_item(struct btrfs_fs_info *fs_info,
		    struct extent_buffer *leaf,
+		    int slot,
		    struct btrfs_dir_item *dir_item)
 {
 	u16 namelen = BTRFS_NAME_LEN;
+	int ret;
 	u8 type = btrfs_dir_type(leaf, dir_item);
 
 	if (type >= BTRFS_FT_MAX) {
@@ -472,6 +474,12 @@ int verify_dir_item(struct btrfs_fs_info *fs_info,
 		return 1;
 	}
 
+	namelen = btrfs_dir_name_len(leaf, dir_item);
+	ret = btrfs_is_name_len_valid(leaf, slot,
+				      (unsigned long)(dir_item + 1), namelen);
+	if (!ret)
+		return 1;
+
 	/* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
 	if ((btrfs_dir_data_len(leaf, dir_item) +
 	     btrfs_dir_name_len(leaf, dir_item)) >
@@ -484,3 +492,67 @@ int verify_dir_item(struct btrfs_fs_info *fs_info,
 
 	return 0;
 }
+
+bool btrfs_is_name_len_valid(struct extent_buffer *leaf, int slot,
+			     unsigned long start, u16 name_len)
+{
+	struct btrfs_fs_info *fs_info = leaf->fs_info;
+	struct btrfs_key key;
+	u32 read_start;
+	u32 read_end;
+	u32 item_start;
+	u32 item_end;
+	u32 size;
+	bool ret = true;
+
+	ASSERT(start > BTRFS_LEAF_DATA_OFFSET);
+
+	read_start = start - BTRFS_LEAF_DATA_OFFSET;
+	read_end = read_start + name_len;
+	item_start = btrfs_item_offset_nr(leaf, slot);
+	item_end = btrfs_item_end_nr(leaf, slot);
+
+	btrfs_item_key_to_cpu(leaf, &key, slot);
+
+	switch (key.type) {
+	case BTRFS_DIR_ITEM_KEY:
+	case BTRFS_XATTR_ITEM_KEY:
+	case BTRFS_DIR_INDEX_KEY:
+		size = sizeof(struct btrfs_dir_item);
+		break;
+	case BTRFS_INODE_REF_KEY:
+		size = sizeof(struct btrfs_inode_ref);
+		break;
+	case BTRFS_INODE_EXTREF_KEY:
+		size = sizeof(struct btrfs_inode_extref);
+		break;
+	case BTRFS_ROOT_REF_KEY:
+	case BTRFS_ROOT_BACKREF_KEY:
+		size = sizeof(struct btrfs_root_ref);
+		break;
+	default:
+		ret = false;
+		goto out;
+	}
+
+	if (read_start < item_start) {
+		ret = false;
+		goto out;
+	}
+	if (read_end > item_end) {
+		ret = false;
+		goto out;
+	}
+
+	/* there shall be item(s) before name */
+	if (read_start - item_start < size) {
+		ret = false;
+		goto out;
+	}
+
+out:
+	if (!ret)
+		btrfs_crit(fs_info, "invalid dir item name len: %u",
+			   (unsigned int)name_len);
+	return ret;
+}
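btrfs_is_name_len_valid() is the common bounds check behind several shortlog entries (btrfs_get_name, btrfs_del_root_ref, the XATTR_ITEM validation). A hedged sketch of the caller pattern, using an inode ref as the example item type; example_copy_ref_name() is hypothetical:

	static int example_copy_ref_name(struct extent_buffer *leaf, int slot,
					 struct btrfs_inode_ref *ref,
					 char *name)
	{
		unsigned long name_ptr = (unsigned long)(ref + 1);
		u16 name_len = btrfs_inode_ref_name_len(leaf, ref);

		/* refuse to read past the item if name_len is corrupted */
		if (!btrfs_is_name_len_valid(leaf, slot, name_ptr, name_len))
			return -EIO;

		read_extent_buffer(leaf, name, name_ptr, name_len);
		return name_len;
	}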
@@ -89,7 +89,6 @@ struct btrfs_end_io_wq {
 	struct btrfs_fs_info *info;
 	blk_status_t status;
 	enum btrfs_wq_endio_type metadata;
-	struct list_head list;
 	struct btrfs_work work;
 };
 
@@ -118,9 +117,9 @@ void btrfs_end_io_wq_exit(void)
  * just before they are sent down the IO stack.
  */
 struct async_submit_bio {
-	struct inode *inode;
+	void *private_data;
+	struct btrfs_fs_info *fs_info;
 	struct bio *bio;
-	struct list_head list;
 	extent_submit_bio_hook_t *submit_bio_start;
 	extent_submit_bio_hook_t *submit_bio_done;
 	int mirror_num;
@@ -871,7 +870,7 @@ static void run_one_async_start(struct btrfs_work *work)
 	blk_status_t ret;
 
 	async = container_of(work, struct async_submit_bio, work);
-	ret = async->submit_bio_start(async->inode, async->bio,
+	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
 	if (ret)
@@ -885,7 +884,7 @@ static void run_one_async_done(struct btrfs_work *work)
 	int limit;
 
 	async = container_of(work, struct async_submit_bio, work);
-	fs_info = BTRFS_I(async->inode)->root->fs_info;
+	fs_info = async->fs_info;
 
 	limit = btrfs_async_submit_limit(fs_info);
 	limit = limit * 2 / 3;
@@ -904,7 +903,7 @@ static void run_one_async_done(struct btrfs_work *work)
 		return;
 	}
 
-	async->submit_bio_done(async->inode, async->bio, async->mirror_num,
+	async->submit_bio_done(async->private_data, async->bio, async->mirror_num,
			       async->bio_flags, async->bio_offset);
 }
 
@@ -916,11 +915,11 @@ static void run_one_async_free(struct btrfs_work *work)
 	kfree(async);
 }
 
-blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
-			struct inode *inode, struct bio *bio, int mirror_num,
-			unsigned long bio_flags, u64 bio_offset,
+blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
+			int mirror_num, unsigned long bio_flags,
+			u64 bio_offset, void *private_data,
			extent_submit_bio_hook_t *submit_bio_start,
			extent_submit_bio_hook_t *submit_bio_done)
 {
 	struct async_submit_bio *async;
@@ -928,7 +927,8 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
 	if (!async)
 		return BLK_STS_RESOURCE;
 
-	async->inode = inode;
+	async->private_data = private_data;
+	async->fs_info = fs_info;
 	async->bio = bio;
 	async->mirror_num = mirror_num;
 	async->submit_bio_start = submit_bio_start;
@@ -974,9 +974,9 @@ static blk_status_t btree_csum_one_bio(struct bio *bio)
 	return errno_to_blk_status(ret);
 }
 
-static blk_status_t __btree_submit_bio_start(struct inode *inode,
-		struct bio *bio, int mirror_num, unsigned long bio_flags,
-		u64 bio_offset)
+static blk_status_t __btree_submit_bio_start(void *private_data, struct bio *bio,
+		int mirror_num, unsigned long bio_flags,
+		u64 bio_offset)
 {
 	/*
	 * when we're called for a write, we're already in the async
@@ -985,10 +985,11 @@ static blk_status_t __btree_submit_bio_start(struct inode *inode,
 	return btree_csum_one_bio(bio);
 }
 
-static blk_status_t __btree_submit_bio_done(struct inode *inode,
-		struct bio *bio, int mirror_num, unsigned long bio_flags,
-		u64 bio_offset)
+static blk_status_t __btree_submit_bio_done(void *private_data, struct bio *bio,
		int mirror_num, unsigned long bio_flags,
		u64 bio_offset)
 {
+	struct inode *inode = private_data;
 	blk_status_t ret;
 
 	/*
@@ -1014,10 +1015,11 @@ static int check_async_write(unsigned long bio_flags)
 	return 1;
 }
 
-static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
-				 int mirror_num, unsigned long bio_flags,
-				 u64 bio_offset)
+static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
 {
+	struct inode *inode = private_data;
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int async = check_async_write(bio_flags);
 	blk_status_t ret;
@@ -1042,8 +1044,8 @@ static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
-		ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num, 0,
-					  bio_offset,
+		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
+					  bio_offset, private_data,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
 	}
@@ -1221,10 +1223,10 @@ int btrfs_write_tree_block(struct extent_buffer *buf)
					buf->start + buf->len - 1);
 }
 
-int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
+void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
 {
-	return filemap_fdatawait_range(buf->pages[0]->mapping,
-				       buf->start, buf->start + buf->len - 1);
+	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
 }
 
 struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
@@ -1346,8 +1348,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 	root->log_transid_committed = -1;
 	root->last_log_commit = 0;
 	if (!dummy)
-		extent_io_tree_init(&root->dirty_log_pages,
-				     fs_info->btree_inode->i_mapping);
+		extent_io_tree_init(&root->dirty_log_pages, NULL);
 
 	memset(&root->root_key, 0, sizeof(root->root_key));
 	memset(&root->root_item, 0, sizeof(root->root_item));
@@ -2308,7 +2309,7 @@ static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
 	inode->i_mapping->a_ops = &btree_aops;
 
 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
-	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
+	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
 	BTRFS_I(inode)->io_tree.track_uptodate = 0;
 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
 
@@ -2625,7 +2626,6 @@ int open_ctree(struct super_block *sb,
spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->fs_roots_radix_lock);
spin_lock_init(&fs_info->delayed_iput_lock); spin_lock_init(&fs_info->delayed_iput_lock);
spin_lock_init(&fs_info->defrag_inodes_lock); spin_lock_init(&fs_info->defrag_inodes_lock);
spin_lock_init(&fs_info->free_chunk_lock);
spin_lock_init(&fs_info->tree_mod_seq_lock); spin_lock_init(&fs_info->tree_mod_seq_lock);
spin_lock_init(&fs_info->super_lock); spin_lock_init(&fs_info->super_lock);
spin_lock_init(&fs_info->qgroup_op_lock); spin_lock_init(&fs_info->qgroup_op_lock);
...@@ -2661,12 +2661,11 @@ int open_ctree(struct super_block *sb, ...@@ -2661,12 +2661,11 @@ int open_ctree(struct super_block *sb,
atomic_set(&fs_info->qgroup_op_seq, 0); atomic_set(&fs_info->qgroup_op_seq, 0);
atomic_set(&fs_info->reada_works_cnt, 0); atomic_set(&fs_info->reada_works_cnt, 0);
atomic64_set(&fs_info->tree_mod_seq, 0); atomic64_set(&fs_info->tree_mod_seq, 0);
fs_info->fs_frozen = 0;
fs_info->sb = sb; fs_info->sb = sb;
fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
fs_info->metadata_ratio = 0; fs_info->metadata_ratio = 0;
fs_info->defrag_inodes = RB_ROOT; fs_info->defrag_inodes = RB_ROOT;
fs_info->free_chunk_space = 0; atomic64_set(&fs_info->free_chunk_space, 0);
fs_info->tree_mod_log = RB_ROOT; fs_info->tree_mod_log = RB_ROOT;
fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */ fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
...@@ -2703,10 +2702,8 @@ int open_ctree(struct super_block *sb, ...@@ -2703,10 +2702,8 @@ int open_ctree(struct super_block *sb,
fs_info->block_group_cache_tree = RB_ROOT; fs_info->block_group_cache_tree = RB_ROOT;
fs_info->first_logical_byte = (u64)-1; fs_info->first_logical_byte = (u64)-1;
extent_io_tree_init(&fs_info->freed_extents[0], extent_io_tree_init(&fs_info->freed_extents[0], NULL);
fs_info->btree_inode->i_mapping); extent_io_tree_init(&fs_info->freed_extents[1], NULL);
extent_io_tree_init(&fs_info->freed_extents[1],
fs_info->btree_inode->i_mapping);
fs_info->pinned_extents = &fs_info->freed_extents[0]; fs_info->pinned_extents = &fs_info->freed_extents[0];
set_bit(BTRFS_FS_BARRIER, &fs_info->flags); set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
...@@ -3484,65 +3481,61 @@ static int write_dev_supers(struct btrfs_device *device, ...@@ -3484,65 +3481,61 @@ static int write_dev_supers(struct btrfs_device *device,
*/ */
static void btrfs_end_empty_barrier(struct bio *bio) static void btrfs_end_empty_barrier(struct bio *bio)
{ {
if (bio->bi_private) complete(bio->bi_private);
complete(bio->bi_private);
bio_put(bio);
} }
/* /*
* trigger flushes for one the devices. If you pass wait == 0, the flushes are * Submit a flush request to the device if it supports it. Error handling is
* sent down. With wait == 1, it waits for the previous flush. * done in the waiting counterpart.
*
* any device where the flush fails with eopnotsupp are flagged as not-barrier
* capable
*/ */
static blk_status_t write_dev_flush(struct btrfs_device *device, int wait) static void write_dev_flush(struct btrfs_device *device)
{ {
struct request_queue *q = bdev_get_queue(device->bdev); struct request_queue *q = bdev_get_queue(device->bdev);
struct bio *bio; struct bio *bio = device->flush_bio;
blk_status_t ret = 0;
if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
return 0; return;
if (wait) { bio_reset(bio);
bio = device->flush_bio; bio->bi_end_io = btrfs_end_empty_barrier;
if (!bio) bio->bi_bdev = device->bdev;
return 0; bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
init_completion(&device->flush_wait);
bio->bi_private = &device->flush_wait;
wait_for_completion(&device->flush_wait); submit_bio(bio);
device->flush_bio_sent = 1;
}
if (bio->bi_status) { /*
ret = bio->bi_status; * If the flush bio has been submitted by write_dev_flush, wait for it.
btrfs_dev_stat_inc_and_print(device, */
BTRFS_DEV_STAT_FLUSH_ERRS); static blk_status_t wait_dev_flush(struct btrfs_device *device)
} {
struct bio *bio = device->flush_bio;
/* drop the reference from the wait == 0 run */ if (!device->flush_bio_sent)
bio_put(bio); return 0;
device->flush_bio = NULL;
return ret; device->flush_bio_sent = 0;
} wait_for_completion_io(&device->flush_wait);
/* return bio->bi_status;
* one reference for us, and we leave it for the }
* caller
*/
device->flush_bio = NULL;
bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
if (!bio)
return BLK_STS_RESOURCE;
bio->bi_end_io = btrfs_end_empty_barrier; static int check_barrier_error(struct btrfs_fs_devices *fsdevs)
bio->bi_bdev = device->bdev; {
bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; int dev_flush_error = 0;
init_completion(&device->flush_wait); struct btrfs_device *dev;
bio->bi_private = &device->flush_wait;
device->flush_bio = bio;
bio_get(bio); list_for_each_entry_rcu(dev, &fsdevs->devices, dev_list) {
btrfsic_submit_bio(bio); if (!dev->bdev || dev->last_flush_error)
dev_flush_error++;
}
if (dev_flush_error >
fsdevs->fs_info->num_tolerated_disk_barrier_failures)
return -EIO;
return 0; return 0;
} }
...@@ -3555,7 +3548,6 @@ static int barrier_all_devices(struct btrfs_fs_info *info) ...@@ -3555,7 +3548,6 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
{ {
struct list_head *head; struct list_head *head;
struct btrfs_device *dev; struct btrfs_device *dev;
int errors_send = 0;
int errors_wait = 0; int errors_wait = 0;
blk_status_t ret; blk_status_t ret;
...@@ -3564,16 +3556,13 @@ static int barrier_all_devices(struct btrfs_fs_info *info) ...@@ -3564,16 +3556,13 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
list_for_each_entry_rcu(dev, head, dev_list) { list_for_each_entry_rcu(dev, head, dev_list) {
if (dev->missing) if (dev->missing)
continue; continue;
if (!dev->bdev) { if (!dev->bdev)
errors_send++;
continue; continue;
}
if (!dev->in_fs_metadata || !dev->writeable) if (!dev->in_fs_metadata || !dev->writeable)
continue; continue;
ret = write_dev_flush(dev, 0); write_dev_flush(dev);
if (ret) dev->last_flush_error = 0;
errors_send++;
} }
/* wait for all the barriers */ /* wait for all the barriers */
...@@ -3587,13 +3576,23 @@ static int barrier_all_devices(struct btrfs_fs_info *info) ...@@ -3587,13 +3576,23 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
if (!dev->in_fs_metadata || !dev->writeable) if (!dev->in_fs_metadata || !dev->writeable)
continue; continue;
ret = write_dev_flush(dev, 1); ret = wait_dev_flush(dev);
if (ret) if (ret) {
dev->last_flush_error = ret;
btrfs_dev_stat_inc_and_print(dev,
BTRFS_DEV_STAT_FLUSH_ERRS);
errors_wait++; errors_wait++;
}
}
if (errors_wait) {
/*
* At some point we need the status of all disks
* to arrive at the volume status. So error checking
* is being pushed to a separate loop.
*/
return check_barrier_error(info->fs_devices);
} }
if (errors_send > info->num_tolerated_disk_barrier_failures ||
errors_wait > info->num_tolerated_disk_barrier_failures)
return -EIO;
return 0; return 0;
} }
...@@ -4577,11 +4576,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, ...@@ -4577,11 +4576,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
cur_trans->state =TRANS_STATE_COMPLETED; cur_trans->state =TRANS_STATE_COMPLETED;
wake_up(&cur_trans->commit_wait); wake_up(&cur_trans->commit_wait);
/*
memset(cur_trans, 0, sizeof(*cur_trans));
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
*/
} }
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info) static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
...@@ -4637,6 +4631,12 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info) ...@@ -4637,6 +4631,12 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
return 0; return 0;
} }
static struct btrfs_fs_info *btree_fs_info(void *private_data)
{
struct inode *inode = private_data;
return btrfs_sb(inode->i_sb);
}
static const struct extent_io_ops btree_extent_io_ops = { static const struct extent_io_ops btree_extent_io_ops = {
/* mandatory callbacks */ /* mandatory callbacks */
.submit_bio_hook = btree_submit_bio_hook, .submit_bio_hook = btree_submit_bio_hook,
...@@ -4644,6 +4644,8 @@ static const struct extent_io_ops btree_extent_io_ops = { ...@@ -4644,6 +4644,8 @@ static const struct extent_io_ops btree_extent_io_ops = {
/* note we're sharing with inode.c for the merge bio hook */ /* note we're sharing with inode.c for the merge bio hook */
.merge_bio_hook = btrfs_merge_bio_hook, .merge_bio_hook = btrfs_merge_bio_hook,
.readpage_io_failed_hook = btree_io_failed_hook, .readpage_io_failed_hook = btree_io_failed_hook,
.set_range_writeback = btrfs_set_range_writeback,
.tree_fs_info = btree_fs_info,
/* optional callbacks */ /* optional callbacks */
}; };
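Note on the flush rework above: the old write_dev_flush(dev, wait) multiplexed submission and waiting through one function and allocated a fresh bio per flush, which could fail in the middle of a transaction commit. The new code preallocates one flush bio per device and splits the two phases. A sketch of the btrfs_device fields this pairing relies on (only the fields used by the hunks above; placement in the struct is assumed):

	struct btrfs_device {
		/* ... */
		struct bio *flush_bio;		/* preallocated, recycled with bio_reset() */
		struct completion flush_wait;	/* completed by btrfs_end_empty_barrier() */
		int flush_bio_sent;		/* set in write_dev_flush(), cleared in wait_dev_flush() */
		int last_flush_error;		/* read later by check_barrier_error() */
	};

Because the bio always exists, submission cannot fail in the commit path, and barrier_all_devices() fans out flushes to every device before waiting on any of them, so slow devices flush in parallel.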
@@ -120,14 +120,14 @@ u32 btrfs_csum_data(const char *data, u32 seed, size_t len);
 void btrfs_csum_final(u32 crc, u8 *result);
 blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 			enum btrfs_wq_endio_type metadata);
-blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
-			struct inode *inode, struct bio *bio, int mirror_num,
-			unsigned long bio_flags, u64 bio_offset,
+blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
+			int mirror_num, unsigned long bio_flags,
+			u64 bio_offset, void *private_data,
 			extent_submit_bio_hook_t *submit_bio_start,
 			extent_submit_bio_hook_t *submit_bio_done);
 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
 int btrfs_write_tree_block(struct extent_buffer *buf);
-int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
+void btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
 			     struct btrfs_fs_info *fs_info);
 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
......
@@ -282,6 +282,11 @@ static int btrfs_get_name(struct dentry *parent, char *name,
 		name_len = btrfs_inode_ref_name_len(leaf, iref);
 	}

+	ret = btrfs_is_name_len_valid(leaf, path->slots[0], name_ptr, name_len);
+	if (!ret) {
+		btrfs_free_path(path);
+		return -EIO;
+	}
+
 	read_extent_buffer(leaf, name, name_ptr, name_len);
 	btrfs_free_path(path);
......
(This diff has been collapsed.)
@@ -87,19 +87,9 @@ void btrfs_leak_debug_check(void)
 static inline void __btrfs_debug_check_extent_io_range(const char *caller,
 		struct extent_io_tree *tree, u64 start, u64 end)
 {
-	struct inode *inode;
-	u64 isize;
-
-	if (!tree->mapping)
-		return;
-
-	inode = tree->mapping->host;
-	isize = i_size_read(inode);
-	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
-		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
-		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
-			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
-	}
+	if (tree->ops && tree->ops->check_extent_io_range)
+		tree->ops->check_extent_io_range(tree->private_data, caller,
+						 start, end);
 }
 #else
 #define btrfs_leak_debug_add(new, head)	do {} while (0)
@@ -154,9 +144,9 @@ static noinline void flush_write_bio(void *data);
 static inline struct btrfs_fs_info *
 tree_fs_info(struct extent_io_tree *tree)
 {
-	if (!tree->mapping)
-		return NULL;
-	return btrfs_sb(tree->mapping->host->i_sb);
+	if (tree->ops)
+		return tree->ops->tree_fs_info(tree->private_data);
+	return NULL;
 }

 int __init extent_io_init(void)
@@ -214,13 +204,13 @@ void extent_io_exit(void)
 }

 void extent_io_tree_init(struct extent_io_tree *tree,
-			 struct address_space *mapping)
+			 void *private_data)
 {
 	tree->state = RB_ROOT;
 	tree->ops = NULL;
 	tree->dirty_bytes = 0;
 	spin_lock_init(&tree->lock);
-	tree->mapping = mapping;
+	tree->private_data = private_data;
 }

 static struct extent_state *alloc_extent_state(gfp_t mask)
@@ -370,8 +360,7 @@ static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
 		     struct extent_state *other)
 {
 	if (tree->ops && tree->ops->merge_extent_hook)
-		tree->ops->merge_extent_hook(tree->mapping->host, new,
-					     other);
+		tree->ops->merge_extent_hook(tree->private_data, new, other);
 }

 /*
@@ -422,15 +411,14 @@ static void set_state_cb(struct extent_io_tree *tree,
 			 struct extent_state *state, unsigned *bits)
 {
 	if (tree->ops && tree->ops->set_bit_hook)
-		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
+		tree->ops->set_bit_hook(tree->private_data, state, bits);
 }

 static void clear_state_cb(struct extent_io_tree *tree,
 			   struct extent_state *state, unsigned *bits)
 {
 	if (tree->ops && tree->ops->clear_bit_hook)
-		tree->ops->clear_bit_hook(BTRFS_I(tree->mapping->host),
-				state, bits);
+		tree->ops->clear_bit_hook(tree->private_data, state, bits);
 }

 static void set_state_bits(struct extent_io_tree *tree,
@@ -479,7 +467,7 @@ static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
 		     u64 split)
 {
 	if (tree->ops && tree->ops->split_extent_hook)
-		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
+		tree->ops->split_extent_hook(tree->private_data, orig, split);
 }

 /*
@@ -1403,17 +1391,7 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
  */
 static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 {
-	unsigned long index = start >> PAGE_SHIFT;
-	unsigned long end_index = end >> PAGE_SHIFT;
-	struct page *page;
-
-	while (index <= end_index) {
-		page = find_get_page(tree->mapping, index);
-		BUG_ON(!page); /* Pages should be in the extent_io_tree */
-		set_page_writeback(page);
-		put_page(page);
-		index++;
-	}
+	tree->ops->set_range_writeback(tree->private_data, start, end);
 }

 /* find the first state struct with 'bits' set after 'start', and
@@ -1962,11 +1940,12 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 	SetPageUptodate(page);
 }

-int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec)
+int free_io_failure(struct extent_io_tree *failure_tree,
+		    struct extent_io_tree *io_tree,
+		    struct io_failure_record *rec)
 {
 	int ret;
 	int err = 0;
-	struct extent_io_tree *failure_tree = &inode->io_failure_tree;

 	set_state_failrec(failure_tree, rec->start, NULL);
 	ret = clear_extent_bits(failure_tree, rec->start,
@@ -1975,7 +1954,7 @@ int free_io_failure(struct extent_io_tree *failure_tree,
 	if (ret)
 		err = ret;

-	ret = clear_extent_bits(&inode->io_tree, rec->start,
+	ret = clear_extent_bits(io_tree, rec->start,
 				rec->start + rec->len - 1,
 				EXTENT_DAMAGED);
 	if (ret && !err)
@@ -1995,11 +1974,10 @@ int free_io_failure(struct extent_io_tree *failure_tree,
  * currently, there can be no more than two copies of every data bit. thus,
  * exactly one rewrite is required.
  */
-int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length,
-		u64 logical, struct page *page,
-		unsigned int pg_offset, int mirror_num)
+int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
+		u64 length, u64 logical, struct page *page,
+		unsigned int pg_offset, int mirror_num)
 {
-	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	struct bio *bio;
 	struct btrfs_device *dev;
 	u64 map_length = 0;
@@ -2010,9 +1988,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
 	ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
 	BUG_ON(!mirror_num);

-	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
-	if (!bio)
-		return -EIO;
+	bio = btrfs_io_bio_alloc(1);
 	bio->bi_iter.bi_size = 0;
 	map_length = length;
@@ -2071,7 +2047,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
 	btrfs_info_rl_in_rcu(fs_info,
 		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
-				  btrfs_ino(inode), start,
+				  ino, start,
 				  rcu_str_deref(dev->name), sector);
 	btrfs_bio_counter_dec(fs_info);
 	bio_put(bio);
@@ -2091,8 +2067,7 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = eb->pages[i];

-		ret = repair_io_failure(BTRFS_I(fs_info->btree_inode), start,
-					PAGE_SIZE, start, p,
+		ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
 					start - page_offset(p), mirror_num);
 		if (ret)
 			break;
@@ -2106,24 +2081,24 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
  * each time an IO finishes, we do a fast check in the IO failure tree
  * to see if we need to process or clean up an io_failure_record
  */
-int clean_io_failure(struct btrfs_inode *inode, u64 start, struct page *page,
-		unsigned int pg_offset)
+int clean_io_failure(struct btrfs_fs_info *fs_info,
+		     struct extent_io_tree *failure_tree,
+		     struct extent_io_tree *io_tree, u64 start,
+		     struct page *page, u64 ino, unsigned int pg_offset)
 {
 	u64 private;
 	struct io_failure_record *failrec;
-	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	struct extent_state *state;
 	int num_copies;
 	int ret;

 	private = 0;
-	ret = count_range_bits(&inode->io_failure_tree, &private,
-				(u64)-1, 1, EXTENT_DIRTY, 0);
+	ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
+			       EXTENT_DIRTY, 0);
 	if (!ret)
 		return 0;

-	ret = get_state_failrec(&inode->io_failure_tree, start,
-			&failrec);
+	ret = get_state_failrec(failure_tree, start, &failrec);
 	if (ret)
 		return 0;

@@ -2139,25 +2114,25 @@ int clean_io_failure(struct btrfs_fs_info *fs_info,
 	if (fs_info->sb->s_flags & MS_RDONLY)
 		goto out;

-	spin_lock(&inode->io_tree.lock);
-	state = find_first_extent_bit_state(&inode->io_tree,
+	spin_lock(&io_tree->lock);
+	state = find_first_extent_bit_state(io_tree,
 					    failrec->start,
 					    EXTENT_LOCKED);
-	spin_unlock(&inode->io_tree.lock);
+	spin_unlock(&io_tree->lock);

 	if (state && state->start <= failrec->start &&
 	    state->end >= failrec->start + failrec->len - 1) {
 		num_copies = btrfs_num_copies(fs_info, failrec->logical,
 					      failrec->len);
 		if (num_copies > 1) {
-			repair_io_failure(inode, start, failrec->len,
-					  failrec->logical, page,
-					  pg_offset, failrec->failed_mirror);
+			repair_io_failure(fs_info, ino, start, failrec->len,
					  failrec->logical, page, pg_offset,
					  failrec->failed_mirror);
 		}
 	}

 out:
-	free_io_failure(inode, failrec);
+	free_io_failure(failure_tree, io_tree, failrec);

 	return 0;
 }
@@ -2357,10 +2332,7 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
 	struct btrfs_io_bio *btrfs_failed_bio;
 	struct btrfs_io_bio *btrfs_bio;

-	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
-	if (!bio)
-		return NULL;
-
+	bio = btrfs_io_bio_alloc(1);
 	bio->bi_end_io = endio_func;
 	bio->bi_iter.bi_sector = failrec->logical >> 9;
 	bio->bi_bdev = fs_info->fs_devices->latest_bdev;
@@ -2398,6 +2370,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	struct io_failure_record *failrec;
 	struct inode *inode = page->mapping->host;
 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
+	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
 	struct bio *bio;
 	int read_mode = 0;
 	blk_status_t status;
@@ -2411,7 +2384,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
 	if (!ret) {
-		free_io_failure(BTRFS_I(inode), failrec);
+		free_io_failure(failure_tree, tree, failrec);
 		return -EIO;
 	}
@@ -2424,7 +2397,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 				      (int)phy_offset, failed_bio->bi_end_io,
 				      NULL);
 	if (!bio) {
-		free_io_failure(BTRFS_I(inode), failrec);
+		free_io_failure(failure_tree, tree, failrec);
 		return -EIO;
 	}
 	bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
@@ -2433,10 +2406,10 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 		"Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
 		read_mode, failrec->this_mirror, failrec->in_validation);

-	status = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
+	status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
 					    failrec->bio_flags, 0);
 	if (status) {
-		free_io_failure(BTRFS_I(inode), failrec);
+		free_io_failure(failure_tree, tree, failrec);
 		bio_put(bio);
 		ret = blk_status_to_errno(status);
 	}
@@ -2542,7 +2515,7 @@ static void end_bio_extent_readpage(struct bio *bio)
 	struct bio_vec *bvec;
 	int uptodate = !bio->bi_status;
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
-	struct extent_io_tree *tree;
+	struct extent_io_tree *tree, *failure_tree;
 	u64 offset = 0;
 	u64 start;
 	u64 end;
@@ -2563,6 +2536,7 @@ static void end_bio_extent_readpage(struct bio *bio)
 			(u64)bio->bi_iter.bi_sector, bio->bi_status,
 			io_bio->mirror_num);
 		tree = &BTRFS_I(inode)->io_tree;
+		failure_tree = &BTRFS_I(inode)->io_failure_tree;

 		/* We always issue full-page reads, but if some block
 		 * in a page fails to read, blk_update_request() will
@@ -2592,8 +2566,10 @@ static void end_bio_extent_readpage(struct bio *bio)
 			if (ret)
 				uptodate = 0;
 			else
-				clean_io_failure(BTRFS_I(inode), start,
-						page, 0);
+				clean_io_failure(BTRFS_I(inode)->root->fs_info,
						 failure_tree, tree, start,
						 page,
						 btrfs_ino(BTRFS_I(inode)), 0);
 		}

 		if (likely(uptodate))
@@ -2682,67 +2658,70 @@ static void end_bio_extent_readpage(struct bio *bio)
 }

 /*
- * this allocates from the btrfs_bioset. We're returning a bio right now
- * but you can call btrfs_io_bio for the appropriate container_of magic
+ * Initialize the members up to but not including 'bio'. Use after allocating a
+ * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
+ * 'bio' because use of __GFP_ZERO is not supported.
  */
-struct bio *
-btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
-		gfp_t gfp_flags)
+static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
 {
-	struct btrfs_io_bio *btrfs_bio;
-	struct bio *bio;
+	memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
+}

-	bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
-
-	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
-		while (!bio && (nr_vecs /= 2)) {
-			bio = bio_alloc_bioset(gfp_flags,
-					       nr_vecs, btrfs_bioset);
-		}
-	}
+/*
+ * The following helpers allocate a bio. As it's backed by a bioset, it'll
+ * never fail. We're returning a bio right now but you can call btrfs_io_bio
+ * for the appropriate container_of magic
+ */
+struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte)
+{
+	struct bio *bio;

-	if (bio) {
-		bio->bi_bdev = bdev;
-		bio->bi_iter.bi_sector = first_sector;
-		btrfs_bio = btrfs_io_bio(bio);
-		btrfs_bio->csum = NULL;
-		btrfs_bio->csum_allocated = NULL;
-		btrfs_bio->end_io = NULL;
-	}
+	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, btrfs_bioset);
+	bio->bi_bdev = bdev;
+	bio->bi_iter.bi_sector = first_byte >> 9;
+	btrfs_io_bio_init(btrfs_io_bio(bio));
 	return bio;
 }

-struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
+struct bio *btrfs_bio_clone(struct bio *bio)
 {
 	struct btrfs_io_bio *btrfs_bio;
 	struct bio *new;

-	new = bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
-	if (new) {
-		btrfs_bio = btrfs_io_bio(new);
-		btrfs_bio->csum = NULL;
-		btrfs_bio->csum_allocated = NULL;
-		btrfs_bio->end_io = NULL;
-	}
+	/* Bio allocation backed by a bioset does not fail */
+	new = bio_clone_fast(bio, GFP_NOFS, btrfs_bioset);
+	btrfs_bio = btrfs_io_bio(new);
+	btrfs_io_bio_init(btrfs_bio);
+	btrfs_bio->iter = bio->bi_iter;
 	return new;
 }

-/* this also allocates from the btrfs_bioset */
-struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
 {
-	struct btrfs_io_bio *btrfs_bio;
 	struct bio *bio;

-	bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
-	if (bio) {
-		btrfs_bio = btrfs_io_bio(bio);
-		btrfs_bio->csum = NULL;
-		btrfs_bio->csum_allocated = NULL;
-		btrfs_bio->end_io = NULL;
-	}
+	/* Bio allocation backed by a bioset does not fail */
+	bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, btrfs_bioset);
+	btrfs_io_bio_init(btrfs_io_bio(bio));
 	return bio;
 }

+struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
+{
+	struct bio *bio;
+	struct btrfs_io_bio *btrfs_bio;
+
+	/* this will never fail when it's backed by a bioset */
+	bio = bio_clone_fast(orig, GFP_NOFS, btrfs_bioset);
+	ASSERT(bio);
+
+	btrfs_bio = btrfs_io_bio(bio);
+	btrfs_io_bio_init(btrfs_bio);
+
+	bio_trim(bio, offset >> 9, size >> 9);
+	btrfs_bio->iter = bio->bi_iter;
+	return bio;
+}

 static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
 				       unsigned long bio_flags)
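All four helpers now allocate from btrfs_bioset, so they cannot return NULL and callers can drop their error paths. A sketch of how the new partial clone might carve a byte range out of a larger bio (everything except the helper itself is a hypothetical caller):

	/* offset and size are in bytes and must be 512-byte aligned */
	struct bio *part = btrfs_bio_clone_partial(orig_bio, offset, size);

	part->bi_private = my_ctx;	/* hypothetical per-range context */
	part->bi_end_io = my_end_io;	/* hypothetical completion handler */
	submit_bio(part);

Saving bi_iter into btrfs_io_bio->iter right after the trim records the range the clone covers, which the read/csum paths can consult after the block layer has advanced the live iterator.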
@@ -2759,7 +2738,7 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
 	bio_get(bio);

 	if (tree->ops)
-		ret = tree->ops->submit_bio_hook(page->mapping->host, bio,
+		ret = tree->ops->submit_bio_hook(tree->private_data, bio,
 						 mirror_num, bio_flags, start);
 	else
 		btrfsic_submit_bio(bio);
@@ -2822,11 +2801,7 @@ static int submit_extent_page(int op, int op_flags, struct extent_io_tree *tree,
 		}
 	}

-	bio = btrfs_bio_alloc(bdev, sector, BIO_MAX_PAGES,
-			GFP_NOFS | __GFP_HIGH);
-	if (!bio)
-		return -ENOMEM;
-
+	bio = btrfs_bio_alloc(bdev, sector << 9);
 	bio_add_page(bio, page, page_size, offset);
 	bio->bi_end_io = end_io_func;
 	bio->bi_private = tree;
@@ -3762,7 +3737,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
 		 */
 		start = btrfs_item_nr_offset(nritems);
-		end = btrfs_leaf_data(eb) + leaf_data_end(fs_info, eb);
+		end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, eb);
 		memzero_extent_buffer(eb, start, end - start);
 	}
@@ -4468,29 +4443,25 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
 }

 /*
- * Sanity check for fiemap cache
+ * Emit last fiemap cache
  *
- * All fiemap cache should be submitted by emit_fiemap_extent()
- * Iteration should be terminated either by last fiemap extent or
- * fieinfo->fi_extents_max.
- * So no cached fiemap should exist.
+ * The last fiemap cache may still be cached in the following case:
+ * 0                  4k                    8k
+ * |<- Fiemap range ->|
+ * |<------------  First extent ----------->|
+ *
+ * In this case, the first extent range will be cached but not emitted.
+ * So we must emit it before ending extent_fiemap().
  */
-static int check_fiemap_cache(struct btrfs_fs_info *fs_info,
-			       struct fiemap_extent_info *fieinfo,
-			       struct fiemap_cache *cache)
+static int emit_last_fiemap_cache(struct btrfs_fs_info *fs_info,
+				  struct fiemap_extent_info *fieinfo,
+				  struct fiemap_cache *cache)
 {
 	int ret;

 	if (!cache->cached)
 		return 0;

-	/* Small and recoverbale problem, only to info developer */
-#ifdef CONFIG_BTRFS_DEBUG
-	WARN_ON(1);
-#endif
-	btrfs_warn(fs_info,
-		   "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x",
-		   cache->offset, cache->phys, cache->len, cache->flags);
-
 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
 				      cache->len, cache->flags);
 	cache->cached = false;
@@ -4706,7 +4677,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	}
 out_free:
 	if (!ret)
-		ret = check_fiemap_cache(root->fs_info, fieinfo, &cache);
+		ret = emit_last_fiemap_cache(root->fs_info, fieinfo, &cache);
 	free_extent_map(em);
 out:
 	btrfs_free_path(path);
......
@@ -92,9 +92,9 @@ struct btrfs_inode;
 struct btrfs_io_bio;
 struct io_failure_record;

-typedef blk_status_t (extent_submit_bio_hook_t)(struct inode *inode,
-		struct bio *bio, int mirror_num, unsigned long bio_flags,
-		u64 bio_offset);
+typedef blk_status_t (extent_submit_bio_hook_t)(void *private_data, struct bio *bio,
+		int mirror_num, unsigned long bio_flags,
+		u64 bio_offset);

 struct extent_io_ops {
 	/*
 	 * The following callbacks must be allways defined, the function
@@ -108,32 +108,36 @@ struct extent_io_ops {
 			       size_t size, struct bio *bio,
 			       unsigned long bio_flags);
 	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
+	struct btrfs_fs_info *(*tree_fs_info)(void *private_data);
+	void (*set_range_writeback)(void *private_data, u64 start, u64 end);

 	/*
 	 * Optional hooks, called if the pointer is not NULL
 	 */
-	int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
+	int (*fill_delalloc)(void *private_data, struct page *locked_page,
 			     u64 start, u64 end, int *page_started,
 			     unsigned long *nr_written);

 	int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
 	void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
 				      struct extent_state *state, int uptodate);
-	void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
+	void (*set_bit_hook)(void *private_data, struct extent_state *state,
 			     unsigned *bits);
-	void (*clear_bit_hook)(struct btrfs_inode *inode,
+	void (*clear_bit_hook)(void *private_data,
 			       struct extent_state *state,
 			       unsigned *bits);
-	void (*merge_extent_hook)(struct inode *inode,
+	void (*merge_extent_hook)(void *private_data,
 				  struct extent_state *new,
 				  struct extent_state *other);
-	void (*split_extent_hook)(struct inode *inode,
+	void (*split_extent_hook)(void *private_data,
 				  struct extent_state *orig, u64 split);
+	void (*check_extent_io_range)(void *private_data, const char *caller,
+				      u64 start, u64 end);
 };

 struct extent_io_tree {
 	struct rb_root state;
-	struct address_space *mapping;
+	void *private_data;
 	u64 dirty_bytes;
 	int track_uptodate;
 	spinlock_t lock;
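Every hook now receives the opaque private_data the tree was initialized with instead of deducing an inode from tree->mapping. A sketch of the consumer side, modeled on the btree callbacks earlier in this pull (inode-backed trees pass the inode itself to extent_io_tree_init()):

	static void btrfs_merge_extent_hook(void *private_data,
					    struct extent_state *new,
					    struct extent_state *other)
	{
		struct inode *inode = private_data;

		/* ... update delalloc accounting on BTRFS_I(inode) ... */
	}

Trees initialized with a NULL private_data (dirty_log_pages, freed_extents[0..1]) also leave tree->ops NULL, so the "if (tree->ops && ...)" guards make every callback a no-op for them.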
@@ -205,12 +209,46 @@ struct extent_buffer {
  */
 struct extent_changeset {
 	/* How many bytes are set/cleared in this operation */
-	u64 bytes_changed;
+	unsigned int bytes_changed;

 	/* Changed ranges */
 	struct ulist range_changed;
 };

+static inline void extent_changeset_init(struct extent_changeset *changeset)
+{
+	changeset->bytes_changed = 0;
+	ulist_init(&changeset->range_changed);
+}
+
+static inline struct extent_changeset *extent_changeset_alloc(void)
+{
+	struct extent_changeset *ret;
+
+	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	extent_changeset_init(ret);
+	return ret;
+}
+
+static inline void extent_changeset_release(struct extent_changeset *changeset)
+{
+	if (!changeset)
+		return;
+	changeset->bytes_changed = 0;
+	ulist_release(&changeset->range_changed);
+}
+
+static inline void extent_changeset_free(struct extent_changeset *changeset)
+{
+	if (!changeset)
+		return;
+	extent_changeset_release(changeset);
+	kfree(changeset);
+}
+
 static inline void extent_set_compress_type(unsigned long *bio_flags,
 					    int compress_type)
 {
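extent_changeset_release() empties the recorded ranges but keeps the structure reusable for the next iteration, while extent_changeset_free() also frees it; both are NULL-safe. A sketch of the intended life cycle, mirroring the __btrfs_buffered_write() changes later in this pull:

	struct extent_changeset *data_reserved = NULL;
	int ret;

	ret = btrfs_check_data_free_space(inode, &data_reserved, pos, len);
	if (ret < 0)
		return ret;
	/* on failure, give back exactly the ranges recorded above */
	btrfs_free_reserved_data_space(inode, data_reserved, pos, len);
	extent_changeset_free(data_reserved);

Recording which ranges a given operation actually reserved is what lets the qgroup code free only those bytes, the mechanism behind the reserved-space underflow fixes listed in the changelog.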
@@ -230,8 +268,7 @@ typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
 					  u64 start, u64 len,
 					  int create);

-void extent_io_tree_init(struct extent_io_tree *tree,
-			 struct address_space *mapping);
+void extent_io_tree_init(struct extent_io_tree *tree, void *private_data);
 int try_release_extent_mapping(struct extent_map_tree *map,
 			       struct extent_io_tree *tree, struct page *page,
 			       gfp_t mask);
@@ -459,20 +496,21 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
 				  u64 delalloc_end, struct page *locked_page,
 				  unsigned bits_to_clear,
 				  unsigned long page_ops);
-struct bio *
-btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
-		gfp_t gfp_flags);
-struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs);
-struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);
+struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte);
+struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
+struct bio *btrfs_bio_clone(struct bio *bio);
+struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

 struct btrfs_fs_info;
 struct btrfs_inode;

-int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length,
-		u64 logical, struct page *page,
-		unsigned int pg_offset, int mirror_num);
-int clean_io_failure(struct btrfs_inode *inode, u64 start,
-		struct page *page, unsigned int pg_offset);
+int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
+		u64 length, u64 logical, struct page *page,
+		unsigned int pg_offset, int mirror_num);
+int clean_io_failure(struct btrfs_fs_info *fs_info,
+		     struct extent_io_tree *failure_tree,
+		     struct extent_io_tree *io_tree, u64 start,
+		     struct page *page, u64 ino, unsigned int pg_offset);
 void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
 int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
 			 struct extent_buffer *eb, int mirror_num);
@@ -507,7 +545,9 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
 				    struct io_failure_record *failrec,
 				    struct page *page, int pg_offset, int icsum,
 				    bio_end_io_t *endio_func, void *data);
-int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec);
+int free_io_failure(struct extent_io_tree *failure_tree,
+		    struct extent_io_tree *io_tree,
+		    struct io_failure_record *rec);
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 noinline u64 find_lock_delalloc_range(struct inode *inode,
 				      struct extent_io_tree *tree,
......
@@ -164,7 +164,8 @@ static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 				   u64 logical_offset, u32 *dst, int dio)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
 	struct btrfs_csum_item *item = NULL;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -177,7 +178,7 @@ static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 	u64 page_bytes_left;
 	u32 diff;
 	int nblocks;
-	int count = 0, i;
+	int count = 0;
 	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

 	path = btrfs_alloc_path();
@@ -206,8 +207,6 @@ static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
 		path->reada = READA_FORWARD;

-	WARN_ON(bio->bi_vcnt <= 0);
-
 	/*
 	 * the free space stuff is only read when it hasn't been
 	 * updated in the current transaction. So, we can safely
@@ -223,13 +222,13 @@ static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 	if (dio)
 		offset = logical_offset;

-	bio_for_each_segment_all(bvec, bio, i) {
-		page_bytes_left = bvec->bv_len;
+	bio_for_each_segment(bvec, bio, iter) {
+		page_bytes_left = bvec.bv_len;
 		if (count)
 			goto next;

 		if (!dio)
-			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
+			offset = page_offset(bvec.bv_page) + bvec.bv_offset;
 		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
 					       (u32 *)csum, nblocks);
 		if (count)
@@ -440,15 +439,15 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 	struct btrfs_ordered_sum *sums;
 	struct btrfs_ordered_extent *ordered = NULL;
 	char *data;
-	struct bio_vec *bvec;
+	struct bvec_iter iter;
+	struct bio_vec bvec;
 	int index;
 	int nr_sectors;
-	int i, j;
 	unsigned long total_bytes = 0;
 	unsigned long this_sum_bytes = 0;
+	int i;
 	u64 offset;

-	WARN_ON(bio->bi_vcnt <= 0);
 	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
 		       GFP_NOFS);
 	if (!sums)
@@ -465,19 +464,19 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
 	index = 0;

-	bio_for_each_segment_all(bvec, bio, j) {
+	bio_for_each_segment(bvec, bio, iter) {
 		if (!contig)
-			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
+			offset = page_offset(bvec.bv_page) + bvec.bv_offset;

 		if (!ordered) {
 			ordered = btrfs_lookup_ordered_extent(inode, offset);
 			BUG_ON(!ordered); /* Logic error */
 		}

-		data = kmap_atomic(bvec->bv_page);
+		data = kmap_atomic(bvec.bv_page);

 		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
-						 bvec->bv_len + fs_info->sectorsize
+						 bvec.bv_len + fs_info->sectorsize
 						 - 1);

 		for (i = 0; i < nr_sectors; i++) {
@@ -504,12 +503,12 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 						+ total_bytes;
 				index = 0;

-				data = kmap_atomic(bvec->bv_page);
+				data = kmap_atomic(bvec.bv_page);
 			}

 			sums->sums[index] = ~(u32)0;
 			sums->sums[index]
-				= btrfs_csum_data(data + bvec->bv_offset
+				= btrfs_csum_data(data + bvec.bv_offset
 						+ (i * fs_info->sectorsize),
 						sums->sums[index],
 						fs_info->sectorsize);
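The iterator switch is required by the faster bio cloning above: bio_for_each_segment_all() walks the bio's entire bvec array and is only valid for the bio's owner, while bio_for_each_segment() advances a private bvec_iter and visits exactly the range bi_iter describes, so it stays correct for cloned or split bios. Sketch of the new shape (bvec becomes a by-value copy, hence "." instead of "->"; total_len is a hypothetical accumulator):

	struct bio_vec bvec;
	struct bvec_iter iter;
	unsigned int total_len = 0;

	bio_for_each_segment(bvec, bio, iter)
		total_len += bvec.bv_len;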
......
@@ -1581,6 +1581,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct page **pages = NULL;
 	struct extent_state *cached_state = NULL;
+	struct extent_changeset *data_reserved = NULL;
 	u64 release_bytes = 0;
 	u64 lockstart;
 	u64 lockend;
@@ -1628,7 +1629,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		reserve_bytes = round_up(write_bytes + sector_offset,
 					 fs_info->sectorsize);

-		ret = btrfs_check_data_free_space(inode, pos, write_bytes);
+		extent_changeset_release(data_reserved);
+		ret = btrfs_check_data_free_space(inode, &data_reserved, pos,
+						  write_bytes);
 		if (ret < 0) {
 			if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
 						      BTRFS_INODE_PREALLOC)) &&
@@ -1657,8 +1660,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 						 reserve_bytes);
 		if (ret) {
 			if (!only_release_metadata)
-				btrfs_free_reserved_data_space(inode, pos,
-							       write_bytes);
+				btrfs_free_reserved_data_space(inode,
						data_reserved, pos,
						write_bytes);
 			else
 				btrfs_end_write_no_snapshoting(root);
 			break;
@@ -1740,8 +1744,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 				__pos = round_down(pos,
 						   fs_info->sectorsize) +
 					(dirty_pages << PAGE_SHIFT);
-				btrfs_delalloc_release_space(inode, __pos,
-							     release_bytes);
+				btrfs_delalloc_release_space(inode,
						data_reserved, __pos,
						release_bytes);
 			}
 		}
@@ -1796,12 +1801,13 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
 					release_bytes);
 		} else {
-			btrfs_delalloc_release_space(inode,
-					round_down(pos, fs_info->sectorsize),
-					release_bytes);
+			btrfs_delalloc_release_space(inode, data_reserved,
					round_down(pos, fs_info->sectorsize),
					release_bytes);
 		}
 	}

+	extent_changeset_free(data_reserved);
 	return num_written ? num_written : ret;
 }
@@ -2405,10 +2411,13 @@ static int fill_holes(struct btrfs_trans_handle *trans,
  */
 static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct extent_map *em;
 	int ret = 0;

-	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, *start, *len, 0);
+	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
+			      round_down(*start, fs_info->sectorsize),
+			      round_up(*len, fs_info->sectorsize), 0);
 	if (IS_ERR(em))
 		return PTR_ERR(em);
@@ -2784,6 +2793,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 {
 	struct inode *inode = file_inode(file);
 	struct extent_state *cached_state = NULL;
+	struct extent_changeset *data_reserved = NULL;
 	struct falloc_range *range;
 	struct falloc_range *tmp;
 	struct list_head reserve_list;
@@ -2913,8 +2923,8 @@ static long btrfs_fallocate(struct file *file, int mode,
 				free_extent_map(em);
 				break;
 			}
-			ret = btrfs_qgroup_reserve_data(inode, cur_offset,
-					last_byte - cur_offset);
+			ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
+					cur_offset, last_byte - cur_offset);
 			if (ret < 0) {
 				free_extent_map(em);
 				break;
@@ -2925,8 +2935,8 @@ static long btrfs_fallocate(struct file *file, int mode,
 			 * range, free reserved data space first, otherwise
 			 * it'll result in false ENOSPC error.
 			 */
-			btrfs_free_reserved_data_space(inode, cur_offset,
-				last_byte - cur_offset);
+			btrfs_free_reserved_data_space(inode, data_reserved,
					cur_offset, last_byte - cur_offset);
 		}
 		free_extent_map(em);
 		cur_offset = last_byte;
@@ -2945,8 +2955,9 @@ static long btrfs_fallocate(struct file *file, int mode,
 					range->len, i_blocksize(inode),
 					offset + len, &alloc_hint);
 		else
-			btrfs_free_reserved_data_space(inode, range->start,
-						       range->len);
+			btrfs_free_reserved_data_space(inode,
					data_reserved, range->start,
					range->len);
 		list_del(&range->list);
 		kfree(range);
 	}
@@ -2984,8 +2995,9 @@ static long btrfs_fallocate(struct file *file, int mode,
 	inode_unlock(inode);
 	/* Let go of our reservation. */
 	if (ret != 0)
-		btrfs_free_reserved_data_space(inode, alloc_start,
-					       alloc_end - cur_offset);
+		btrfs_free_reserved_data_space(inode, data_reserved,
				alloc_start, alloc_end - cur_offset);
+	extent_changeset_free(data_reserved);
 	return ret;
 }
......
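All of the file.c hunks above follow one pattern: data reservations are now tracked in an extent_changeset so that error paths free exactly the ranges this caller reserved, instead of guessing from a bare (start, len) pair. A minimal sketch of the caller lifecycle, condensed from the fallocate path above — the function name and do_the_actual_work() are hypothetical, not from the patch:

	/* Sketch only: extent_changeset lifecycle assumed by the reworked API. */
	static int example_reserve_data(struct inode *inode, u64 start, u64 len)
	{
		struct extent_changeset *data_reserved = NULL;
		int ret;

		/* First call allocates the changeset and records newly reserved ranges. */
		ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, len);
		if (ret < 0)
			goto out;

		ret = do_the_actual_work(inode, start, len);	/* hypothetical */
		if (ret)
			/* Free only the ranges recorded for [start, start + len). */
			btrfs_qgroup_free_data(inode, data_reserved, start, len);
	out:
		/* The changeset itself is always freed, reserved or not. */
		extent_changeset_free(data_reserved);
		return ret;
	}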
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/vmalloc.h> #include <linux/sched/mm.h>
#include "ctree.h" #include "ctree.h"
#include "disk-io.h" #include "disk-io.h"
#include "locking.h" #include "locking.h"
...@@ -153,21 +153,21 @@ static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize) ...@@ -153,21 +153,21 @@ static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
static u8 *alloc_bitmap(u32 bitmap_size) static u8 *alloc_bitmap(u32 bitmap_size)
{ {
void *mem; u8 *ret;
unsigned int nofs_flag;
/* /*
* The allocation size varies, observed numbers were < 4K up to 16K. * GFP_NOFS doesn't work with kvmalloc(), but we really can't recurse
* Using vmalloc unconditionally would be too heavy, we'll try * into the filesystem as the free space bitmap can be modified in the
* contiguous allocations first. * critical section of a transaction commit.
*
* TODO: push the memalloc_nofs_{save,restore}() to the caller where we
* know that recursion is unsafe.
*/ */
if (bitmap_size <= PAGE_SIZE) nofs_flag = memalloc_nofs_save();
return kzalloc(bitmap_size, GFP_NOFS); ret = kvzalloc(bitmap_size, GFP_KERNEL);
memalloc_nofs_restore(nofs_flag);
mem = kzalloc(bitmap_size, GFP_NOFS | __GFP_NOWARN); return ret;
if (mem)
return mem;
return __vmalloc(bitmap_size, GFP_NOFS | __GFP_ZERO, PAGE_KERNEL);
} }
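The TODO above suggests eventually pushing the NOFS scope out to call sites that are known to sit inside a transaction-commit critical section. A hedged sketch of what such caller-side protection could look like (the function itself is hypothetical):

	/* Hypothetical caller known to run in a transaction-commit critical section. */
	static int modify_bitmap_locked(struct btrfs_fs_info *fs_info, u32 bitmap_size)
	{
		unsigned int nofs_flag;
		u8 *bitmap;
		int ret = 0;

		/* Every allocation in this scope implicitly behaves as GFP_NOFS. */
		nofs_flag = memalloc_nofs_save();
		bitmap = kvzalloc(bitmap_size, GFP_KERNEL);
		if (!bitmap)
			ret = -ENOMEM;
		/* ... modify the free space bitmap here ... */
		memalloc_nofs_restore(nofs_flag);

		kvfree(bitmap);
		return ret;
	}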
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
...@@ -1188,11 +1188,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) ...@@ -1188,11 +1188,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID); btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
ret = btrfs_commit_transaction(trans); return btrfs_commit_transaction(trans);
if (ret)
return ret;
return 0;
abort: abort:
clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
...@@ -1277,11 +1273,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info) ...@@ -1277,11 +1273,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
free_extent_buffer(free_space_root->commit_root); free_extent_buffer(free_space_root->commit_root);
kfree(free_space_root); kfree(free_space_root);
ret = btrfs_commit_transaction(trans); return btrfs_commit_transaction(trans);
if (ret)
return ret;
return 0;
abort: abort:
btrfs_abort_transaction(trans, ret); btrfs_abort_transaction(trans, ret);
......
...@@ -400,6 +400,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root, ...@@ -400,6 +400,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
struct btrfs_path *path; struct btrfs_path *path;
struct inode *inode; struct inode *inode;
struct btrfs_block_rsv *rsv; struct btrfs_block_rsv *rsv;
struct extent_changeset *data_reserved = NULL;
u64 num_bytes; u64 num_bytes;
u64 alloc_hint = 0; u64 alloc_hint = 0;
int ret; int ret;
...@@ -492,7 +493,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root, ...@@ -492,7 +493,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
/* Just to make sure we have enough space */ /* Just to make sure we have enough space */
prealloc += 8 * PAGE_SIZE; prealloc += 8 * PAGE_SIZE;
ret = btrfs_delalloc_reserve_space(inode, 0, prealloc); ret = btrfs_delalloc_reserve_space(inode, &data_reserved, 0, prealloc);
if (ret) if (ret)
goto out_put; goto out_put;
...@@ -516,6 +517,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root, ...@@ -516,6 +517,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
trans->bytes_reserved = num_bytes; trans->bytes_reserved = num_bytes;
btrfs_free_path(path); btrfs_free_path(path);
extent_changeset_free(data_reserved);
return ret; return ret;
} }
......
This diff is collapsed.
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
#include <linux/bit_spinlock.h> #include <linux/bit_spinlock.h>
#include <linux/security.h> #include <linux/security.h>
#include <linux/xattr.h> #include <linux/xattr.h>
#include <linux/vmalloc.h> #include <linux/mm.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/uuid.h> #include <linux/uuid.h>
...@@ -689,7 +689,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, ...@@ -689,7 +689,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
if (ret) if (ret)
goto dec_and_free; goto dec_and_free;
btrfs_wait_ordered_extents(root, -1, 0, (u64)-1); btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
btrfs_init_block_rsv(&pending_snapshot->block_rsv, btrfs_init_block_rsv(&pending_snapshot->block_rsv,
BTRFS_BLOCK_RSV_TEMP); BTRFS_BLOCK_RSV_TEMP);
...@@ -1127,6 +1127,7 @@ static int cluster_pages_for_defrag(struct inode *inode, ...@@ -1127,6 +1127,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
struct btrfs_ordered_extent *ordered; struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL; struct extent_state *cached_state = NULL;
struct extent_io_tree *tree; struct extent_io_tree *tree;
struct extent_changeset *data_reserved = NULL;
gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
file_end = (isize - 1) >> PAGE_SHIFT; file_end = (isize - 1) >> PAGE_SHIFT;
...@@ -1135,7 +1136,7 @@ static int cluster_pages_for_defrag(struct inode *inode, ...@@ -1135,7 +1136,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1); page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
ret = btrfs_delalloc_reserve_space(inode, ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
start_index << PAGE_SHIFT, start_index << PAGE_SHIFT,
page_cnt << PAGE_SHIFT); page_cnt << PAGE_SHIFT);
if (ret) if (ret)
...@@ -1226,7 +1227,7 @@ static int cluster_pages_for_defrag(struct inode *inode, ...@@ -1226,7 +1227,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
spin_lock(&BTRFS_I(inode)->lock); spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++; BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock); spin_unlock(&BTRFS_I(inode)->lock);
btrfs_delalloc_release_space(inode, btrfs_delalloc_release_space(inode, data_reserved,
start_index << PAGE_SHIFT, start_index << PAGE_SHIFT,
(page_cnt - i_done) << PAGE_SHIFT); (page_cnt - i_done) << PAGE_SHIFT);
} }
...@@ -1247,15 +1248,17 @@ static int cluster_pages_for_defrag(struct inode *inode, ...@@ -1247,15 +1248,17 @@ static int cluster_pages_for_defrag(struct inode *inode,
unlock_page(pages[i]); unlock_page(pages[i]);
put_page(pages[i]); put_page(pages[i]);
} }
extent_changeset_free(data_reserved);
return i_done; return i_done;
out: out:
for (i = 0; i < i_done; i++) { for (i = 0; i < i_done; i++) {
unlock_page(pages[i]); unlock_page(pages[i]);
put_page(pages[i]); put_page(pages[i]);
} }
btrfs_delalloc_release_space(inode, btrfs_delalloc_release_space(inode, data_reserved,
start_index << PAGE_SHIFT, start_index << PAGE_SHIFT,
page_cnt << PAGE_SHIFT); page_cnt << PAGE_SHIFT);
extent_changeset_free(data_reserved);
return ret; return ret;
} }
...@@ -4588,7 +4591,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, ...@@ -4588,7 +4591,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
out: out:
btrfs_free_path(path); btrfs_free_path(path);
vfree(inodes); kvfree(inodes);
kfree(loi); kfree(loi);
return ret; return ret;
...@@ -4897,7 +4900,6 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg) ...@@ -4897,7 +4900,6 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
goto out; goto out;
} }
/* FIXME: check if the IDs really exist */
if (sa->assign) { if (sa->assign) {
ret = btrfs_add_qgroup_relation(trans, fs_info, ret = btrfs_add_qgroup_relation(trans, fs_info,
sa->src, sa->dst); sa->src, sa->dst);
...@@ -4956,7 +4958,6 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg) ...@@ -4956,7 +4958,6 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
goto out; goto out;
} }
/* FIXME: check if the IDs really exist */
if (sa->create) { if (sa->create) {
ret = btrfs_create_qgroup(trans, fs_info, sa->qgroupid); ret = btrfs_create_qgroup(trans, fs_info, sa->qgroupid);
} else { } else {
...@@ -5010,7 +5011,6 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg) ...@@ -5010,7 +5011,6 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
qgroupid = root->root_key.objectid; qgroupid = root->root_key.objectid;
} }
/* FIXME: check if the IDs really exist */
ret = btrfs_limit_qgroup(trans, fs_info, qgroupid, &sa->lim); ret = btrfs_limit_qgroup(trans, fs_info, qgroupid, &sa->lim);
err = btrfs_end_transaction(trans); err = btrfs_end_transaction(trans);
......
...@@ -18,13 +18,14 @@ ...@@ -18,13 +18,14 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/vmalloc.h> #include <linux/mm.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/bio.h> #include <linux/bio.h>
#include <linux/lzo.h> #include <linux/lzo.h>
#include <linux/refcount.h>
#include "compression.h" #include "compression.h"
#define LZO_LEN 4 #define LZO_LEN 4
...@@ -40,9 +41,9 @@ static void lzo_free_workspace(struct list_head *ws) ...@@ -40,9 +41,9 @@ static void lzo_free_workspace(struct list_head *ws)
{ {
struct workspace *workspace = list_entry(ws, struct workspace, list); struct workspace *workspace = list_entry(ws, struct workspace, list);
vfree(workspace->buf); kvfree(workspace->buf);
vfree(workspace->cbuf); kvfree(workspace->cbuf);
vfree(workspace->mem); kvfree(workspace->mem);
kfree(workspace); kfree(workspace);
} }
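The switch to kvfree() here is required, not cosmetic: buffers from kvmalloc()/kvzalloc() may be either slab- or vmalloc-backed, and only kvfree() handles both. The pairing the workspace code relies on is simply:

	buf = kvmalloc(size, GFP_KERNEL);	/* tries kmalloc, falls back to vmalloc */
	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	kvfree(buf);				/* correct for either backing store */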
...@@ -50,13 +51,13 @@ static struct list_head *lzo_alloc_workspace(void) ...@@ -50,13 +51,13 @@ static struct list_head *lzo_alloc_workspace(void)
{ {
struct workspace *workspace; struct workspace *workspace;
workspace = kzalloc(sizeof(*workspace), GFP_NOFS); workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
if (!workspace) if (!workspace)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
workspace->mem = vmalloc(LZO1X_MEM_COMPRESS); workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE)); workspace->buf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_SIZE)); workspace->cbuf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
if (!workspace->mem || !workspace->buf || !workspace->cbuf) if (!workspace->mem || !workspace->buf || !workspace->cbuf)
goto fail; goto fail;
...@@ -141,7 +142,7 @@ static int lzo_compress_pages(struct list_head *ws, ...@@ -141,7 +142,7 @@ static int lzo_compress_pages(struct list_head *ws,
ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf, ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
&out_len, workspace->mem); &out_len, workspace->mem);
if (ret != LZO_E_OK) { if (ret != LZO_E_OK) {
pr_debug("BTRFS: deflate in loop returned %d\n", pr_debug("BTRFS: lzo in loop returned %d\n",
ret); ret);
ret = -EIO; ret = -EIO;
goto out; goto out;
...@@ -229,8 +230,10 @@ static int lzo_compress_pages(struct list_head *ws, ...@@ -229,8 +230,10 @@ static int lzo_compress_pages(struct list_head *ws,
in_len = min(bytes_left, PAGE_SIZE); in_len = min(bytes_left, PAGE_SIZE);
} }
if (tot_out > tot_in) if (tot_out >= tot_in) {
ret = -E2BIG;
goto out; goto out;
}
/* store the size of all chunks of compressed data */ /* store the size of all chunks of compressed data */
cpage_out = kmap(pages[0]); cpage_out = kmap(pages[0]);
...@@ -254,16 +257,13 @@ static int lzo_compress_pages(struct list_head *ws, ...@@ -254,16 +257,13 @@ static int lzo_compress_pages(struct list_head *ws,
return ret; return ret;
} }
static int lzo_decompress_bio(struct list_head *ws, static int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
struct page **pages_in,
u64 disk_start,
struct bio *orig_bio,
size_t srclen)
{ {
struct workspace *workspace = list_entry(ws, struct workspace, list); struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0, ret2; int ret = 0, ret2;
char *data_in; char *data_in;
unsigned long page_in_index = 0; unsigned long page_in_index = 0;
size_t srclen = cb->compressed_len;
unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE); unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start; unsigned long buf_start;
unsigned long buf_offset = 0; unsigned long buf_offset = 0;
...@@ -278,6 +278,9 @@ static int lzo_decompress_bio(struct list_head *ws, ...@@ -278,6 +278,9 @@ static int lzo_decompress_bio(struct list_head *ws,
unsigned long tot_len; unsigned long tot_len;
char *buf; char *buf;
bool may_late_unmap, need_unmap; bool may_late_unmap, need_unmap;
struct page **pages_in = cb->compressed_pages;
u64 disk_start = cb->start;
struct bio *orig_bio = cb->orig_bio;
data_in = kmap(pages_in[0]); data_in = kmap(pages_in[0]);
tot_len = read_compress_length(data_in); tot_len = read_compress_length(data_in);
......
...@@ -663,7 +663,7 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work) ...@@ -663,7 +663,7 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
* wait for all the ordered extents in a root. This is done when balancing * wait for all the ordered extents in a root. This is done when balancing
* space between drives. * space between drives.
*/ */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
const u64 range_start, const u64 range_len) const u64 range_start, const u64 range_len)
{ {
struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_fs_info *fs_info = root->fs_info;
...@@ -671,7 +671,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, ...@@ -671,7 +671,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
LIST_HEAD(skipped); LIST_HEAD(skipped);
LIST_HEAD(works); LIST_HEAD(works);
struct btrfs_ordered_extent *ordered, *next; struct btrfs_ordered_extent *ordered, *next;
int count = 0; u64 count = 0;
const u64 range_end = range_start + range_len; const u64 range_end = range_start + range_len;
mutex_lock(&root->ordered_extent_mutex); mutex_lock(&root->ordered_extent_mutex);
...@@ -701,7 +701,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, ...@@ -701,7 +701,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
cond_resched(); cond_resched();
spin_lock(&root->ordered_extent_lock); spin_lock(&root->ordered_extent_lock);
if (nr != -1) if (nr != U64_MAX)
nr--; nr--;
count++; count++;
} }
...@@ -720,13 +720,13 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, ...@@ -720,13 +720,13 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
return count; return count;
} }
int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
const u64 range_start, const u64 range_len) const u64 range_start, const u64 range_len)
{ {
struct btrfs_root *root; struct btrfs_root *root;
struct list_head splice; struct list_head splice;
int done; u64 total_done = 0;
int total_done = 0; u64 done;
INIT_LIST_HEAD(&splice); INIT_LIST_HEAD(&splice);
...@@ -748,9 +748,8 @@ int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, ...@@ -748,9 +748,8 @@ int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
total_done += done; total_done += done;
spin_lock(&fs_info->ordered_root_lock); spin_lock(&fs_info->ordered_root_lock);
if (nr != -1) { if (nr != U64_MAX) {
nr -= done; nr -= done;
WARN_ON(nr < 0);
} }
} }
list_splice_tail(&splice, &fs_info->ordered_roots); list_splice_tail(&splice, &fs_info->ordered_roots);
......
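With @nr now unsigned, the "no limit" sentinel is spelled U64_MAX instead of -1, which also removes the signed underflow the deleted WARN_ON used to watch for. A typical wait-for-everything call now reads:

	/* Wait on every ordered extent of every root, across the whole range. */
	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);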
...@@ -200,9 +200,9 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, ...@@ -200,9 +200,9 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered); struct btrfs_ordered_extent *ordered);
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
u32 *sum, int len); u32 *sum, int len);
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
const u64 range_start, const u64 range_len); const u64 range_start, const u64 range_len);
int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
const u64 range_start, const u64 range_len); const u64 range_start, const u64 range_len);
void btrfs_get_logged_extents(struct btrfs_inode *inode, void btrfs_get_logged_extents(struct btrfs_inode *inode,
struct list_head *logged_list, struct list_head *logged_list,
......
...@@ -261,8 +261,11 @@ void btrfs_print_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *l) ...@@ -261,8 +261,11 @@ void btrfs_print_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *l)
case BTRFS_BLOCK_GROUP_ITEM_KEY: case BTRFS_BLOCK_GROUP_ITEM_KEY:
bi = btrfs_item_ptr(l, i, bi = btrfs_item_ptr(l, i,
struct btrfs_block_group_item); struct btrfs_block_group_item);
pr_info("\t\tblock group used %llu\n", pr_info(
btrfs_disk_block_group_used(l, bi)); "\t\tblock group used %llu chunk_objectid %llu flags %llu\n",
btrfs_disk_block_group_used(l, bi),
btrfs_disk_block_group_chunk_objectid(l, bi),
btrfs_disk_block_group_flags(l, bi));
break; break;
case BTRFS_CHUNK_ITEM_KEY: case BTRFS_CHUNK_ITEM_KEY:
print_chunk(l, btrfs_item_ptr(l, i, print_chunk(l, btrfs_item_ptr(l, i,
......
...@@ -164,6 +164,7 @@ static int iterate_object_props(struct btrfs_root *root, ...@@ -164,6 +164,7 @@ static int iterate_object_props(struct btrfs_root *root,
size_t), size_t),
void *ctx) void *ctx)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int ret; int ret;
char *name_buf = NULL; char *name_buf = NULL;
char *value_buf = NULL; char *value_buf = NULL;
...@@ -214,6 +215,12 @@ static int iterate_object_props(struct btrfs_root *root, ...@@ -214,6 +215,12 @@ static int iterate_object_props(struct btrfs_root *root,
name_ptr = (unsigned long)(di + 1); name_ptr = (unsigned long)(di + 1);
data_ptr = name_ptr + name_len; data_ptr = name_ptr + name_len;
if (verify_dir_item(fs_info, leaf,
path->slots[0], di)) {
ret = -EIO;
goto out;
}
if (name_len <= XATTR_BTRFS_PREFIX_LEN || if (name_len <= XATTR_BTRFS_PREFIX_LEN ||
memcmp_extent_buffer(leaf, XATTR_BTRFS_PREFIX, memcmp_extent_buffer(leaf, XATTR_BTRFS_PREFIX,
name_ptr, name_ptr,
......
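The dir-item validation added here has the same shape at every call site in this series: verify the item against the leaf before trusting name_len or the name bytes, and fail with -EIO on corruption. Condensed sketch (the surrounding iteration context is assumed, not shown in the patch):

	di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
	if (verify_dir_item(fs_info, leaf, slot, di))
		return -EIO;	/* corrupted dir item: don't read name/data */
	name_len = btrfs_dir_name_len(leaf, di);
	/* only now is it safe to copy name_len bytes out of the extent buffer */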
...@@ -1406,38 +1406,6 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, ...@@ -1406,38 +1406,6 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
return ret; return ret;
} }
int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
struct btrfs_qgroup_extent_record *record;
struct btrfs_delayed_ref_root *delayed_refs;
struct rb_node *node;
u64 qgroup_to_skip;
int ret = 0;
delayed_refs = &trans->transaction->delayed_refs;
qgroup_to_skip = delayed_refs->qgroup_to_skip;
/*
* No need to do lock, since this function will only be called in
* btrfs_commit_transaction().
*/
node = rb_first(&delayed_refs->dirty_extent_root);
while (node) {
record = rb_entry(node, struct btrfs_qgroup_extent_record,
node);
if (WARN_ON(!record->old_roots))
ret = btrfs_find_all_roots(NULL, fs_info,
record->bytenr, 0, &record->old_roots);
if (ret < 0)
break;
if (qgroup_to_skip)
ulist_del(record->old_roots, qgroup_to_skip, 0);
node = rb_next(node);
}
return ret;
}
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info, int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_qgroup_extent_record *record) struct btrfs_qgroup_extent_record *record)
...@@ -1559,6 +1527,7 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, ...@@ -1559,6 +1527,7 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
if (ret) if (ret)
return ret; return ret;
} }
cond_resched();
return 0; return 0;
} }
...@@ -1918,6 +1887,35 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info, ...@@ -1918,6 +1887,35 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
return 0; return 0;
} }
/*
 * Check if the given @roots could potentially be a list of fs tree roots
*
* Return 0 for definitely not a fs/subvol tree roots ulist
* Return 1 for possible fs/subvol tree roots in the list (considering an empty
* one as well)
*/
static int maybe_fs_roots(struct ulist *roots)
{
struct ulist_node *unode;
struct ulist_iterator uiter;
/* Empty one, still possible for fs roots */
if (!roots || roots->nnodes == 0)
return 1;
ULIST_ITER_INIT(&uiter);
unode = ulist_next(roots, &uiter);
if (!unode)
return 1;
/*
* If it contains fs tree roots, then it must belong to fs/subvol
* trees.
* If it contains a non-fs tree, it won't be shared with fs/subvol trees.
*/
return is_fstree(unode->val);
}
int int
btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, struct btrfs_fs_info *fs_info,
...@@ -1934,10 +1932,20 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, ...@@ -1934,10 +1932,20 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
return 0; return 0;
if (new_roots) if (new_roots) {
if (!maybe_fs_roots(new_roots))
goto out_free;
nr_new_roots = new_roots->nnodes; nr_new_roots = new_roots->nnodes;
if (old_roots) }
if (old_roots) {
if (!maybe_fs_roots(old_roots))
goto out_free;
nr_old_roots = old_roots->nnodes; nr_old_roots = old_roots->nnodes;
}
/* Quick exit, either not fs tree roots, or won't affect any qgroup */
if (nr_old_roots == 0 && nr_new_roots == 0)
goto out_free;
BUG_ON(!fs_info->quota_root); BUG_ON(!fs_info->quota_root);
...@@ -2016,6 +2024,19 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans, ...@@ -2016,6 +2024,19 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
trace_btrfs_qgroup_account_extents(fs_info, record); trace_btrfs_qgroup_account_extents(fs_info, record);
if (!ret) { if (!ret) {
/*
* Old roots should be searched when inserting qgroup
* extent record
*/
if (WARN_ON(!record->old_roots)) {
/* Search commit root to find old_roots */
ret = btrfs_find_all_roots(NULL, fs_info,
record->bytenr, 0,
&record->old_roots);
if (ret < 0)
goto cleanup;
}
/* /*
* Use SEQ_LAST as time_seq to do special search, which * Use SEQ_LAST as time_seq to do special search, which
* doesn't lock tree or delayed_refs and search current * doesn't lock tree or delayed_refs and search current
...@@ -2025,8 +2046,11 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans, ...@@ -2025,8 +2046,11 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
record->bytenr, SEQ_LAST, &new_roots); record->bytenr, SEQ_LAST, &new_roots);
if (ret < 0) if (ret < 0)
goto cleanup; goto cleanup;
if (qgroup_to_skip) if (qgroup_to_skip) {
ulist_del(new_roots, qgroup_to_skip, 0); ulist_del(new_roots, qgroup_to_skip, 0);
ulist_del(record->old_roots, qgroup_to_skip,
0);
}
ret = btrfs_qgroup_account_extent(trans, fs_info, ret = btrfs_qgroup_account_extent(trans, fs_info,
record->bytenr, record->num_bytes, record->bytenr, record->num_bytes,
record->old_roots, new_roots); record->old_roots, new_roots);
...@@ -2338,6 +2362,11 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce) ...@@ -2338,6 +2362,11 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
if (num_bytes == 0) if (num_bytes == 0)
return 0; return 0;
if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
capable(CAP_SYS_RESOURCE))
enforce = false;
retry: retry:
spin_lock(&fs_info->qgroup_lock); spin_lock(&fs_info->qgroup_lock);
quota_root = fs_info->quota_root; quota_root = fs_info->quota_root;
...@@ -2376,7 +2405,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce) ...@@ -2376,7 +2405,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
ret = btrfs_start_delalloc_inodes(root, 0); ret = btrfs_start_delalloc_inodes(root, 0);
if (ret) if (ret)
return ret; return ret;
btrfs_wait_ordered_extents(root, -1, 0, (u64)-1); btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
trans = btrfs_join_transaction(root); trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) if (IS_ERR(trans))
return PTR_ERR(trans); return PTR_ERR(trans);
...@@ -2806,55 +2835,130 @@ btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) ...@@ -2806,55 +2835,130 @@ btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
* Return <0 for error (including -EQUOT) * Return <0 for error (including -EQUOT)
* *
* NOTE: this function may sleep for memory allocation. * NOTE: this function may sleep for memory allocation.
 * If btrfs_qgroup_reserve_data() is called multiple times with the
 * same @reserved, the caller must ensure that it is OK to free *ALL*
 * reserved space if an error happens.
*/ */
int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len) int btrfs_qgroup_reserve_data(struct inode *inode,
struct extent_changeset **reserved_ret, u64 start,
u64 len)
{ {
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_changeset changeset;
struct ulist_node *unode; struct ulist_node *unode;
struct ulist_iterator uiter; struct ulist_iterator uiter;
struct extent_changeset *reserved;
u64 orig_reserved;
u64 to_reserve;
int ret; int ret;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) || if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
!is_fstree(root->objectid) || len == 0) !is_fstree(root->objectid) || len == 0)
return 0; return 0;
changeset.bytes_changed = 0; /* @reserved parameter is mandatory for qgroup */
ulist_init(&changeset.range_changed); if (WARN_ON(!reserved_ret))
return -EINVAL;
if (!*reserved_ret) {
*reserved_ret = extent_changeset_alloc();
if (!*reserved_ret)
return -ENOMEM;
}
reserved = *reserved_ret;
/* Record already reserved space */
orig_reserved = reserved->bytes_changed;
ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start, ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
start + len -1, EXTENT_QGROUP_RESERVED, &changeset); start + len -1, EXTENT_QGROUP_RESERVED, reserved);
/* Newly reserved space */
to_reserve = reserved->bytes_changed - orig_reserved;
trace_btrfs_qgroup_reserve_data(inode, start, len, trace_btrfs_qgroup_reserve_data(inode, start, len,
changeset.bytes_changed, to_reserve, QGROUP_RESERVE);
QGROUP_RESERVE);
if (ret < 0) if (ret < 0)
goto cleanup; goto cleanup;
ret = qgroup_reserve(root, changeset.bytes_changed, true); ret = qgroup_reserve(root, to_reserve, true);
if (ret < 0) if (ret < 0)
goto cleanup; goto cleanup;
ulist_release(&changeset.range_changed);
return ret; return ret;
cleanup: cleanup:
/* cleanup already reserved ranges */ /* cleanup *ALL* already reserved ranges */
ULIST_ITER_INIT(&uiter); ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(&changeset.range_changed, &uiter))) while ((unode = ulist_next(&reserved->range_changed, &uiter)))
clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val, clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL, unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL,
GFP_NOFS); GFP_NOFS);
ulist_release(&changeset.range_changed); extent_changeset_release(reserved);
return ret;
}
/* Free ranges specified by @reserved, normally in error path */
static int qgroup_free_reserved_data(struct inode *inode,
struct extent_changeset *reserved, u64 start, u64 len)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct ulist_node *unode;
struct ulist_iterator uiter;
struct extent_changeset changeset;
int freed = 0;
int ret;
extent_changeset_init(&changeset);
len = round_up(start + len, root->fs_info->sectorsize);
start = round_down(start, root->fs_info->sectorsize);
ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
u64 range_start = unode->val;
/* unode->aux is the inclusive end */
u64 range_len = unode->aux - range_start + 1;
u64 free_start;
u64 free_len;
extent_changeset_release(&changeset);
/* Only free ranges overlapping [start, start + len) */
if (range_start >= start + len ||
range_start + range_len <= start)
continue;
free_start = max(range_start, start);
free_len = min(start + len, range_start + range_len) -
free_start;
/*
 * TODO: Also modify reserved->ranges_reserved to reflect
* the modification.
*
* However as long as we free qgroup reserved according to
* EXTENT_QGROUP_RESERVED, we won't double free.
 * So there is no need to rush.
*/
ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
free_start, free_start + free_len - 1,
EXTENT_QGROUP_RESERVED, &changeset);
if (ret < 0)
goto out;
freed += changeset.bytes_changed;
}
btrfs_qgroup_free_refroot(root->fs_info, root->objectid, freed);
ret = freed;
out:
extent_changeset_release(&changeset);
return ret; return ret;
} }
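The free_start/free_len computation above is plain interval intersection. As a worked example with hypothetical numbers, a recorded range [0, 64K) and a request of start=16K, len=32K free exactly [16K, 48K):

	free_start = max(range_start, start);		/* max(0, 16K) = 16K          */
	free_len = min(start + len, range_start + range_len)
		   - free_start;			/* min(48K, 64K) - 16K = 32K  */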
static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len, static int __btrfs_qgroup_release_data(struct inode *inode,
int free) struct extent_changeset *reserved, u64 start, u64 len,
int free)
{ {
struct extent_changeset changeset; struct extent_changeset changeset;
int trace_op = QGROUP_RELEASE; int trace_op = QGROUP_RELEASE;
int ret; int ret;
changeset.bytes_changed = 0; /* In release case, we shouldn't have @reserved */
ulist_init(&changeset.range_changed); WARN_ON(!free && reserved);
if (free && reserved)
return qgroup_free_reserved_data(inode, reserved, start, len);
extent_changeset_init(&changeset);
ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start, ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
start + len -1, EXTENT_QGROUP_RESERVED, &changeset); start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
if (ret < 0) if (ret < 0)
...@@ -2868,8 +2972,9 @@ static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len, ...@@ -2868,8 +2972,9 @@ static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info, btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
BTRFS_I(inode)->root->objectid, BTRFS_I(inode)->root->objectid,
changeset.bytes_changed); changeset.bytes_changed);
ret = changeset.bytes_changed;
out: out:
ulist_release(&changeset.range_changed); extent_changeset_release(&changeset);
return ret; return ret;
} }
...@@ -2878,14 +2983,17 @@ static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len, ...@@ -2878,14 +2983,17 @@ static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
* *
* Should be called when a range of pages get invalidated before reaching disk. * Should be called when a range of pages get invalidated before reaching disk.
* Or for error cleanup case. * Or for error cleanup case.
 * If @reserved is given, only the reserved ranges in [@start, @start + @len)
 * will be freed.
* *
* For data written to disk, use btrfs_qgroup_release_data(). * For data written to disk, use btrfs_qgroup_release_data().
* *
* NOTE: This function may sleep for memory allocation. * NOTE: This function may sleep for memory allocation.
*/ */
int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len) int btrfs_qgroup_free_data(struct inode *inode,
struct extent_changeset *reserved, u64 start, u64 len)
{ {
return __btrfs_qgroup_release_data(inode, start, len, 1); return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
} }
/* /*
...@@ -2905,7 +3013,7 @@ int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len) ...@@ -2905,7 +3013,7 @@ int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len)
*/ */
int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len) int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
{ {
return __btrfs_qgroup_release_data(inode, start, len, 0); return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
} }
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
...@@ -2969,8 +3077,7 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode) ...@@ -2969,8 +3077,7 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
struct ulist_iterator iter; struct ulist_iterator iter;
int ret; int ret;
changeset.bytes_changed = 0; extent_changeset_init(&changeset);
ulist_init(&changeset.range_changed);
ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
EXTENT_QGROUP_RESERVED, &changeset); EXTENT_QGROUP_RESERVED, &changeset);
...@@ -2987,5 +3094,5 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode) ...@@ -2987,5 +3094,5 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
changeset.bytes_changed); changeset.bytes_changed);
} }
ulist_release(&changeset.range_changed); extent_changeset_release(&changeset);
} }
...@@ -134,8 +134,7 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, ...@@ -134,8 +134,7 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info); int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info); void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
struct btrfs_delayed_extent_op; struct btrfs_delayed_extent_op;
int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
/* /*
* Inform qgroup to trace one dirty extent, its info is recorded in @record. * Inform qgroup to trace one dirty extent, its info is recorded in @record.
* So qgroup can account it at transaction committing time. * So qgroup can account it at transaction committing time.
...@@ -243,9 +242,11 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid, ...@@ -243,9 +242,11 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
#endif #endif
/* New io_tree based accurate qgroup reserve API */ /* New io_tree based accurate qgroup reserve API */
int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len); int btrfs_qgroup_reserve_data(struct inode *inode,
struct extent_changeset **reserved, u64 start, u64 len);
int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len); int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len);
int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len); int btrfs_qgroup_free_data(struct inode *inode,
struct extent_changeset *reserved, u64 start, u64 len);
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
bool enforce); bool enforce);
......
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#include <linux/hash.h> #include <linux/hash.h>
#include <linux/list_sort.h> #include <linux/list_sort.h>
#include <linux/raid/xor.h> #include <linux/raid/xor.h>
#include <linux/vmalloc.h> #include <linux/mm.h>
#include <asm/div64.h> #include <asm/div64.h>
#include "ctree.h" #include "ctree.h"
#include "extent_map.h" #include "extent_map.h"
...@@ -218,12 +218,9 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info) ...@@ -218,12 +218,9 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
* of a failing mount. * of a failing mount.
*/ */
table_size = sizeof(*table) + sizeof(*h) * num_entries; table_size = sizeof(*table) + sizeof(*h) * num_entries;
table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); table = kvzalloc(table_size, GFP_KERNEL);
if (!table) { if (!table)
table = vzalloc(table_size); return -ENOMEM;
if (!table)
return -ENOMEM;
}
spin_lock_init(&table->cache_lock); spin_lock_init(&table->cache_lock);
INIT_LIST_HEAD(&table->stripe_cache); INIT_LIST_HEAD(&table->stripe_cache);
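kvzalloc() folds the removed two-step allocation into a single call that tries kmalloc first and quietly falls back to vmalloc. Roughly what it does internally (an approximation for comparison, not the actual mm implementation):

	/* Approximate equivalent of kvzalloc(size, GFP_KERNEL): */
	mem = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!mem)
		mem = vzalloc(size);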
...@@ -1101,10 +1098,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, ...@@ -1101,10 +1098,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
} }
/* put a new bio on the list */ /* put a new bio on the list */
bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1); bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
if (!bio)
return -ENOMEM;
bio->bi_iter.bi_size = 0; bio->bi_iter.bi_size = 0;
bio->bi_bdev = stripe->dev->bdev; bio->bi_bdev = stripe->dev->bdev;
bio->bi_iter.bi_sector = disk_start >> 9; bio->bi_iter.bi_sector = disk_start >> 9;
......
...@@ -66,7 +66,6 @@ struct reada_extctl { ...@@ -66,7 +66,6 @@ struct reada_extctl {
struct reada_extent { struct reada_extent {
u64 logical; u64 logical;
struct btrfs_key top; struct btrfs_key top;
int err;
struct list_head extctl; struct list_head extctl;
int refcnt; int refcnt;
spinlock_t lock; spinlock_t lock;
......
...@@ -3093,11 +3093,12 @@ int prealloc_file_extent_cluster(struct inode *inode, ...@@ -3093,11 +3093,12 @@ int prealloc_file_extent_cluster(struct inode *inode,
u64 prealloc_start = cluster->start - offset; u64 prealloc_start = cluster->start - offset;
u64 prealloc_end = cluster->end - offset; u64 prealloc_end = cluster->end - offset;
u64 cur_offset; u64 cur_offset;
struct extent_changeset *data_reserved = NULL;
BUG_ON(cluster->start != cluster->boundary[0]); BUG_ON(cluster->start != cluster->boundary[0]);
inode_lock(inode); inode_lock(inode);
ret = btrfs_check_data_free_space(inode, prealloc_start, ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start,
prealloc_end + 1 - prealloc_start); prealloc_end + 1 - prealloc_start);
if (ret) if (ret)
goto out; goto out;
...@@ -3113,8 +3114,8 @@ int prealloc_file_extent_cluster(struct inode *inode, ...@@ -3113,8 +3114,8 @@ int prealloc_file_extent_cluster(struct inode *inode,
lock_extent(&BTRFS_I(inode)->io_tree, start, end); lock_extent(&BTRFS_I(inode)->io_tree, start, end);
num_bytes = end + 1 - start; num_bytes = end + 1 - start;
if (cur_offset < start) if (cur_offset < start)
btrfs_free_reserved_data_space(inode, cur_offset, btrfs_free_reserved_data_space(inode, data_reserved,
start - cur_offset); cur_offset, start - cur_offset);
ret = btrfs_prealloc_file_range(inode, 0, start, ret = btrfs_prealloc_file_range(inode, 0, start,
num_bytes, num_bytes, num_bytes, num_bytes,
end + 1, &alloc_hint); end + 1, &alloc_hint);
...@@ -3125,10 +3126,11 @@ int prealloc_file_extent_cluster(struct inode *inode, ...@@ -3125,10 +3126,11 @@ int prealloc_file_extent_cluster(struct inode *inode,
nr++; nr++;
} }
if (cur_offset < prealloc_end) if (cur_offset < prealloc_end)
btrfs_free_reserved_data_space(inode, cur_offset, btrfs_free_reserved_data_space(inode, data_reserved,
prealloc_end + 1 - cur_offset); cur_offset, prealloc_end + 1 - cur_offset);
out: out:
inode_unlock(inode); inode_unlock(inode);
extent_changeset_free(data_reserved);
return ret; return ret;
} }
...@@ -4269,8 +4271,7 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) ...@@ -4269,8 +4271,7 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
INIT_LIST_HEAD(&rc->reloc_roots); INIT_LIST_HEAD(&rc->reloc_roots);
backref_cache_init(&rc->backref_cache); backref_cache_init(&rc->backref_cache);
mapping_tree_init(&rc->reloc_root_tree); mapping_tree_init(&rc->reloc_root_tree);
extent_io_tree_init(&rc->processed_blocks, extent_io_tree_init(&rc->processed_blocks, NULL);
fs_info->btree_inode->i_mapping);
return rc; return rc;
} }
...@@ -4372,7 +4373,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) ...@@ -4372,7 +4373,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
btrfs_wait_block_group_reservations(rc->block_group); btrfs_wait_block_group_reservations(rc->block_group);
btrfs_wait_nocow_writers(rc->block_group); btrfs_wait_nocow_writers(rc->block_group);
btrfs_wait_ordered_roots(fs_info, -1, btrfs_wait_ordered_roots(fs_info, U64_MAX,
rc->block_group->key.objectid, rc->block_group->key.objectid,
rc->block_group->key.offset); rc->block_group->key.offset);
......
...@@ -390,6 +390,13 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, ...@@ -390,6 +390,13 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
WARN_ON(btrfs_root_ref_dirid(leaf, ref) != dirid); WARN_ON(btrfs_root_ref_dirid(leaf, ref) != dirid);
WARN_ON(btrfs_root_ref_name_len(leaf, ref) != name_len); WARN_ON(btrfs_root_ref_name_len(leaf, ref) != name_len);
ptr = (unsigned long)(ref + 1); ptr = (unsigned long)(ref + 1);
ret = btrfs_is_name_len_valid(leaf, path->slots[0], ptr,
name_len);
if (!ret) {
err = -EIO;
goto out;
}
WARN_ON(memcmp_extent_buffer(leaf, name, ptr, name_len)); WARN_ON(memcmp_extent_buffer(leaf, name, ptr, name_len));
*sequence = btrfs_root_ref_sequence(leaf, ref); *sequence = btrfs_root_ref_sequence(leaf, ref);
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -87,7 +87,7 @@ static int test_find_delalloc(u32 sectorsize) ...@@ -87,7 +87,7 @@ static int test_find_delalloc(u32 sectorsize)
return -ENOMEM; return -ENOMEM;
} }
extent_io_tree_init(&tmp, &inode->i_data); extent_io_tree_init(&tmp, inode);
/* /*
* First go through and create and mark all of our pages dirty, we pin * First go through and create and mark all of our pages dirty, we pin
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -336,7 +336,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) ...@@ -336,7 +336,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
u32 this_len = sizeof(*di) + name_len + data_len; u32 this_len = sizeof(*di) + name_len + data_len;
unsigned long name_ptr = (unsigned long)(di + 1); unsigned long name_ptr = (unsigned long)(di + 1);
if (verify_dir_item(fs_info, leaf, di)) { if (verify_dir_item(fs_info, leaf, slot, di)) {
ret = -EIO; ret = -EIO;
goto err; goto err;
} }
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.