Commit 83ebade3 authored by Chris Mason
@@ -48,6 +48,9 @@ struct btrfs_worker_thread {
	/* number of things on the pending list */
	atomic_t num_pending;

+	/* reference counter for this struct */
+	atomic_t refs;
+
	unsigned long sequence;

	/* protects the pending list. */
@@ -93,17 +96,40 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
	}
}

+static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
+{
+	struct btrfs_workers *workers = worker->workers;
+	unsigned long flags;
+
+	rmb();
+	if (!workers->atomic_start_pending)
+		return;
+
+	spin_lock_irqsave(&workers->lock, flags);
+	if (!workers->atomic_start_pending)
+		goto out;
+
+	workers->atomic_start_pending = 0;
+	if (workers->num_workers >= workers->max_workers)
+		goto out;
+
+	spin_unlock_irqrestore(&workers->lock, flags);
+	btrfs_start_workers(workers, 1);
+	return;
+
+out:
+	spin_unlock_irqrestore(&workers->lock, flags);
+}
+
static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
-	unsigned long flags;
-
	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);
-	spin_lock_irqsave(&workers->lock, flags);
+	spin_lock(&workers->order_lock);
	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
@@ -126,45 +152,117 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

-		spin_unlock_irqrestore(&workers->lock, flags);
+		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
-		spin_lock_irqsave(&workers->lock, flags);
+		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

-	spin_unlock_irqrestore(&workers->lock, flags);
+	spin_unlock(&workers->order_lock);
	return 0;
}

+static void put_worker(struct btrfs_worker_thread *worker)
+{
+	if (atomic_dec_and_test(&worker->refs))
+		kfree(worker);
+}
+
+static int try_worker_shutdown(struct btrfs_worker_thread *worker)
+{
+	int freeit = 0;
+
+	spin_lock_irq(&worker->lock);
+	spin_lock_irq(&worker->workers->lock);
+	if (worker->workers->num_workers > 1 &&
+	    worker->idle &&
+	    !worker->working &&
+	    !list_empty(&worker->worker_list) &&
+	    list_empty(&worker->prio_pending) &&
+	    list_empty(&worker->pending)) {
+		freeit = 1;
+		list_del_init(&worker->worker_list);
+		worker->workers->num_workers--;
+	}
+	spin_unlock_irq(&worker->workers->lock);
+	spin_unlock_irq(&worker->lock);
+
+	if (freeit)
+		put_worker(worker);
+	return freeit;
+}
+
+static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
+					struct list_head *prio_head,
+					struct list_head *head)
+{
+	struct btrfs_work *work = NULL;
+	struct list_head *cur = NULL;
+
+	if(!list_empty(prio_head))
+		cur = prio_head->next;
+
+	smp_mb();
+	if (!list_empty(&worker->prio_pending))
+		goto refill;
+
+	if (!list_empty(head))
+		cur = head->next;
+
+	if (cur)
+		goto out;
+
+refill:
+	spin_lock_irq(&worker->lock);
+	list_splice_tail_init(&worker->prio_pending, prio_head);
+	list_splice_tail_init(&worker->pending, head);
+
+	if (!list_empty(prio_head))
+		cur = prio_head->next;
+	else if (!list_empty(head))
+		cur = head->next;
+	spin_unlock_irq(&worker->lock);
+
+	if (!cur)
+		goto out_fail;
+
+out:
+	work = list_entry(cur, struct btrfs_work, list);
+
+out_fail:
+	return work;
+}
/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
-	struct list_head *cur;
+	struct list_head head;
+	struct list_head prio_head;
	struct btrfs_work *work;
+
+	INIT_LIST_HEAD(&head);
+	INIT_LIST_HEAD(&prio_head);
+
	do {
-		spin_lock_irq(&worker->lock);
-again_locked:
+again:
		while (1) {
-			if (!list_empty(&worker->prio_pending))
-				cur = worker->prio_pending.next;
-			else if (!list_empty(&worker->pending))
-				cur = worker->pending.next;
-			else
+			work = get_next_work(worker, &prio_head, &head);
+			if (!work)
				break;
-			work = list_entry(cur, struct btrfs_work, list);
+
			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;
-			spin_unlock_irq(&worker->lock);

			work->func(work);
@@ -175,9 +273,13 @@ static int worker_loop(void *arg)
			 */
			run_ordered_completions(worker->workers, work);

-			spin_lock_irq(&worker->lock);
-			check_idle_worker(worker);
+			check_pending_worker_creates(worker);
		}
+
+		spin_lock_irq(&worker->lock);
+		check_idle_worker(worker);
+
		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
@@ -216,8 +318,10 @@ static int worker_loop(void *arg)
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
-				    !list_empty(&worker->prio_pending))
-					goto again_locked;
+				    !list_empty(&worker->prio_pending)) {
+					spin_unlock_irq(&worker->lock);
+					goto again;
+				}

				/*
				 * this makes sure we get a wakeup when someone
@@ -226,8 +330,13 @@ static int worker_loop(void *arg)
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

-				if (!kthread_should_stop())
-					schedule();
+				if (!kthread_should_stop()) {
+					schedule_timeout(HZ * 120);
+					if (!worker->working &&
+					    try_worker_shutdown(worker)) {
+						return 0;
+					}
+				}
			}
			__set_current_state(TASK_RUNNING);
		}
@@ -242,16 +351,30 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
+	int can_stop;

+	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
-		kthread_stop(worker->task);
-		list_del(&worker->worker_list);
-		kfree(worker);
+
+		atomic_inc(&worker->refs);
+		workers->num_workers -= 1;
+		if (!list_empty(&worker->worker_list)) {
+			list_del_init(&worker->worker_list);
+			put_worker(worker);
+			can_stop = 1;
+		} else
+			can_stop = 0;
+		spin_unlock_irq(&workers->lock);
+		if (can_stop)
+			kthread_stop(worker->task);
+		spin_lock_irq(&workers->lock);
+		put_worker(worker);
	}
+	spin_unlock_irq(&workers->lock);
	return 0;
}

@@ -266,10 +389,13 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
+	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
+	workers->atomic_start_pending = 0;
+	workers->atomic_worker_start = 0;
}

/*
@@ -293,7 +419,9 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
		INIT_LIST_HEAD(&worker->prio_pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
+		atomic_set(&worker->refs, 1);
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
@@ -303,7 +431,6 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
			kfree(worker);
			goto fail;
		}
-
		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
@@ -367,28 +494,18 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
+	struct list_head *fallback;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
-	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
-		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
-			struct list_head *fallback = NULL;
-			/*
-			 * we have failed to find any workers, just
-			 * return the force one
-			 */
-			if (!list_empty(&workers->worker_list))
-				fallback = workers->worker_list.next;
-			if (!list_empty(&workers->idle_list))
-				fallback = workers->idle_list.next;
-			BUG_ON(!fallback);
-			worker = list_entry(fallback,
-				  struct btrfs_worker_thread, worker_list);
-			spin_unlock_irqrestore(&workers->lock, flags);
+			goto fallback;
+		} else if (workers->atomic_worker_start) {
+			workers->atomic_start_pending = 1;
+			goto fallback;
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
@@ -396,6 +513,23 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
			goto again;
		}
	}
+	spin_unlock_irqrestore(&workers->lock, flags);
+	return worker;
+
+fallback:
+	fallback = NULL;
+	/*
+	 * we have failed to find any workers, just
+	 * return the first one we can find.
+	 */
+	if (!list_empty(&workers->worker_list))
+		fallback = workers->worker_list.next;
+	if (!list_empty(&workers->idle_list))
+		fallback = workers->idle_list.next;
+	BUG_ON(!fallback);
+	worker = list_entry(fallback,
+		  struct btrfs_worker_thread, worker_list);
+	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}

@@ -435,9 +569,9 @@ int btrfs_requeue_work(struct btrfs_work *work)
		worker->working = 1;
	}

-	spin_unlock_irqrestore(&worker->lock, flags);
	if (wake)
		wake_up_process(worker->task);
+	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;

@@ -463,14 +597,18 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
	worker = find_worker(workers);

	if (workers->ordered) {
-		spin_lock_irqsave(&workers->lock, flags);
+		/*
+		 * you're not allowed to do ordered queues from an
+		 * interrupt handler
+		 */
+		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
-		spin_unlock_irqrestore(&workers->lock, flags);
+		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}
@@ -492,10 +630,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
		wake = 1;
	worker->working = 1;

-	spin_unlock_irqrestore(&worker->lock, flags);
	if (wake)
		wake_up_process(worker->task);
+	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}
@@ -73,6 +73,15 @@ struct btrfs_workers {
	/* force completions in the order they were queued */
	int ordered;

+	/* more workers required, but in an interrupt handler */
+	int atomic_start_pending;
+
+	/*
+	 * are we allowed to sleep while starting workers or are we required
+	 * to start them at a later time?
+	 */
+	int atomic_worker_start;
+
	/* list with all the work threads.  The workers on the idle thread
	 * may be actively servicing jobs, but they haven't yet hit the
	 * idle thresh limit above.
@@ -90,6 +99,9 @@ struct btrfs_workers {
	/* lock for finding the next worker thread to queue on */
	spinlock_t lock;

+	/* lock for the ordered lists */
+	spinlock_t order_lock;
+
	/* extra name for this worker, used for current->name */
	char *name;
};
......
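Illustrative sketch (not part of the patch): how a worker pool that is fed from interrupt context is expected to be set up after this change. The pool and helper names (example_workers, example_setup, example_submit, example_func) are hypothetical; btrfs_init_workers(), btrfs_start_workers(), btrfs_queue_worker() and the atomic_worker_start flag are the ones added or used in this diff. Error handling is omitted.

static struct btrfs_workers example_workers;

static void example_func(struct btrfs_work *work)
{
	/* deferred completion work runs here, in process context */
}

static void example_setup(int max_threads)
{
	btrfs_init_workers(&example_workers, "example", max_threads);
	/* work is queued at interrupt time, so threads must not be forked inline */
	example_workers.atomic_worker_start = 1;
	btrfs_start_workers(&example_workers, 1);
}

static void example_submit(struct btrfs_work *work)
{
	work->func = example_func;
	/*
	 * If the pool is busy but below max_workers, find_worker() only sets
	 * atomic_start_pending here; an existing worker later calls
	 * check_pending_worker_creates() from process context to fork the
	 * extra thread.
	 */
	btrfs_queue_worker(&example_workers, work);
}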
@@ -506,10 +506,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
	 */
	set_page_extent_mapped(page);
	lock_extent(tree, last_offset, end, GFP_NOFS);
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, last_offset,
				   PAGE_CACHE_SIZE);
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);

	if (!em || last_offset < em->start ||
	    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
@@ -593,11 +593,11 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_CACHE_SIZE);
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
......
@@ -2290,7 +2290,7 @@ extern struct file_operations btrfs_file_operations;
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 locked_end,
-		       u64 inline_limit, u64 *hint_block);
+		       u64 inline_limit, u64 *hint_block, int drop_cache);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct inode *inode, u64 start, u64 end);
......
@@ -123,15 +123,15 @@ static struct extent_map *btree_get_extent(struct inode *inode,
	struct extent_map *em;
	int ret;

-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
-		spin_unlock(&em_tree->lock);
+		read_unlock(&em_tree->lock);
		goto out;
	}
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
@@ -144,7 +144,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

-	spin_lock(&em_tree->lock);
+	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	if (ret == -EEXIST) {
		u64 failed_start = em->start;
@@ -163,7 +163,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
		free_extent_map(em);
		em = NULL;
	}
-	spin_unlock(&em_tree->lock);
+	write_unlock(&em_tree->lock);

	if (ret)
		em = ERR_PTR(ret);
@@ -1325,9 +1325,9 @@ static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
	offset = page_offset(page);

	em_tree = &BTRFS_I(inode)->extent_tree;
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);
	if (!em) {
		__unplug_io_fn(bdi, page);
		return;
@@ -1698,7 +1698,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
		err = -EINVAL;
		goto fail_iput;
	}
-printk("thread pool is %d\n", fs_info->thread_pool_size);
	/*
	 * we need to start all the end_io workers up front because the
	 * queue work function gets called at interrupt time, and so it
@@ -1743,20 +1743,22 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	fs_info->endio_workers.idle_thresh = 4;
	fs_info->endio_meta_workers.idle_thresh = 4;

-	fs_info->endio_write_workers.idle_thresh = 64;
-	fs_info->endio_meta_write_workers.idle_thresh = 64;
+	fs_info->endio_write_workers.idle_thresh = 2;
+	fs_info->endio_meta_write_workers.idle_thresh = 2;
+
+	fs_info->endio_workers.atomic_worker_start = 1;
+	fs_info->endio_meta_workers.atomic_worker_start = 1;
+	fs_info->endio_write_workers.atomic_worker_start = 1;
+	fs_info->endio_meta_write_workers.atomic_worker_start = 1;

	btrfs_start_workers(&fs_info->workers, 1);
	btrfs_start_workers(&fs_info->submit_workers, 1);
	btrfs_start_workers(&fs_info->delalloc_workers, 1);
	btrfs_start_workers(&fs_info->fixup_workers, 1);
-	btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
-	btrfs_start_workers(&fs_info->endio_meta_workers,
-			    fs_info->thread_pool_size);
-	btrfs_start_workers(&fs_info->endio_meta_write_workers,
-			    fs_info->thread_pool_size);
-	btrfs_start_workers(&fs_info->endio_write_workers,
-			    fs_info->thread_pool_size);
+	btrfs_start_workers(&fs_info->endio_workers, 1);
+	btrfs_start_workers(&fs_info->endio_meta_workers, 1);
+	btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
+	btrfs_start_workers(&fs_info->endio_write_workers, 1);

	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
......
@@ -5396,9 +5396,9 @@ static noinline int relocate_data_extent(struct inode *reloc_inode,
	lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
	while (1) {
		int ret;
-		spin_lock(&em_tree->lock);
+		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
-		spin_unlock(&em_tree->lock);
+		write_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
......
This file's diff is collapsed.
@@ -13,10 +13,8 @@
#define EXTENT_DEFRAG (1 << 6)
#define EXTENT_DEFRAG_DONE (1 << 7)
#define EXTENT_BUFFER_FILLED (1 << 8)
-#define EXTENT_ORDERED (1 << 9)
-#define EXTENT_ORDERED_METADATA (1 << 10)
-#define EXTENT_BOUNDARY (1 << 11)
-#define EXTENT_NODATASUM (1 << 12)
+#define EXTENT_BOUNDARY (1 << 9)
+#define EXTENT_NODATASUM (1 << 10)
#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)

/* flags for bio submission */
@@ -142,6 +140,8 @@ int try_release_extent_state(struct extent_map_tree *map,
			      struct extent_io_tree *tree, struct page *page,
			      gfp_t mask);
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+		     int bits, struct extent_state **cached, gfp_t mask);
int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    gfp_t mask);
@@ -155,11 +155,12 @@ u64 count_range_bits(struct extent_io_tree *tree,
		     u64 max_bytes, unsigned long bits);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   int bits, int filled);
+		   int bits, int filled, struct extent_state *cached_state);
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		     int bits, int wake, int delete, gfp_t mask);
+		     int bits, int wake, int delete, struct extent_state **cached,
+		     gfp_t mask);
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask);
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
@@ -282,5 +283,6 @@ int extent_clear_unlock_delalloc(struct inode *inode,
				 int clear_unlock,
				 int clear_delalloc, int clear_dirty,
				 int set_writeback,
-				 int end_writeback);
+				 int end_writeback,
+				 int set_private2);
#endif
@@ -36,7 +36,7 @@ void extent_map_exit(void)
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
	tree->map.rb_node = NULL;
-	spin_lock_init(&tree->lock);
+	rwlock_init(&tree->lock);
}

/**
@@ -198,6 +198,56 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
	return 0;
}

+int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
+{
+	int ret = 0;
+	struct extent_map *merge = NULL;
+	struct rb_node *rb;
+	struct extent_map *em;
+
+	write_lock(&tree->lock);
+	em = lookup_extent_mapping(tree, start, len);
+
+	WARN_ON(em->start != start || !em);
+
+	if (!em)
+		goto out;
+
+	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+
+	if (em->start != 0) {
+		rb = rb_prev(&em->rb_node);
+		if (rb)
+			merge = rb_entry(rb, struct extent_map, rb_node);
+		if (rb && mergable_maps(merge, em)) {
+			em->start = merge->start;
+			em->len += merge->len;
+			em->block_len += merge->block_len;
+			em->block_start = merge->block_start;
+			merge->in_tree = 0;
+			rb_erase(&merge->rb_node, &tree->map);
+			free_extent_map(merge);
+		}
+	}
+
+	rb = rb_next(&em->rb_node);
+	if (rb)
+		merge = rb_entry(rb, struct extent_map, rb_node);
+	if (rb && mergable_maps(em, merge)) {
+		em->len += merge->len;
+		em->block_len += merge->len;
+		rb_erase(&merge->rb_node, &tree->map);
+		merge->in_tree = 0;
+		free_extent_map(merge);
+	}
+
+	free_extent_map(em);
+out:
+	write_unlock(&tree->lock);
+	return ret;
+}
+
/*
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
@@ -222,7 +272,6 @@ int add_extent_mapping(struct extent_map_tree *tree,
		ret = -EEXIST;
		goto out;
	}
-	assert_spin_locked(&tree->lock);
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
@@ -285,7 +334,6 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

-	assert_spin_locked(&tree->lock);
	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
@@ -331,7 +379,6 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
-	assert_spin_locked(&tree->lock);
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	return ret;
......
@@ -31,7 +31,7 @@ struct extent_map {
struct extent_map_tree {
	struct rb_root map;
-	spinlock_t lock;
+	rwlock_t lock;
};

static inline u64 extent_map_end(struct extent_map *em)
@@ -59,4 +59,5 @@ struct extent_map *alloc_extent_map(gfp_t mask);
void free_extent_map(struct extent_map *em);
int __init extent_map_init(void);
void extent_map_exit(void);
+int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len);
#endif
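Illustrative sketch (not part of the patch): the locking discipline callers follow once extent_map_tree->lock is an rwlock. Lookups can run concurrently under read_lock(), while insert, remove and unpin take write_lock(). The wrapper names example_lookup and example_insert are hypothetical; lookup_extent_mapping(), add_extent_mapping() and free_extent_map() are the existing helpers used throughout this diff.

static struct extent_map *example_lookup(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;

	read_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);
	read_unlock(&tree->lock);
	return em;	/* caller drops the reference with free_extent_map() */
}

static int example_insert(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock(&tree->lock);
	ret = add_extent_mapping(tree, em);
	write_unlock(&tree->lock);
	return ret;	/* -EEXIST if a mapping already covers the range */
}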
@@ -112,8 +112,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
-	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-	u64 hint_byte;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
@@ -125,22 +123,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
-
-	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-	trans = btrfs_join_transaction(root, 1);
-	if (!trans) {
-		err = -ENOMEM;
-		goto out_unlock;
-	}
-	btrfs_set_trans_block_group(trans, inode);
-	hint_byte = 0;
-
-	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-
-	/* check for reserved extents on each page, we don't want
-	 * to reset the delalloc bit on things that already have
-	 * extents reserved.
-	 */
	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
@@ -155,9 +137,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
	 * at this time.
	 */
	}
-	err = btrfs_end_transaction(trans, root);
-out_unlock:
-	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}
@@ -189,18 +168,18 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

-		spin_lock(&em_tree->lock);
+		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
-			spin_unlock(&em_tree->lock);
+			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
-			spin_unlock(&em_tree->lock);
			if (em->start <= start &&
			    (!testend || em->start + em->len >= start + len)) {
				free_extent_map(em);
+				write_unlock(&em_tree->lock);
				break;
			}
			if (start < em->start) {
@@ -210,6 +189,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
				start = em->start + em->len;
			}
			free_extent_map(em);
+			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
@@ -260,7 +240,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			free_extent_map(split);
			split = NULL;
		}
-		spin_unlock(&em_tree->lock);
+		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
@@ -289,7 +269,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 locked_end,
-		       u64 inline_limit, u64 *hint_byte)
+		       u64 inline_limit, u64 *hint_byte, int drop_cache)
{
	u64 extent_end = 0;
	u64 search_start = start;
@@ -314,7 +294,8 @@ noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
	int ret;

	inline_limit = 0;
-	btrfs_drop_extent_cache(inode, start, end - 1, 0);
+	if (drop_cache)
+		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
......
@@ -231,7 +231,8 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
	}

	ret = btrfs_drop_extents(trans, root, inode, start,
-				 aligned_end, aligned_end, start, &hint_byte);
+				 aligned_end, aligned_end, start,
+				 &hint_byte, 1);
	BUG_ON(ret);

	if (isize > actual_end)
@@ -240,7 +241,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				   inline_len, compressed_size,
				   compressed_pages);
	BUG_ON(ret);
-	btrfs_drop_extent_cache(inode, start, aligned_end, 0);
+	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}
@@ -425,7 +426,7 @@ static noinline int compress_file_range(struct inode *inode,
			extent_clear_unlock_delalloc(inode,
						     &BTRFS_I(inode)->io_tree,
						     start, end, NULL, 1, 0,
-						     0, 1, 1, 1);
+						     0, 1, 1, 1, 0);
			ret = 0;
			goto free_pages_out;
		}
@@ -611,9 +612,9 @@ static noinline int submit_compressed_extents(struct inode *inode,
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
-			spin_lock(&em_tree->lock);
+			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
-			spin_unlock(&em_tree->lock);
+			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
@@ -640,7 +641,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
-				NULL, 1, 1, 0, 1, 1, 0);
+				NULL, 1, 1, 0, 1, 1, 0, 0);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
@@ -713,7 +714,7 @@ static noinline int cow_file_range(struct inode *inode,
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     start, end, NULL, 1, 1,
-					     1, 1, 1, 1);
+					     1, 1, 1, 1, 0);
		*nr_written = *nr_written +
		     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
		*page_started = 1;
@@ -747,9 +748,9 @@ static noinline int cow_file_range(struct inode *inode,
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
-			spin_lock(&em_tree->lock);
+			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
-			spin_unlock(&em_tree->lock);
+			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
@@ -776,11 +777,14 @@ static noinline int cow_file_range(struct inode *inode,
		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
+		 *
+		 * Do set the Private2 bit so we know this page was properly
+		 * setup for writepage
		 */
		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, unlock, 1,
-					     1, 0, 0, 0);
+					     1, 0, 0, 0, 1);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
@@ -853,7 +857,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
	int limit = 10 * 1024 * 1042;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
-			 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
+			 EXTENT_DELALLOC, 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		async_cow->inode = inode;
@@ -1080,9 +1084,9 @@ static noinline int run_delalloc_nocow(struct inode *inode,
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
-				spin_lock(&em_tree->lock);
+				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
-				spin_unlock(&em_tree->lock);
+				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
@@ -1101,7 +1105,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					cur_offset, cur_offset + num_bytes - 1,
-					locked_page, 1, 1, 1, 0, 0, 0);
+					locked_page, 1, 1, 1, 0, 0, 0, 1);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
@@ -1374,10 +1378,8 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

	/* already ordered? We're done */
-	if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
-			     EXTENT_ORDERED, 0)) {
+	if (PagePrivate2(page))
		goto out;
-	}

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
@@ -1413,11 +1415,9 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;
-	int ret;

-	ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
-			     EXTENT_ORDERED, 0);
-	if (ret)
+	/* this page is properly in the ordered list */
+	if (TestClearPagePrivate2(page))
		return 0;

	if (PageChecked(page))
@@ -1455,9 +1455,19 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
	BUG_ON(!path);

	path->leave_spinning = 1;
+
+	/*
+	 * we may be replacing one extent in the tree with another.
+	 * The new extent is pinned in the extent map, and we don't want
+	 * to drop it from the cache until it is completely in the btree.
+	 *
+	 * So, tell btrfs_drop_extents to leave this extent in the cache.
+	 * the caller is expected to unpin it and allow it to be merged
+	 * with the others.
+	 */
	ret = btrfs_drop_extents(trans, root, inode, file_pos,
				 file_pos + num_bytes, locked_end,
-				 file_pos, &hint);
+				 file_pos, &hint, 0);
	BUG_ON(ret);

	ins.objectid = inode->i_ino;
@@ -1485,7 +1495,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);
-	btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
@@ -1596,6 +1605,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
						ordered_extent->len,
						compressed, 0, 0,
						BTRFS_FILE_EXTENT_REG);
+		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
+				   ordered_extent->file_offset,
+				   ordered_extent->len);
		BUG_ON(ret);
	}
	unlock_extent(io_tree, ordered_extent->file_offset,
@@ -1623,6 +1635,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
+	ClearPagePrivate2(page);
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}
@@ -1669,13 +1682,13 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
		failrec->last_mirror = 0;
		failrec->bio_flags = 0;

-		spin_lock(&em_tree->lock);
+		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (em->start > start || em->start + em->len < start) {
			free_extent_map(em);
			em = NULL;
		}
-		spin_unlock(&em_tree->lock);
+		read_unlock(&em_tree->lock);

		if (!em || IS_ERR(em)) {
			kfree(failrec);
@@ -1794,7 +1807,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
		return 0;

	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
-	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
+	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
@@ -2935,7 +2948,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
					cur_offset,
					cur_offset + hole_size,
					block_end,
-					cur_offset, &hint_byte);
+					cur_offset, &hint_byte, 1);
			if (err)
				break;
			err = btrfs_insert_file_extent(trans, root,
@@ -4064,11 +4077,11 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
	int compressed;

again:
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em)
		em->bdev = root->fs_info->fs_devices->latest_bdev;
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
@@ -4215,6 +4228,11 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
				map = kmap(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
+				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
+					memset(map + pg_offset + copy_size, 0,
+					       PAGE_CACHE_SIZE - pg_offset -
+					       copy_size);
+				}
				kunmap(page);
			}
			flush_dcache_page(page);
@@ -4259,7 +4277,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
	}
	err = 0;
-	spin_lock(&em_tree->lock);
+	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
@@ -4299,7 +4317,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
			err = 0;
		}
	}
-	spin_unlock(&em_tree->lock);
+	write_unlock(&em_tree->lock);
out:
	if (path)
		btrfs_free_path(path);
@@ -4398,13 +4416,21 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
	u64 page_start = page_offset(page);
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;

+	/*
+	 * we have the page locked, so new writeback can't start,
+	 * and the dirty bit won't be cleared while we are here.
+	 *
+	 * Wait for IO on this page so that we can safely clear
+	 * the PagePrivate2 bit and do ordered accounting
+	 */
	wait_on_page_writeback(page);

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (offset) {
		btrfs_releasepage(page, GFP_NOFS);
		return;
	}
	lock_extent(tree, page_start, page_end, GFP_NOFS);
	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
					   page_offset(page));
@@ -4415,16 +4441,21 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
		 */
		clear_extent_bit(tree, page_start, page_end,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
-				 EXTENT_LOCKED, 1, 0, GFP_NOFS);
-		btrfs_finish_ordered_io(page->mapping->host,
-					page_start, page_end);
+				 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
+		/*
+		 * whoever cleared the private bit is responsible
+		 * for the finish_ordered_io
+		 */
+		if (TestClearPagePrivate2(page)) {
+			btrfs_finish_ordered_io(page->mapping->host,
+						page_start, page_end);
+		}
		btrfs_put_ordered_extent(ordered);
		lock_extent(tree, page_start, page_end, GFP_NOFS);
	}
	clear_extent_bit(tree, page_start, page_end,
-		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
-		 EXTENT_ORDERED,
-		 1, 1, GFP_NOFS);
+		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
+		 1, 1, NULL, GFP_NOFS);
	__btrfs_releasepage(page, GFP_NOFS);

	ClearPageChecked(page);
@@ -4521,11 +4552,14 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	}
	ClearPageChecked(page);
	set_page_dirty(page);
+	SetPageUptodate(page);

	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);

out_unlock:
+	if (!ret)
+		return VM_FAULT_LOCKED;
	unlock_page(page);
out:
	return ret;
@@ -5058,6 +5092,8 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
						  0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		BUG_ON(ret);
+		btrfs_drop_extent_cache(inode, cur_offset,
+					cur_offset + ins.offset -1, 0);
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		alloc_hint = ins.objectid + ins.offset;
......
@@ -596,9 +596,8 @@ static int btrfs_defrag_file(struct file *file)
		clear_page_dirty_for_io(page);

		btrfs_set_extent_delalloc(inode, page_start, page_end);
-
-		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		set_page_dirty(page);
+		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
		balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
@@ -976,7 +975,7 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
	/* punch hole in destination first */
	btrfs_drop_extents(trans, root, inode, off, off + len,
-			   off + len, 0, &hint_byte);
+			   off + len, 0, &hint_byte, 1);

	/* clone data */
	key.objectid = src->i_ino;
......
...@@ -159,8 +159,6 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, ...@@ -159,8 +159,6 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
* *
* len is the length of the extent * len is the length of the extent
* *
* This also sets the EXTENT_ORDERED bit on the range in the inode.
*
* The tree is given a single reference on the ordered extent that was * The tree is given a single reference on the ordered extent that was
* inserted. * inserted.
*/ */
...@@ -181,6 +179,7 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, ...@@ -181,6 +179,7 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
entry->start = start; entry->start = start;
entry->len = len; entry->len = len;
entry->disk_len = disk_len; entry->disk_len = disk_len;
entry->bytes_left = len;
entry->inode = inode; entry->inode = inode;
if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
set_bit(type, &entry->flags); set_bit(type, &entry->flags);
...@@ -195,9 +194,6 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, ...@@ -195,9 +194,6 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
&entry->rb_node); &entry->rb_node);
BUG_ON(node); BUG_ON(node);
set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
entry_end(entry) - 1, GFP_NOFS);
spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
list_add_tail(&entry->root_extent_list, list_add_tail(&entry->root_extent_list,
&BTRFS_I(inode)->root->fs_info->ordered_extents); &BTRFS_I(inode)->root->fs_info->ordered_extents);
...@@ -241,13 +237,10 @@ int btrfs_dec_test_ordered_pending(struct inode *inode, ...@@ -241,13 +237,10 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
struct btrfs_ordered_inode_tree *tree; struct btrfs_ordered_inode_tree *tree;
struct rb_node *node; struct rb_node *node;
struct btrfs_ordered_extent *entry; struct btrfs_ordered_extent *entry;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
int ret; int ret;
tree = &BTRFS_I(inode)->ordered_tree; tree = &BTRFS_I(inode)->ordered_tree;
mutex_lock(&tree->mutex); mutex_lock(&tree->mutex);
clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
GFP_NOFS);
node = tree_search(tree, file_offset); node = tree_search(tree, file_offset);
if (!node) { if (!node) {
ret = 1; ret = 1;
...@@ -260,11 +253,16 @@ int btrfs_dec_test_ordered_pending(struct inode *inode, ...@@ -260,11 +253,16 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
goto out; goto out;
} }
ret = test_range_bit(io_tree, entry->file_offset, if (io_size > entry->bytes_left) {
entry->file_offset + entry->len - 1, printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
EXTENT_ORDERED, 0); (unsigned long long)entry->bytes_left,
if (ret == 0) (unsigned long long)io_size);
}
entry->bytes_left -= io_size;
if (entry->bytes_left == 0)
ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
else
ret = 1;
out: out:
mutex_unlock(&tree->mutex); mutex_unlock(&tree->mutex);
return ret == 0; return ret == 0;
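Note: the hunks above replace the old EXTENT_ORDERED bit test with byte accounting stored on the ordered extent itself. A condensed sketch of the new scheme, using only names that appear in this diff:

	/* at creation (btrfs_add_ordered_extent): remember how much still needs writing */
	entry->bytes_left = len;

	/* at completion (btrfs_dec_test_ordered_pending, under tree->mutex):
	 * io_size bytes of the extent just finished */
	if (io_size > entry->bytes_left)
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	entry->bytes_left -= io_size;
	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); /* done exactly once */
	else
		ret = 1; /* bytes still outstanding: the caller keeps seeing "not done" */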
...@@ -476,6 +474,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) ...@@ -476,6 +474,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
u64 orig_end; u64 orig_end;
u64 wait_end; u64 wait_end;
struct btrfs_ordered_extent *ordered; struct btrfs_ordered_extent *ordered;
int found;
if (start + len < start) { if (start + len < start) {
orig_end = INT_LIMIT(loff_t); orig_end = INT_LIMIT(loff_t);
...@@ -502,6 +501,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) ...@@ -502,6 +501,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
orig_end >> PAGE_CACHE_SHIFT); orig_end >> PAGE_CACHE_SHIFT);
end = orig_end; end = orig_end;
found = 0;
while (1) { while (1) {
ordered = btrfs_lookup_first_ordered_extent(inode, end); ordered = btrfs_lookup_first_ordered_extent(inode, end);
if (!ordered) if (!ordered)
...@@ -514,6 +514,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) ...@@ -514,6 +514,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
btrfs_put_ordered_extent(ordered); btrfs_put_ordered_extent(ordered);
break; break;
} }
found++;
btrfs_start_ordered_extent(inode, ordered, 1); btrfs_start_ordered_extent(inode, ordered, 1);
end = ordered->file_offset; end = ordered->file_offset;
btrfs_put_ordered_extent(ordered); btrfs_put_ordered_extent(ordered);
...@@ -521,8 +522,8 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) ...@@ -521,8 +522,8 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
break; break;
end--; end--;
} }
if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end, if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
EXTENT_ORDERED | EXTENT_DELALLOC, 0)) { EXTENT_DELALLOC, 0, NULL)) {
schedule_timeout(1); schedule_timeout(1);
goto again; goto again;
} }
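Note: with the EXTENT_ORDERED bit gone, btrfs_wait_ordered_range() can no longer re-check that bit after scanning the range. Instead it counts the ordered extents it had to wait on and retries when it saw any, or when delalloc is still pending. Minimal sketch of the retry decision, as changed above:

	/* 'found' was incremented once per ordered extent we started and waited on */
	if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
				    EXTENT_DELALLOC, 0, NULL)) {
		schedule_timeout(1);
		goto again;	/* something was still in flight; rescan the whole range */
	}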
...@@ -613,7 +614,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, ...@@ -613,7 +614,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
*/ */
if (test_range_bit(io_tree, disk_i_size, if (test_range_bit(io_tree, disk_i_size,
ordered->file_offset + ordered->len - 1, ordered->file_offset + ordered->len - 1,
EXTENT_DELALLOC, 0)) { EXTENT_DELALLOC, 0, NULL)) {
goto out; goto out;
} }
/* /*
...@@ -664,7 +665,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, ...@@ -664,7 +665,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
*/ */
if (i_size_test > entry_end(ordered) && if (i_size_test > entry_end(ordered) &&
!test_range_bit(io_tree, entry_end(ordered), i_size_test - 1, !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
EXTENT_DELALLOC, 0)) { EXTENT_DELALLOC, 0, NULL)) {
new_i_size = min_t(u64, i_size_test, i_size_read(inode)); new_i_size = min_t(u64, i_size_test, i_size_read(inode));
} }
BTRFS_I(inode)->disk_i_size = new_i_size; BTRFS_I(inode)->disk_i_size = new_i_size;
......
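Note: every test_range_bit() call in this commit gains a trailing NULL. The prototype is not shown in this excerpt; presumably the function now takes an optional cached extent_state so hot callers can skip the tree search, and all callers touched here simply pass NULL. Assumed shape, for illustration only:

	/* assumed prototype -- the last parameter is a guess, not taken from this diff */
	int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
			   int bits, int filled, struct extent_state *cached_state);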
...@@ -85,6 +85,9 @@ struct btrfs_ordered_extent { ...@@ -85,6 +85,9 @@ struct btrfs_ordered_extent {
/* extent length on disk */ /* extent length on disk */
u64 disk_len; u64 disk_len;
/* number of bytes that still need writing */
u64 bytes_left;
/* flags (described above) */ /* flags (described above) */
unsigned long flags; unsigned long flags;
......
...@@ -2180,7 +2180,7 @@ static int tree_block_processed(u64 bytenr, u32 blocksize, ...@@ -2180,7 +2180,7 @@ static int tree_block_processed(u64 bytenr, u32 blocksize,
struct reloc_control *rc) struct reloc_control *rc)
{ {
if (test_range_bit(&rc->processed_blocks, bytenr, if (test_range_bit(&rc->processed_blocks, bytenr,
bytenr + blocksize - 1, EXTENT_DIRTY, 1)) bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
return 1; return 1;
return 0; return 0;
} }
...@@ -2646,9 +2646,9 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key) ...@@ -2646,9 +2646,9 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key)
lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
while (1) { while (1) {
int ret; int ret;
spin_lock(&em_tree->lock); write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em); ret = add_extent_mapping(em_tree, em);
spin_unlock(&em_tree->lock); write_unlock(&em_tree->lock);
if (ret != -EEXIST) { if (ret != -EEXIST) {
free_extent_map(em); free_extent_map(em);
break; break;
......
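Note: this hunk, together with the volumes.c hunks below, converts the extent map tree lock from a spinlock to a reader/writer lock: lookup_extent_mapping() sites now take read_lock(), while add_extent_mapping()/remove_extent_mapping() sites take write_lock(). A minimal sketch of the pattern, assuming em_tree->lock is now an rwlock_t:

	/* readers: lookups may now run concurrently */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);

	/* writers: inserting or removing a mapping still requires exclusive access */
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);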
...@@ -534,7 +534,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, ...@@ -534,7 +534,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
saved_nbytes = inode_get_bytes(inode); saved_nbytes = inode_get_bytes(inode);
/* drop any overlapping extents */ /* drop any overlapping extents */
ret = btrfs_drop_extents(trans, root, inode, ret = btrfs_drop_extents(trans, root, inode,
start, extent_end, extent_end, start, &alloc_hint); start, extent_end, extent_end, start, &alloc_hint, 1);
BUG_ON(ret); BUG_ON(ret);
if (found_type == BTRFS_FILE_EXTENT_REG || if (found_type == BTRFS_FILE_EXTENT_REG ||
......
...@@ -276,7 +276,7 @@ static noinline int run_scheduled_bios(struct btrfs_device *device) ...@@ -276,7 +276,7 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
* is now congested. Back off and let other work structs * is now congested. Back off and let other work structs
* run instead * run instead
*/ */
if (pending && bdi_write_congested(bdi) && batch_run > 32 && if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
fs_info->fs_devices->open_devices > 1) { fs_info->fs_devices->open_devices > 1) {
struct io_context *ioc; struct io_context *ioc;
...@@ -1749,9 +1749,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, ...@@ -1749,9 +1749,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
* step two, delete the device extents and the * step two, delete the device extents and the
* chunk tree entries * chunk tree entries
*/ */
spin_lock(&em_tree->lock); read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, chunk_offset, 1); em = lookup_extent_mapping(em_tree, chunk_offset, 1);
spin_unlock(&em_tree->lock); read_unlock(&em_tree->lock);
BUG_ON(em->start > chunk_offset || BUG_ON(em->start > chunk_offset ||
em->start + em->len < chunk_offset); em->start + em->len < chunk_offset);
...@@ -1780,9 +1780,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, ...@@ -1780,9 +1780,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
ret = btrfs_remove_block_group(trans, extent_root, chunk_offset); ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
BUG_ON(ret); BUG_ON(ret);
spin_lock(&em_tree->lock); write_lock(&em_tree->lock);
remove_extent_mapping(em_tree, em); remove_extent_mapping(em_tree, em);
spin_unlock(&em_tree->lock); write_unlock(&em_tree->lock);
kfree(map); kfree(map);
em->bdev = NULL; em->bdev = NULL;
...@@ -2294,9 +2294,9 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, ...@@ -2294,9 +2294,9 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
em->block_len = em->len; em->block_len = em->len;
em_tree = &extent_root->fs_info->mapping_tree.map_tree; em_tree = &extent_root->fs_info->mapping_tree.map_tree;
spin_lock(&em_tree->lock); write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em); ret = add_extent_mapping(em_tree, em);
spin_unlock(&em_tree->lock); write_unlock(&em_tree->lock);
BUG_ON(ret); BUG_ON(ret);
free_extent_map(em); free_extent_map(em);
...@@ -2491,9 +2491,9 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset) ...@@ -2491,9 +2491,9 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
int readonly = 0; int readonly = 0;
int i; int i;
spin_lock(&map_tree->map_tree.lock); read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
spin_unlock(&map_tree->map_tree.lock); read_unlock(&map_tree->map_tree.lock);
if (!em) if (!em)
return 1; return 1;
...@@ -2518,11 +2518,11 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) ...@@ -2518,11 +2518,11 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
struct extent_map *em; struct extent_map *em;
while (1) { while (1) {
spin_lock(&tree->map_tree.lock); write_lock(&tree->map_tree.lock);
em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1); em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
if (em) if (em)
remove_extent_mapping(&tree->map_tree, em); remove_extent_mapping(&tree->map_tree, em);
spin_unlock(&tree->map_tree.lock); write_unlock(&tree->map_tree.lock);
if (!em) if (!em)
break; break;
kfree(em->bdev); kfree(em->bdev);
...@@ -2540,9 +2540,9 @@ int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len) ...@@ -2540,9 +2540,9 @@ int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
struct extent_map_tree *em_tree = &map_tree->map_tree; struct extent_map_tree *em_tree = &map_tree->map_tree;
int ret; int ret;
spin_lock(&em_tree->lock); read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, logical, len); em = lookup_extent_mapping(em_tree, logical, len);
spin_unlock(&em_tree->lock); read_unlock(&em_tree->lock);
BUG_ON(!em); BUG_ON(!em);
BUG_ON(em->start > logical || em->start + em->len < logical); BUG_ON(em->start > logical || em->start + em->len < logical);
...@@ -2604,9 +2604,9 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, ...@@ -2604,9 +2604,9 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
atomic_set(&multi->error, 0); atomic_set(&multi->error, 0);
} }
spin_lock(&em_tree->lock); read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, logical, *length); em = lookup_extent_mapping(em_tree, logical, *length);
spin_unlock(&em_tree->lock); read_unlock(&em_tree->lock);
if (!em && unplug_page) if (!em && unplug_page)
return 0; return 0;
...@@ -2763,9 +2763,9 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, ...@@ -2763,9 +2763,9 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
u64 stripe_nr; u64 stripe_nr;
int i, j, nr = 0; int i, j, nr = 0;
spin_lock(&em_tree->lock); read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, chunk_start, 1); em = lookup_extent_mapping(em_tree, chunk_start, 1);
spin_unlock(&em_tree->lock); read_unlock(&em_tree->lock);
BUG_ON(!em || em->start != chunk_start); BUG_ON(!em || em->start != chunk_start);
map = (struct map_lookup *)em->bdev; map = (struct map_lookup *)em->bdev;
...@@ -3053,9 +3053,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, ...@@ -3053,9 +3053,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
logical = key->offset; logical = key->offset;
length = btrfs_chunk_length(leaf, chunk); length = btrfs_chunk_length(leaf, chunk);
spin_lock(&map_tree->map_tree.lock); read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
spin_unlock(&map_tree->map_tree.lock); read_unlock(&map_tree->map_tree.lock);
/* already mapped? */ /* already mapped? */
if (em && em->start <= logical && em->start + em->len > logical) { if (em && em->start <= logical && em->start + em->len > logical) {
...@@ -3114,9 +3114,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, ...@@ -3114,9 +3114,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
map->stripes[i].dev->in_fs_metadata = 1; map->stripes[i].dev->in_fs_metadata = 1;
} }
spin_lock(&map_tree->map_tree.lock); write_lock(&map_tree->map_tree.lock);
ret = add_extent_mapping(&map_tree->map_tree, em); ret = add_extent_mapping(&map_tree->map_tree, em);
spin_unlock(&map_tree->map_tree.lock); write_unlock(&map_tree->map_tree.lock);
BUG_ON(ret); BUG_ON(ret);
free_extent_map(em); free_extent_map(em);
......