Commit 119ba0f8 authored by Kent Overstreet

bcache: Convert allocator thread to kthread

Using a workqueue when we just want a single thread is a bit silly.
Signed-off-by: Kent Overstreet <koverstreet@google.com>
Parent a9dd53ad
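The patch below replaces the closure/workqueue-driven allocator with a dedicated kernel thread. For context, this is roughly the kthread lifecycle the patch adopts, shown as a minimal standalone sketch; the names (example_thread_fn, example_have_work, example_start, example_kick) are illustrative placeholders, not bcache symbols:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Stand-in for the real "is there work to do?" condition. */
static bool example_have_work;

static int example_thread_fn(void *arg)
{
	while (!kthread_should_stop()) {
		/*
		 * Mark ourselves sleeping before re-checking the condition,
		 * so a wake_up_process() that races with the check is not
		 * lost: it flips us back to TASK_RUNNING and schedule()
		 * returns promptly.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!example_have_work && !kthread_should_stop()) {
			schedule();
			continue;
		}
		__set_current_state(TASK_RUNNING);

		example_have_work = false;	/* ...do the actual work here... */
	}
	return 0;
}

static struct task_struct *example_start(void *arg)
{
	struct task_struct *t;

	/* kthread_create() leaves the thread stopped; wake it to start it. */
	t = kthread_create(example_thread_fn, arg, "example_worker");
	if (!IS_ERR(t))
		wake_up_process(t);
	return t;
}

static void example_kick(struct task_struct *t)
{
	/* Replaces the old wait-queue + wake_up() pair. */
	example_have_work = true;
	wake_up_process(t);
}

kthread_create() returns the thread in a stopped state, so the caller wakes it explicitly with wake_up_process(); the same call later doubles as the "there is work" notification, which is why the patch can drop the alloc_wait wait queue.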
@@ -63,6 +63,7 @@
 #include "bcache.h"
 #include "btree.h"
 
+#include <linux/kthread.h>
 #include <linux/random.h>
 
 #define MAX_IN_FLIGHT_DISCARDS		8U
@@ -151,7 +152,7 @@ static void discard_finish(struct work_struct *w)
 	mutex_unlock(&ca->set->bucket_lock);
 
 	closure_wake_up(&ca->set->bucket_wait);
-	wake_up(&ca->set->alloc_wait);
+	wake_up_process(ca->alloc_thread);
 
 	closure_put(&ca->set->cl);
 }
@@ -358,30 +359,26 @@ static void invalidate_buckets(struct cache *ca)
 #define allocator_wait(ca, cond)					\
 do {									\
-	DEFINE_WAIT(__wait);						\
-									\
 	while (1) {							\
-		prepare_to_wait(&ca->set->alloc_wait,			\
-				&__wait, TASK_INTERRUPTIBLE);		\
+		set_current_state(TASK_INTERRUPTIBLE);			\
 		if (cond)						\
 			break;						\
 									\
 		mutex_unlock(&(ca)->set->bucket_lock);			\
 		if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) {	\
-			finish_wait(&ca->set->alloc_wait, &__wait);	\
-			closure_return(cl);				\
+			closure_put(&ca->set->cl);			\
+			return 0;					\
 		}							\
 									\
 		schedule();						\
 		mutex_lock(&(ca)->set->bucket_lock);			\
 	}								\
-									\
-	finish_wait(&ca->set->alloc_wait, &__wait);			\
+	__set_current_state(TASK_RUNNING);				\
 } while (0)
 
-void bch_allocator_thread(struct closure *cl)
+static int bch_allocator_thread(void *arg)
 {
-	struct cache *ca = container_of(cl, struct cache, alloc);
+	struct cache *ca = arg;
 
 	mutex_lock(&ca->set->bucket_lock);
@@ -442,7 +439,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
 {
 	long r = -1;
 again:
-	wake_up(&ca->set->alloc_wait);
+	wake_up_process(ca->alloc_thread);
 
 	if (fifo_used(&ca->free) > ca->watermark[watermark] &&
 	    fifo_pop(&ca->free, r)) {
@@ -552,6 +549,19 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 /* Init */
 
+int bch_cache_allocator_start(struct cache *ca)
+{
+	ca->alloc_thread = kthread_create(bch_allocator_thread,
+					  ca, "bcache_allocator");
+	if (IS_ERR(ca->alloc_thread))
+		return PTR_ERR(ca->alloc_thread);
+
+	closure_get(&ca->set->cl);
+	wake_up_process(ca->alloc_thread);
+
+	return 0;
+}
+
 void bch_cache_allocator_exit(struct cache *ca)
 {
 	struct discard *d;
......
@@ -565,8 +565,7 @@ struct cache {
 	unsigned		watermark[WATERMARK_MAX];
 
-	struct closure		alloc;
-	struct workqueue_struct	*alloc_workqueue;
+	struct task_struct	*alloc_thread;
 
 	struct closure		prio;
 	struct prio_set		*disk_buckets;
@@ -703,9 +702,6 @@ struct cache_set {
 	/* For the btree cache */
 	struct shrinker		shrink;
 
-	/* For the allocator itself */
-	wait_queue_head_t	alloc_wait;
-
 	/* For the btree cache and anything allocation related */
 	struct mutex		bucket_lock;
@@ -1173,6 +1169,15 @@ static inline uint8_t bucket_disk_gen(struct bucket *b)
 	static struct kobj_attribute ksysfs_##n =			\
 		__ATTR(n, S_IWUSR|S_IRUSR, show, store)
 
+static inline void wake_up_allocators(struct cache_set *c)
+{
+	struct cache *ca;
+	unsigned i;
+
+	for_each_cache(ca, c, i)
+		wake_up_process(ca->alloc_thread);
+}
+
 /* Forward declarations */
 
 void bch_writeback_queue(struct cached_dev *);
@@ -1193,7 +1198,6 @@ void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
 uint8_t bch_inc_gen(struct cache *, struct bucket *);
 void bch_rescale_priorities(struct cache_set *, int);
 bool bch_bucket_add_unused(struct cache *, struct bucket *);
-void bch_allocator_thread(struct closure *);
 long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
 void bch_bucket_free(struct cache_set *, struct bkey *);
@@ -1244,6 +1248,7 @@ int bch_btree_cache_alloc(struct cache_set *);
 void bch_cached_dev_writeback_init(struct cached_dev *);
 void bch_moving_init_cache_set(struct cache_set *);
 
+int bch_cache_allocator_start(struct cache *ca);
 void bch_cache_allocator_exit(struct cache *ca);
 int bch_cache_allocator_init(struct cache *ca);
......
@@ -273,7 +273,7 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
 {
 	if (w->prio_blocked &&
 	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
-		wake_up(&b->c->alloc_wait);
+		wake_up_allocators(b->c);
 
 	if (w->journal) {
 		atomic_dec_bug(w->journal);
@@ -984,7 +984,7 @@ static void btree_node_free(struct btree *b, struct btree_op *op)
 	if (b->prio_blocked &&
 	    !atomic_sub_return(b->prio_blocked, &b->c->prio_blocked))
-		wake_up(&b->c->alloc_wait);
+		wake_up_allocators(b->c);
 
 	b->prio_blocked = 0;
@@ -1547,7 +1547,7 @@ static void bch_btree_gc(struct closure *cl)
 	blktrace_msg_all(c, "Finished gc");
 
 	trace_bcache_gc_end(c->sb.set_uuid);
-	wake_up(&c->alloc_wait);
+	wake_up_allocators(c);
 
 	continue_at(cl, bch_moving_gc, bch_gc_wq);
 }
......
@@ -1282,7 +1282,7 @@ static void cache_set_flush(struct closure *cl)
 	/* Shut down allocator threads */
 	set_bit(CACHE_SET_STOPPING_2, &c->flags);
-	wake_up(&c->alloc_wait);
+	wake_up_allocators(c);
 
 	bch_cache_accounting_destroy(&c->accounting);
@@ -1373,7 +1373,6 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 	c->btree_pages = max_t(int, c->btree_pages / 4,
 			       BTREE_MAX_PAGES);
 
-	init_waitqueue_head(&c->alloc_wait);
 	mutex_init(&c->bucket_lock);
 	mutex_init(&c->fill_lock);
 	mutex_init(&c->sort_lock);
@@ -1496,9 +1495,10 @@ static void run_cache_set(struct cache_set *c)
 		 */
 		bch_journal_next(&c->journal);
 
+		err = "error starting allocator thread";
 		for_each_cache(ca, c, i)
-			closure_call(&ca->alloc, bch_allocator_thread,
-				     system_wq, &c->cl);
+			if (bch_cache_allocator_start(ca))
+				goto err;
 
 		/*
 		 * First place it's safe to allocate: btree_check() and
@@ -1531,17 +1531,16 @@ static void run_cache_set(struct cache_set *c)
 		bch_btree_gc_finish(c);
 
+		err = "error starting allocator thread";
 		for_each_cache(ca, c, i)
-			closure_call(&ca->alloc, bch_allocator_thread,
-				     ca->alloc_workqueue, &c->cl);
+			if (bch_cache_allocator_start(ca))
+				goto err;
 
 		mutex_lock(&c->bucket_lock);
 		for_each_cache(ca, c, i)
 			bch_prio_write(ca);
 		mutex_unlock(&c->bucket_lock);
 
-		wake_up(&c->alloc_wait);
-
 		err = "cannot allocate new UUID bucket";
 		if (__uuid_write(c))
 			goto err_unlock_gc;
@@ -1673,9 +1672,6 @@ void bch_cache_release(struct kobject *kobj)
 	bio_split_pool_free(&ca->bio_split_hook);
 
-	if (ca->alloc_workqueue)
-		destroy_workqueue(ca->alloc_workqueue);
-
 	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
 	kfree(ca->prio_buckets);
 	vfree(ca->buckets);
@@ -1723,7 +1719,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
 	    !(ca->prio_buckets	= kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
 					  2, GFP_KERNEL)) ||
 	    !(ca->disk_buckets	= alloc_bucket_pages(GFP_KERNEL, ca)) ||
-	    !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
 	    bio_split_pool_init(&ca->bio_split_hook))
 		return -ENOMEM;
......