diff --git a/fs/aio.c b/fs/aio.c
index 6bcd3fb5265ad13fba958c2de06d0f54232a5eba..88d7927ffbc61910c8c47948157fccfa8e6f10ed 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -115,8 +115,7 @@ struct kioctx {
 	struct page		**ring_pages;
 	long			nr_pages;
 
-	struct rcu_head		free_rcu;
-	struct work_struct	free_work;	/* see free_ioctx() */
+	struct rcu_work		free_rwork;	/* see free_ioctx() */
 
 	/*
 	 * signals when all in-flight requests are done
@@ -592,13 +591,12 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
 /*
  * free_ioctx() should be RCU delayed to synchronize against the RCU
  * protected lookup_ioctx() and also needs process context to call
- * aio_free_ring(), so the double bouncing through kioctx->free_rcu and
- * ->free_work.
+ * aio_free_ring().  Use rcu_work.
  */
 static void free_ioctx(struct work_struct *work)
 {
-	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
-
+	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
+					  free_rwork);
 	pr_debug("freeing %p\n", ctx);
 
 	aio_free_ring(ctx);
@@ -608,14 +606,6 @@ static void free_ioctx(struct work_struct *work)
 	kmem_cache_free(kioctx_cachep, ctx);
 }
 
-static void free_ioctx_rcufn(struct rcu_head *head)
-{
-	struct kioctx *ctx = container_of(head, struct kioctx, free_rcu);
-
-	INIT_WORK(&ctx->free_work, free_ioctx);
-	schedule_work(&ctx->free_work);
-}
-
 static void free_ioctx_reqs(struct percpu_ref *ref)
 {
 	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
@@ -625,7 +615,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
 		complete(&ctx->rq_wait->comp);
 
 	/* Synchronize against RCU protected table->table[] dereferences */
-	call_rcu(&ctx->free_rcu, free_ioctx_rcufn);
+	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
+	queue_rcu_work(system_wq, &ctx->free_rwork);
 }
 
 /*
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index f8e76d01a5ade411c231c926e55e59a71dde6cc5..dc5b70449dc60ec5163ba891005283525acda40c 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -151,8 +151,8 @@ struct cgroup_subsys_state {
 	atomic_t online_cnt;
 
 	/* percpu_ref killing and RCU release */
-	struct rcu_head rcu_head;
 	struct work_struct destroy_work;
+	struct rcu_work destroy_rwork;
 
 	/*
 	 * PI: the parent css.	Placed here for cache proximity to following
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0c3301421c57746c086996e67401a167c82fe98c..39a0e215022a458e21ed57af697ba8e544510bfc 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -13,6 +13,7 @@
 #include <linux/threads.h>
 #include <linux/atomic.h>
 #include <linux/cpumask.h>
+#include <linux/rcupdate.h>
 
 struct workqueue_struct;
 
@@ -120,6 +121,14 @@ struct delayed_work {
 	int cpu;
 };
 
+struct rcu_work {
+	struct work_struct work;
+	struct rcu_head rcu;
+
+	/* target workqueue ->rcu uses to queue ->work */
+	struct workqueue_struct *wq;
+};
+
 /**
  * struct workqueue_attrs - A struct for workqueue attributes.
 *
@@ -151,6 +160,11 @@ static inline struct delayed_work *to_delayed_work(struct work_struct *work)
 	return container_of(work, struct delayed_work, work);
 }
 
+static inline struct rcu_work *to_rcu_work(struct work_struct *work)
+{
+	return container_of(work, struct rcu_work, work);
+}
+
 struct execute_work {
 	struct work_struct work;
 };
@@ -266,6 +280,12 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
 	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
 
+#define INIT_RCU_WORK(_work, _func)					\
+	INIT_WORK(&(_work)->work, (_func))
+
+#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
+	INIT_WORK_ONSTACK(&(_work)->work, (_func))
+
 /**
  * work_pending - Find out whether a work item is currently pending
  * @work: The work item in question
@@ -447,6 +467,7 @@ extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *work, unsigned long delay);
 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *dwork, unsigned long delay);
+extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void drain_workqueue(struct workqueue_struct *wq);
@@ -462,6 +483,8 @@ extern bool flush_delayed_work(struct delayed_work *dwork);
 extern bool cancel_delayed_work(struct delayed_work *dwork);
 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
+extern bool flush_rcu_work(struct rcu_work *rwork);
+
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
 				     int max_active);
 extern struct work_struct *current_work(void);
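The declarations above are the whole interface: embed a struct rcu_work in the object to be released, initialize it with INIT_RCU_WORK(), and queue it once; the work function then runs in process context a full RCU grace period later. A minimal sketch of the intended usage, mirroring the fs/aio.c conversion above and the cgroup conversion below (struct foo, foo_free_fn() and foo_release() are hypothetical names, not part of this patch):

#include <linux/workqueue.h>
#include <linux/slab.h>

/* hypothetical object: RCU-protected for lookup, freed in process context */
struct foo {
	struct rcu_work free_rwork;
	/* ... */
};

/* runs from a workqueue, a full RCU grace period after queueing */
static void foo_free_fn(struct work_struct *work)
{
	struct foo *foo = container_of(to_rcu_work(work), struct foo,
				       free_rwork);
	kfree(foo);
}

static void foo_release(struct foo *foo)
{
	/* replaces the old call_rcu() + INIT_WORK()/schedule_work() bounce */
	INIT_RCU_WORK(&foo->free_rwork, foo_free_fn);
	queue_rcu_work(system_wq, &foo->free_rwork);
}

The old pattern needed a struct rcu_head plus a struct work_struct and a trampoline callback in each user; rcu_work folds both into one object and one init/queue call.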
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 4bfb2908ec157204692424bc1b1a32a7ef185b29..a662bfcbea0e79725023dd9a5c9118d2e3fcdada 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -4524,10 +4524,10 @@ static struct cftype cgroup_base_files[] = {
  * and thus involve punting to css->destroy_work adding two additional
  * steps to the already complex sequence.
  */
-static void css_free_work_fn(struct work_struct *work)
+static void css_free_rwork_fn(struct work_struct *work)
 {
-	struct cgroup_subsys_state *css =
-		container_of(work, struct cgroup_subsys_state, destroy_work);
+	struct cgroup_subsys_state *css = container_of(to_rcu_work(work),
+				struct cgroup_subsys_state, destroy_rwork);
 	struct cgroup_subsys *ss = css->ss;
 	struct cgroup *cgrp = css->cgroup;
 
@@ -4573,15 +4573,6 @@ static void css_free_work_fn(struct work_struct *work)
 	}
 }
 
-static void css_free_rcu_fn(struct rcu_head *rcu_head)
-{
-	struct cgroup_subsys_state *css =
-		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
-
-	INIT_WORK(&css->destroy_work, css_free_work_fn);
-	queue_work(cgroup_destroy_wq, &css->destroy_work);
-}
-
 static void css_release_work_fn(struct work_struct *work)
 {
 	struct cgroup_subsys_state *css =
@@ -4631,7 +4622,8 @@ static void css_release_work_fn(struct work_struct *work)
 
 	mutex_unlock(&cgroup_mutex);
 
-	call_rcu(&css->rcu_head, css_free_rcu_fn);
+	INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+	queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
 }
 
 static void css_release(struct percpu_ref *ref)
@@ -4765,7 +4757,8 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
 err_list_del:
 	list_del_rcu(&css->sibling);
 err_free_css:
-	call_rcu(&css->rcu_head, css_free_rcu_fn);
+	INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+	queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
 	return ERR_PTR(err);
 }
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 254e636a3d6b221eb96cc7c83be737578fd563f2..ca7959be8aaa1bf4bd6f540c54db614974db8586 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -153,10 +153,9 @@ struct worker_pool {
 	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
 
 	struct list_head	worklist;	/* L: list of pending works */
-	int			nr_workers;	/* L: total number of workers */
 
-	/* nr_idle includes the ones off idle_list for rebinding */
-	int			nr_idle;	/* L: currently idle ones */
+	int			nr_workers;	/* L: total number of workers */
+	int			nr_idle;	/* L: currently idle workers */
 
 	struct list_head	idle_list;	/* X: list of idle workers */
 	struct timer_list	idle_timer;	/* L: worker idle timeout */
@@ -166,7 +165,6 @@ struct worker_pool {
 	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
 						/* L: hash of busy workers */
 
-	/* see manage_workers() for details on the two manager mutexes */
 	struct worker		*manager;	/* L: purely informational */
 	struct mutex		attach_mutex;	/* attach/detach exclusion */
 	struct list_head	workers;	/* A: attached workers */
@@ -1604,6 +1602,40 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
 
+static void rcu_work_rcufn(struct rcu_head *rcu)
+{
+	struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
+
+	/* read the comment in __queue_work() */
+	local_irq_disable();
+	__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
+	local_irq_enable();
+}
+
+/**
+ * queue_rcu_work - queue work after an RCU grace period
+ * @wq: workqueue to use
+ * @rwork: work to queue
+ *
+ * Return: %false if @rwork was already pending, %true otherwise.  Note
+ * that a full RCU grace period is guaranteed only after a %true return.
+ * While @rwork is guaranteed to be executed after a %false return, the
+ * execution may happen before a full RCU grace period has passed.
+ */
+bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
+{
+	struct work_struct *work = &rwork->work;
+
+	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+		rwork->wq = wq;
+		call_rcu(&rwork->rcu, rcu_work_rcufn);
+		return true;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(queue_rcu_work);
+
 /**
  * worker_enter_idle - enter idle state
  * @worker: worker which is entering idle state
@@ -3001,6 +3033,26 @@ bool flush_delayed_work(struct delayed_work *dwork)
 }
 EXPORT_SYMBOL(flush_delayed_work);
 
+/**
+ * flush_rcu_work - wait for a rwork to finish executing the last queueing
+ * @rwork: the rcu work to flush
+ *
+ * Return:
+ * %true if flush_rcu_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_rcu_work(struct rcu_work *rwork)
+{
+	if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
+		rcu_barrier();
+		flush_work(&rwork->work);
+		return true;
+	} else {
+		return flush_work(&rwork->work);
+	}
+}
+EXPORT_SYMBOL(flush_rcu_work);
+
 static bool __cancel_work(struct work_struct *work, bool is_dwork)
 {
 	unsigned long flags;
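A subtlety behind flush_rcu_work() above: while an rcu_work is pending, it may still be sitting in RCU's callback queue rather than on any workqueue, so a bare flush_work() could return before rcu_work_rcufn() has even queued the item. rcu_barrier() waits for all outstanding RCU callbacks, including rcu_work_rcufn(), after which flush_work() can reliably wait for the execution itself. A hypothetical teardown sketch relying on this (foo_cleanup_rwork and foo_module_exit() are illustrative names, not part of this patch):

#include <linux/workqueue.h>
#include <linux/printk.h>

/* assume foo_cleanup_rwork was queued earlier via queue_rcu_work() */
static struct rcu_work foo_cleanup_rwork;

static void foo_module_exit(void)
{
	/*
	 * Returns %true if it had to wait: rcu_barrier() first lets the
	 * RCU callback queue the work, then flush_work() waits for it.
	 */
	if (flush_rcu_work(&foo_cleanup_rwork))
		pr_debug("waited for pending cleanup\n");
}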