Commit 403c821d authored by Tejun Heo

workqueue: ROGUE workers are UNBOUND workers

Currently, WORKER_UNBOUND is used to mark workers for the unbound
global_cwq and WORKER_ROGUE is used to mark workers for disassociated
per-cpu global_cwqs.  Both make the marked worker skip concurrency
management; the only place they differ is in worker_enter_idle(), where
WORKER_ROGUE is used to skip scheduling the idle timer, which can easily
be replaced with a test of the trustee state.

This patch replaces all uses of WORKER_ROGUE with WORKER_UNBOUND and
drops the WORKER_ROGUE flag, preparing for the removal of the trustee
and for handling disassociated global_cwqs as unbound.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: "Rafael J. Wysocki" <rjw@sisk.pl>
Parent f2d5a0ee
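As background for the flag change described in the message above, here is a small standalone sketch of why a separate ROGUE bit adds nothing over UNBOUND: any flag folded into the composite WORKER_NOT_RUNNING mask already takes a worker out of concurrency management. This is an illustration only, not kernel/workqueue.c; the struct worker and the counts_as_running() helper below are simplified stand-ins for the real nr_running accounting.

/* Standalone sketch, assuming the flag values from the enum in the diff
 * below; compile with any C compiler, no kernel headers needed. */
#include <stdio.h>

enum {
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	/* after this patch there is no separate ROGUE bit */
	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_REBIND |
				  WORKER_UNBOUND | WORKER_CPU_INTENSIVE,
};

struct worker {
	unsigned int flags;
};

/* simplified stand-in for the nr_running accounting decision: a worker
 * is concurrency-managed only if none of the NOT_RUNNING flags is set */
static int counts_as_running(const struct worker *w)
{
	return !(w->flags & WORKER_NOT_RUNNING);
}

int main(void)
{
	struct worker bound   = { .flags = 0 };
	struct worker unbound = { .flags = WORKER_UNBOUND };

	printf("bound worker counted as running:   %d\n", counts_as_running(&bound));
	printf("unbound worker counted as running: %d\n", counts_as_running(&unbound));
	return 0;
}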
@@ -58,13 +58,12 @@ enum {
 	WORKER_DIE		= 1 << 1,	/* die die die */
 	WORKER_IDLE		= 1 << 2,	/* is idle */
 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
-	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
 	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
 	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
 	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
 
-	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
-				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
+	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_REBIND | WORKER_UNBOUND |
+				  WORKER_CPU_INTENSIVE,
 
 	/* gcwq->trustee_state */
 	TRUSTEE_START		= 0,		/* start */
@@ -1198,7 +1197,7 @@ static void worker_enter_idle(struct worker *worker)
 	/* idle_list is LIFO */
 	list_add(&worker->entry, &pool->idle_list);
 
-	if (likely(!(worker->flags & WORKER_ROGUE))) {
+	if (likely(gcwq->trustee_state != TRUSTEE_DONE)) {
 		if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
 			mod_timer(&pool->idle_timer,
 				  jiffies + IDLE_WORKER_TIMEOUT);
@@ -1207,7 +1206,7 @@ static void worker_enter_idle(struct worker *worker)
 	/*
 	 * Sanity check nr_running.  Because trustee releases gcwq->lock
-	 * between setting %WORKER_ROGUE and zapping nr_running, the
+	 * between setting %WORKER_UNBOUND and zapping nr_running, the
 	 * warning may trigger spuriously.  Check iff trustee is idle.
 	 */
 	WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
@@ -1301,10 +1300,10 @@ __acquires(&gcwq->lock)
 }
 
 /*
- * Function for worker->rebind_work used to rebind rogue busy workers
- * to the associated cpu which is coming back online.  This is
- * scheduled by cpu up but can race with other cpu hotplug operations
- * and may be executed twice without intervening cpu down.
+ * Function for worker->rebind_work used to rebind unbound busy workers to
+ * the associated cpu which is coming back online.  This is scheduled by
+ * cpu up but can race with other cpu hotplug operations and may be
+ * executed twice without intervening cpu down.
  */
 static void worker_rebind_fn(struct work_struct *work)
 {
@@ -1385,9 +1384,8 @@ static struct worker *create_worker(struct worker_pool *pool, bool bind)
 	set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
 
 	/*
-	 * A rogue worker will become a regular one if CPU comes
-	 * online later on.  Make sure every worker has
-	 * PF_THREAD_BOUND set.
+	 * An unbound worker will become a regular one if CPU comes online
+	 * later on.  Make sure every worker has PF_THREAD_BOUND set.
 	 */
 	if (bind && !on_unbound_cpu)
 		kthread_bind(worker->task, gcwq->cpu);
@@ -3215,11 +3213,10 @@ EXPORT_SYMBOL_GPL(work_busy);
  * gcwqs serve mix of short, long and very long running works making
  * blocked draining impractical.
  *
- * This is solved by allowing a gcwq to be detached from CPU, running
- * it with unbound (rogue) workers and allowing it to be reattached
- * later if the cpu comes back online.  A separate thread is created
- * to govern a gcwq in such state and is called the trustee of the
- * gcwq.
+ * This is solved by allowing a gcwq to be detached from CPU, running it
+ * with unbound workers and allowing it to be reattached later if the cpu
+ * comes back online.  A separate thread is created to govern a gcwq in
+ * such state and is called the trustee of the gcwq.
  *
  * Trustee states and their descriptions.
  *
@@ -3359,19 +3356,18 @@ static int __cpuinit trustee_thread(void *__gcwq)
 		pool->flags |= POOL_MANAGING_WORKERS;
 
 		list_for_each_entry(worker, &pool->idle_list, entry)
-			worker->flags |= WORKER_ROGUE;
+			worker->flags |= WORKER_UNBOUND;
 	}
 
 	for_each_busy_worker(worker, i, pos, gcwq)
-		worker->flags |= WORKER_ROGUE;
+		worker->flags |= WORKER_UNBOUND;
 
 	gcwq->flags |= GCWQ_DISASSOCIATED;
 
 	/*
-	 * Call schedule() so that we cross rq->lock and thus can
-	 * guarantee sched callbacks see the rogue flag.  This is
-	 * necessary as scheduler callbacks may be invoked from other
-	 * cpus.
+	 * Call schedule() so that we cross rq->lock and thus can guarantee
+	 * sched callbacks see the unbound flag.  This is necessary as
+	 * scheduler callbacks may be invoked from other cpus.
 	 */
 	spin_unlock_irq(&gcwq->lock);
 	schedule();
@@ -3439,7 +3435,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 			worker = create_worker(pool, false);
 			spin_lock_irq(&gcwq->lock);
 			if (worker) {
-				worker->flags |= WORKER_ROGUE;
+				worker->flags |= WORKER_UNBOUND;
 				start_worker(worker);
 			}
 		}
@@ -3488,7 +3484,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 		 * rebinding is scheduled.
 		 */
 		worker->flags |= WORKER_REBIND;
-		worker->flags &= ~WORKER_ROGUE;
+		worker->flags &= ~WORKER_UNBOUND;
 
 		/* queue rebind_work, wq doesn't matter, use the default one */
 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
...