Commit bdbc5dd7 authored by Tejun Heo

workqueue: prepare for WQ_UNBOUND implementation

In preparation for the WQ_UNBOUND addition, make the following changes.

* Add WORK_CPU_* constants for pseudo cpu id numbers used (currently
  only WORK_CPU_NONE) and use them instead of NR_CPUS.  This is to
  allow another pseudo cpu id for unbound cpu.

* Reorder WQ_* flags.

* Make workqueue_struct->cpu_wq a union which contains a percpu
  pointer, a regular pointer and an unsigned long value, and use
  kzalloc/kfree() in the UP allocation path.  This will be used to
  implement unbound workqueues which will use only one cwq even on
  SMP (a standalone sketch of this pattern follows the commit
  metadata below).

* Move alloc_cwqs() allocation after initialization of wq fields, so
  that alloc_cwqs() has access to wq->flags.

* Trivial relocation of wq local variables in freeze functions.

These changes don't cause any functional difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
Parent: ad72cf98
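Editor's note: a minimal standalone sketch of the union-plus-accessor pattern introduced by this patch. The struct is reduced to a forward declaration and __percpu is the kernel's sparse annotation, so this is kernel-context illustration rather than a compilable userspace unit; only get_cwq() mirrors the real code.

	struct cpu_workqueue_struct;	/* real definition lives in kernel/workqueue.c */

	struct workqueue_struct {
		unsigned int flags;	/* I: WQ_* flags */
		union {
			struct cpu_workqueue_struct __percpu *pcpu;	/* SMP: one cwq per cpu */
			struct cpu_workqueue_struct *single;		/* UP: one shared cwq */
			unsigned long v;				/* raw value for checks */
		} cpu_wq;
	};

	/* Every user goes through this accessor, so which union member
	 * is active stays a private detail of alloc_cwqs()/free_cwqs(). */
	static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
						    struct workqueue_struct *wq)
	{
	#ifndef CONFIG_SMP
		return wq->cpu_wq.single;	/* only one cwq exists on UP */
	#else
		return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
	#endif
	}

The unsigned long member exists so callers can test and alignment-check the allocation without knowing which pointer member is active.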
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -50,6 +50,10 @@ enum {
 	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
 	WORK_NO_COLOR		= WORK_NR_COLORS,
 
+	/* special cpu IDs */
+	WORK_CPU_NONE		= NR_CPUS,
+	WORK_CPU_LAST		= WORK_CPU_NONE,
+
 	/*
 	 * Reserve 6 bits off of cwq pointer w/ debugobjects turned
 	 * off.  This makes cwqs aligned to 64 bytes which isn't too
@@ -60,7 +64,7 @@ enum {
 	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
 	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
-	WORK_STRUCT_NO_CPU	= NR_CPUS << WORK_STRUCT_FLAG_BITS,
+	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
 
 	/* bit mask for work_busy() return values */
 	WORK_BUSY_PENDING	= 1 << 0,
@@ -227,9 +231,9 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
 
 enum {
-	WQ_FREEZEABLE		= 1 << 0, /* freeze during suspend */
+	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
 	WQ_SINGLE_CPU		= 1 << 1, /* only single cpu at a time */
-	WQ_NON_REENTRANT	= 1 << 2, /* guarantee non-reentrance */
+	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
 	WQ_RESCUER		= 1 << 3, /* has an rescue worker */
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
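Editor's note: the constants above matter because a work item's data word stores flags in its low WORK_STRUCT_FLAG_BITS bits and either a cwq pointer or a cpu number above them. A hedged sketch of the pack/unpack round trip, with simplified stand-in constants rather than the kernel's exact definitions:

	enum {
		FLAG_BITS = 6,		/* stand-in for WORK_STRUCT_FLAG_BITS */
		CPU_NONE  = 4096,	/* stand-in for WORK_CPU_NONE (== NR_CPUS) */
	};

	/* Pack: how WORK_STRUCT_NO_CPU is formed above. */
	static unsigned long pack_cpu(unsigned long cpu)
	{
		return cpu << FLAG_BITS;	/* flags live below the cpu id */
	}

	/* Unpack: mirrors the shift in get_work_gcwq() below. */
	static unsigned long unpack_cpu(unsigned long data)
	{
		return data >> FLAG_BITS;	/* yields a real cpu or CPU_NONE */
	}

Because every pseudo id must survive this shift without colliding with kernel pointers, init_workqueues() asserts WORK_CPU_LAST << WORK_STRUCT_FLAG_BITS < PAGE_OFFSET (see the last hunk below), which is why new pseudo ids are appended after WORK_CPU_NONE rather than picked arbitrarily.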
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -190,7 +190,11 @@ struct wq_flusher {
  */
 struct workqueue_struct {
 	unsigned int		flags;		/* I: WQ_* flags */
-	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
+	union {
+		struct cpu_workqueue_struct __percpu	*pcpu;
+		struct cpu_workqueue_struct		*single;
+		unsigned long				v;
+	} cpu_wq;				/* I: cwq's */
 	struct list_head	list;		/* W: list of all workqueues */
 	struct mutex		flush_mutex;	/* protects wq flushing */
@@ -362,7 +366,11 @@ static atomic_t *get_gcwq_nr_running(unsigned int cpu)
 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
 					    struct workqueue_struct *wq)
 {
-	return per_cpu_ptr(wq->cpu_wq, cpu);
+#ifndef CONFIG_SMP
+	return wq->cpu_wq.single;
+#else
+	return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
+#endif
 }
 
 static unsigned int work_color_to_flags(int color)
@@ -442,7 +450,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
 		return ((struct cpu_workqueue_struct *)data)->gcwq;
 
 	cpu = data >> WORK_STRUCT_FLAG_BITS;
-	if (cpu == NR_CPUS)
+	if (cpu == WORK_CPU_NONE)
 		return NULL;
 
 	BUG_ON(cpu >= nr_cpu_ids);
@@ -846,7 +854,7 @@ static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
 	 */
 	if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
 		smp_wmb();	/* paired with cmpxchg() in __queue_work() */
-		wq->single_cpu = NR_CPUS;
+		wq->single_cpu = WORK_CPU_NONE;
 	}
 }
@@ -904,7 +912,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 		 */
 retry:
 		cpu = wq->single_cpu;
-		arbitrate = cpu == NR_CPUS;
+		arbitrate = cpu == WORK_CPU_NONE;
 		if (arbitrate)
 			cpu = req_cpu;
@@ -918,7 +926,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 		 * visible on the new cpu after this point.
 		 */
 		if (arbitrate)
-			cmpxchg(&wq->single_cpu, NR_CPUS, cpu);
+			cmpxchg(&wq->single_cpu, WORK_CPU_NONE, cpu);
 		if (unlikely(wq->single_cpu != cpu)) {
 			spin_unlock_irqrestore(&gcwq->lock, flags);
@@ -2572,7 +2580,7 @@ int keventd_up(void)
 	return system_wq != NULL;
 }
 
-static struct cpu_workqueue_struct *alloc_cwqs(void)
+static int alloc_cwqs(struct workqueue_struct *wq)
 {
 	/*
 	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
@@ -2582,40 +2590,36 @@ static int alloc_cwqs(struct workqueue_struct *wq)
 	const size_t size = sizeof(struct cpu_workqueue_struct);
 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
 				   __alignof__(unsigned long long));
-	struct cpu_workqueue_struct *cwqs;
 #ifndef CONFIG_SMP
 	void *ptr;
 
 	/*
-	 * On UP, percpu allocator doesn't honor alignment parameter
-	 * and simply uses arch-dependent default.  Allocate enough
-	 * room to align cwq and put an extra pointer at the end
-	 * pointing back to the originally allocated pointer which
-	 * will be used for free.
-	 *
-	 * FIXME: This really belongs to UP percpu code.  Update UP
-	 * percpu code to honor alignment and remove this ugliness.
+	 * Allocate enough room to align cwq and put an extra pointer
+	 * at the end pointing back to the originally allocated
+	 * pointer which will be used for free.
 	 */
-	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
-	cwqs = PTR_ALIGN(ptr, align);
-	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
+	ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
+	if (ptr) {
+		wq->cpu_wq.single = PTR_ALIGN(ptr, align);
+		*(void **)(wq->cpu_wq.single + 1) = ptr;
+	}
 #else
-	/* On SMP, percpu allocator can do it itself */
-	cwqs = __alloc_percpu(size, align);
+	/* On SMP, percpu allocator can align itself */
+	wq->cpu_wq.pcpu = __alloc_percpu(size, align);
 #endif
 	/* just in case, make sure it's actually aligned */
-	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
-	return cwqs;
+	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
+	return wq->cpu_wq.v ? 0 : -ENOMEM;
 }
 
-static void free_cwqs(struct cpu_workqueue_struct *cwqs)
+static void free_cwqs(struct workqueue_struct *wq)
 {
 #ifndef CONFIG_SMP
 	/* on UP, the pointer to free is stored right after the cwq */
-	if (cwqs)
-		free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
+	if (wq->cpu_wq.single)
+		kfree(*(void **)(wq->cpu_wq.single + 1));
 #else
-	free_percpu(cwqs);
+	free_percpu(wq->cpu_wq.pcpu);
 #endif
 }
@@ -2645,22 +2649,21 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 	if (!wq)
 		goto err;
 
-	wq->cpu_wq = alloc_cwqs();
-	if (!wq->cpu_wq)
-		goto err;
-
 	wq->flags = flags;
 	wq->saved_max_active = max_active;
 	mutex_init(&wq->flush_mutex);
 	atomic_set(&wq->nr_cwqs_to_flush, 0);
 	INIT_LIST_HEAD(&wq->flusher_queue);
 	INIT_LIST_HEAD(&wq->flusher_overflow);
-	wq->single_cpu = NR_CPUS;
+	wq->single_cpu = WORK_CPU_NONE;
 
 	wq->name = name;
 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
 	INIT_LIST_HEAD(&wq->list);
 
+	if (alloc_cwqs(wq) < 0)
+		goto err;
+
 	for_each_possible_cpu(cpu) {
 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 		struct global_cwq *gcwq = get_gcwq(cpu);
@@ -2710,7 +2713,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 	return wq;
 err:
 	if (wq) {
-		free_cwqs(wq->cpu_wq);
+		free_cwqs(wq);
 		free_cpumask_var(wq->mayday_mask);
 		kfree(wq->rescuer);
 		kfree(wq);
@@ -2755,7 +2758,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 		free_cpumask_var(wq->mayday_mask);
 	}
 
-	free_cwqs(wq->cpu_wq);
+	free_cwqs(wq);
 	kfree(wq);
 }
 EXPORT_SYMBOL_GPL(destroy_workqueue);
@@ -2821,13 +2824,13 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
  * @work: the work of interest
  *
  * RETURNS:
- * CPU number if @work was ever queued.  NR_CPUS otherwise.
+ * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
  */
 unsigned int work_cpu(struct work_struct *work)
 {
 	struct global_cwq *gcwq = get_work_gcwq(work);
 
-	return gcwq ? gcwq->cpu : NR_CPUS;
+	return gcwq ? gcwq->cpu : WORK_CPU_NONE;
 }
 EXPORT_SYMBOL_GPL(work_cpu);
@@ -3300,7 +3303,6 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  */
 void freeze_workqueues_begin(void)
 {
-	struct workqueue_struct *wq;
 	unsigned int cpu;
 
 	spin_lock(&workqueue_lock);
@@ -3310,6 +3312,7 @@ void freeze_workqueues_begin(void)
 
 	for_each_possible_cpu(cpu) {
 		struct global_cwq *gcwq = get_gcwq(cpu);
+		struct workqueue_struct *wq;
 
 		spin_lock_irq(&gcwq->lock);
@@ -3344,7 +3347,6 @@ void freeze_workqueues_begin(void)
  */
 bool freeze_workqueues_busy(void)
 {
-	struct workqueue_struct *wq;
 	unsigned int cpu;
 	bool busy = false;
 
@@ -3353,6 +3355,7 @@ bool freeze_workqueues_busy(void)
 	BUG_ON(!workqueue_freezing);
 
 	for_each_possible_cpu(cpu) {
+		struct workqueue_struct *wq;
 		/*
 		 * nr_active is monotonically decreasing.  It's safe
 		 * to peek without lock.
@@ -3386,7 +3389,6 @@ bool freeze_workqueues_busy(void)
  */
 void thaw_workqueues(void)
 {
-	struct workqueue_struct *wq;
 	unsigned int cpu;
 
 	spin_lock(&workqueue_lock);
@@ -3396,6 +3398,7 @@ void thaw_workqueues(void)
 
 	for_each_possible_cpu(cpu) {
 		struct global_cwq *gcwq = get_gcwq(cpu);
+		struct workqueue_struct *wq;
 
 		spin_lock_irq(&gcwq->lock);
@@ -3443,7 +3446,7 @@ void __init init_workqueues(void)
 	 * sure cpu number won't overflow into kernel pointer area so
 	 * that they can be distinguished.
 	 */
-	BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
+	BUILD_BUG_ON(WORK_CPU_LAST << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
 
 	hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
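Editor's note: the UP branch of alloc_cwqs() above uses a classic over-allocate-then-align trick: grab size + align + sizeof(void *) bytes, round the object pointer up to the alignment, and stash the original allocation pointer just past the object so free_cwqs() can recover it. A self-contained userspace sketch of the same idea, with my own helper names and malloc()/free() standing in for kzalloc()/kfree():

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	/* Return a zeroed object of `size` bytes aligned to `align` (a
	 * power of two); the raw allocation pointer is stored right
	 * after the object, exactly as alloc_cwqs() does on UP. */
	static void *alloc_aligned(size_t size, size_t align)
	{
		void *raw = calloc(1, size + align + sizeof(void *));
		if (!raw)
			return NULL;
		void *obj = (void *)(((uintptr_t)raw + align - 1) &
				     ~(uintptr_t)(align - 1));
		memcpy((char *)obj + size, &raw, sizeof(raw));	/* stash for free */
		return obj;
	}

	static void free_aligned(void *obj, size_t size)
	{
		void *raw;

		if (!obj)
			return;
		memcpy(&raw, (char *)obj + size, sizeof(raw));	/* recover stash */
		free(raw);
	}

Note the caller must pass the same size to free_aligned() as to alloc_aligned(); the kernel variant avoids that by deriving everything from wq, where the object size is always sizeof(struct cpu_workqueue_struct).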