Unverified commit 0c49500f, authored by openeuler-ci-bot, committed by Gitee

!1769 workqueue: Make flush_workqueue() also watch flush_work()

Merge Pull Request from: @ci-robot 
 
PR sync from: Zeng Heng <zengheng4@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/VMCSQFPYZZYKFJAPUACCE3S3JB5CZCAH/ 
Lai Jiangshan (5):
  workqueue: Rename "delayed" (delayed by active management) to
    "inactive"
  workqueue: Change argument of pwq_dec_nr_in_flight()
  workqueue: Change the code of calculating work_flags in
    insert_wq_barrier()
  workqueue: Mark barrier work with WORK_STRUCT_INACTIVE
  workqueue: Assign a color to barrier work items
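
The common thread of the series: flush_workqueue() could previously return before a barrier queued by a concurrent flush_work() on the same workqueue had executed, because barrier work items carried WORK_NO_COLOR and were invisible to the per-color nr_in_flight[] accounting that flushing relies on. After this series, a barrier inherits the flush color of the work item it waits for. An illustrative interleaving (a sketch, not taken from the patches; wq and w are placeholder names):

/*
 *   thread A                          thread B
 *   --------                          --------
 *   queue_work(wq, &w);
 *   flush_work(&w);
 *     queues a wq_barrier after w
 *     (formerly WORK_NO_COLOR)
 *                                     flush_workqueue(wq);
 *                                       waited for w only and could
 *                                       return while the barrier was
 *                                       still pending
 */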


--
2.25.1
 
https://gitee.com/openeuler/kernel/issues/I7LRJF 
 
Link: https://gitee.com/openeuler/kernel/pulls/1769 

Reviewed-by: Wei Li <liwei391@huawei.com> 
Signed-off-by: Liu YongQiang <liuyongqiang13@huawei.com> 
@@ -30,7 +30,7 @@ void delayed_work_timer_fn(struct timer_list *t);
 enum {
         WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
-        WORK_STRUCT_DELAYED_BIT = 1,    /* work item is delayed */
+        WORK_STRUCT_INACTIVE_BIT = 1,   /* work item is inactive */
         WORK_STRUCT_PWQ_BIT = 2,        /* data points to pwq */
         WORK_STRUCT_LINKED_BIT = 3,     /* next work is linked to this one */
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -43,7 +43,7 @@ enum {
         WORK_STRUCT_COLOR_BITS = 4,
 
         WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
-        WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
+        WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT,
         WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
         WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
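
For orientation, a sketch of the resulting work->data layout (illustrative; the exact color shift depends on CONFIG_DEBUG_OBJECTS_WORK):

/*
 * bit 0                 WORK_STRUCT_PENDING
 * bit 1                 WORK_STRUCT_INACTIVE  (reuses the old DELAYED bit)
 * bit 2                 WORK_STRUCT_PWQ
 * bit 3                 WORK_STRUCT_LINKED
 * COLOR_SHIFT .. +3     flush color (WORK_STRUCT_COLOR_BITS = 4)
 * remaining high bits   pwq pointer or pool ID, per WORK_STRUCT_PWQ
 */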
@@ -205,9 +205,26 @@ struct pool_workqueue {
         int                     refcnt;         /* L: reference count */
         int                     nr_in_flight[WORK_NR_COLORS];
                                                 /* L: nr of in_flight works */
+
+        /*
+         * nr_active management and WORK_STRUCT_INACTIVE:
+         *
+         * When pwq->nr_active >= max_active, new work item is queued to
+         * pwq->inactive_works instead of pool->worklist and marked with
+         * WORK_STRUCT_INACTIVE.
+         *
+         * All work items marked with WORK_STRUCT_INACTIVE do not participate
+         * in pwq->nr_active and all work items in pwq->inactive_works are
+         * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE
+         * work items are in pwq->inactive_works. Some of them are ready to
+         * run in pool->worklist or worker->scheduled. Those work items are
+         * only struct wq_barrier which is used for flush_work() and should
+         * not participate in pwq->nr_active. For non-barrier work item, it
+         * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
+         */
         int                     nr_active;      /* L: nr of active works */
         int                     max_active;     /* L: max active works */
-        struct list_head        delayed_works;  /* L: delayed works */
+        struct list_head        inactive_works; /* L: inactive works */
         struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
         struct list_head        mayday_node;    /* MD: node on wq->maydays */
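
The gate described by the new comment lives in __queue_work(); condensed (a sketch that omits the watchdog bookkeeping, see the full hunk further down):

        if (likely(pwq->nr_active < pwq->max_active)) {
                pwq->nr_active++;
                worklist = &pwq->pool->worklist;        /* runs immediately */
        } else {
                work_flags |= WORK_STRUCT_INACTIVE;     /* throttled */
                worklist = &pwq->inactive_works;
        }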
@@ -581,9 +598,9 @@ static unsigned int work_color_to_flags(int color)
         return color << WORK_STRUCT_COLOR_SHIFT;
 }
 
-static int get_work_color(struct work_struct *work)
+static int get_work_color(unsigned long work_data)
 {
-        return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
+        return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
                 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
 }
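
The switch to an unsigned long parameter lets callers derive the color from a snapshot of the flags word instead of from the work item itself; process_one_work() (below) takes the snapshot before invoking the callback, after which the item may already have been freed or requeued:

        /* calling convention after this change (sketch) */
        unsigned long work_data = *work_data_bits(work); /* snapshot while owned */
        int color = get_work_color(work_data);           /* safe even after work->func() */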
@@ -1105,7 +1122,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
         }
 }
 
-static void pwq_activate_delayed_work(struct work_struct *work)
+static void pwq_activate_inactive_work(struct work_struct *work)
 {
         struct pool_workqueue *pwq = get_work_pwq(work);
@@ -1113,22 +1130,22 @@ static void pwq_activate_delayed_work(struct work_struct *work)
         if (list_empty(&pwq->pool->worklist))
                 pwq->pool->watchdog_ts = jiffies;
         move_linked_works(work, &pwq->pool->worklist, NULL);
-        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
+        __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
         pwq->nr_active++;
 }
 
-static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
+static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
 {
-        struct work_struct *work = list_first_entry(&pwq->delayed_works,
+        struct work_struct *work = list_first_entry(&pwq->inactive_works,
                                                     struct work_struct, entry);
 
-        pwq_activate_delayed_work(work);
+        pwq_activate_inactive_work(work);
 }
 
 /**
  * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
  * @pwq: pwq of interest
- * @color: color of work which left the queue
+ * @work_data: work_data of work which left the queue
  *
  * A work either has completed or is removed from pending queue,
  * decrement nr_in_flight of its pwq and handle workqueue flushing.
@@ -1136,21 +1153,21 @@ static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
  * CONTEXT:
  * spin_lock_irq(pool->lock).
  */
-static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
+static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
 {
-        /* uncolored work items don't participate in flushing or nr_active */
-        if (color == WORK_NO_COLOR)
-                goto out_put;
-
-        pwq->nr_in_flight[color]--;
+        int color = get_work_color(work_data);
 
-        pwq->nr_active--;
-        if (!list_empty(&pwq->delayed_works)) {
-                /* one down, submit a delayed one */
-                if (pwq->nr_active < pwq->max_active)
-                        pwq_activate_first_delayed(pwq);
+        if (!(work_data & WORK_STRUCT_INACTIVE)) {
+                pwq->nr_active--;
+                if (!list_empty(&pwq->inactive_works)) {
+                        /* one down, submit an inactive one */
+                        if (pwq->nr_active < pwq->max_active)
+                                pwq_activate_first_inactive(pwq);
+                }
         }
 
+        pwq->nr_in_flight[color]--;
+
         /* is flush in progress and are we at the flushing tip? */
         if (likely(pwq->flush_color != color))
                 goto out_put;
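
To summarize the new accounting (a sketch derived from the code above, not part of the patch): the INACTIVE bit tells pwq_dec_nr_in_flight() whether the departing item ever held an nr_active slot, while nr_in_flight[color] is now decremented unconditionally, which is what makes barriers visible to flushing:

/*
 *   departing item                 INACTIVE bit   nr_active--   nr_in_flight[color]--
 *   regular work item                  clear          yes               yes
 *   wq_barrier from flush_work()       set            no                yes
 *
 * A regular inactive item is activated (bit cleared, nr_active++) before it
 * is grabbed (see try_to_grab_pending() below), so only barriers reach this
 * function with the bit still set.
 */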
@@ -1246,17 +1263,21 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                 debug_work_deactivate(work);
 
                 /*
-                 * A delayed work item cannot be grabbed directly because
-                 * it might have linked NO_COLOR work items which, if left
-                 * on the delayed_list, will confuse pwq->nr_active
+                 * A cancelable inactive work item must be in the
+                 * pwq->inactive_works since a queued barrier can't be
+                 * canceled (see the comments in insert_wq_barrier()).
+                 *
+                 * An inactive work item cannot be grabbed directly because
+                 * it might have linked barrier work items which, if left
+                 * on the inactive_works list, will confuse pwq->nr_active
                  * management later on and cause stall. Make sure the work
                  * item is activated before grabbing.
                  */
-                if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
-                        pwq_activate_delayed_work(work);
+                if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
+                        pwq_activate_inactive_work(work);
 
                 list_del_init(&work->entry);
-                pwq_dec_nr_in_flight(pwq, get_work_color(work));
+                pwq_dec_nr_in_flight(pwq, *work_data_bits(work));
 
                 /* work->data points to pwq iff queued, point to pool */
                 set_work_pool_and_keep_pending(work, pool->id);
@@ -1451,8 +1472,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
                 if (list_empty(worklist))
                         pwq->pool->watchdog_ts = jiffies;
         } else {
-                work_flags |= WORK_STRUCT_DELAYED;
-                worklist = &pwq->delayed_works;
+                work_flags |= WORK_STRUCT_INACTIVE;
+                worklist = &pwq->inactive_works;
         }
 
         debug_work_activate(work);
@@ -2129,7 +2150,7 @@ __acquires(&pool->lock)
         struct pool_workqueue *pwq = get_work_pwq(work);
         struct worker_pool *pool = worker->pool;
         bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
-        int work_color;
+        unsigned long work_data;
         struct worker *collision;
 #ifdef CONFIG_LOCKDEP
         /*
@@ -2165,7 +2186,8 @@ __acquires(&pool->lock)
         worker->current_work = work;
         worker->current_func = work->func;
         worker->current_pwq = pwq;
-        work_color = get_work_color(work);
+        work_data = *work_data_bits(work);
+        worker->current_color = get_work_color(work_data);
 
         /*
          * Record wq name for cmdline and debug reporting, may get
@@ -2270,7 +2292,7 @@ __acquires(&pool->lock)
          * (!= NO_COLOR) to avoid prematurely restoring the nice level.
          */
         if (unlikely(worker->flags & WORKER_NICED &&
-                     work_color != WORK_NO_COLOR)) {
+                     get_work_color(work_data) != WORK_NO_COLOR)) {
                 set_user_nice(worker->task, worker->pool->attrs->nice);
                 worker_clr_flags(worker, WORKER_NICED);
         }
@@ -2280,7 +2302,8 @@ __acquires(&pool->lock)
         worker->current_work = NULL;
         worker->current_func = NULL;
         worker->current_pwq = NULL;
-        pwq_dec_nr_in_flight(pwq, work_color);
+        worker->current_color = INT_MAX;
+        pwq_dec_nr_in_flight(pwq, work_data);
 }
 
 /**
@@ -2496,7 +2519,7 @@ static int rescuer_thread(void *__rescuer)
                         /*
                          * The above execution of rescued work items could
                          * have created more to rescue through
-                         * pwq_activate_first_delayed() or chained
+                         * pwq_activate_first_inactive() or chained
                          * queueing. Let's put @pwq back on mayday list so
                          * that such back-to-back work items, which may be
                          * being used to relieve memory pressure, don't
@@ -2623,8 +2646,9 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
                               struct wq_barrier *barr,
                               struct work_struct *target, struct worker *worker)
 {
+        unsigned int work_flags = 0;
+        unsigned int work_color;
         struct list_head *head;
-        unsigned int linked = 0;
 
         /*
          * debugobject calls are safe here even with pool->lock locked
@@ -2639,24 +2663,31 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
         barr->task = current;
 
+        /* The barrier work item does not participate in pwq->nr_active. */
+        work_flags |= WORK_STRUCT_INACTIVE;
+
         /*
          * If @target is currently being executed, schedule the
          * barrier to the worker; otherwise, put it after @target.
          */
-        if (worker)
+        if (worker) {
                 head = worker->scheduled.next;
-        else {
+                work_color = worker->current_color;
+        } else {
                 unsigned long *bits = work_data_bits(target);
 
                 head = target->entry.next;
                 /* there can already be other linked works, inherit and set */
-                linked = *bits & WORK_STRUCT_LINKED;
+                work_flags |= *bits & WORK_STRUCT_LINKED;
+                work_color = get_work_color(*bits);
                 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
         }
 
+        pwq->nr_in_flight[work_color]++;
+        work_flags |= work_color_to_flags(work_color);
+
         debug_work_activate(&barr->work);
-        insert_work(pwq, &barr->work, head,
-                    work_color_to_flags(WORK_NO_COLOR) | linked);
+        insert_work(pwq, &barr->work, head, work_flags);
 }
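
The barrier now takes part in flush accounting: it inherits its color either from the worker currently executing @target (worker->current_color, recorded in process_one_work()) or, if @target is still queued, from @target's own flags word, and it bumps nr_in_flight[] for that color while WORK_STRUCT_INACTIVE keeps it out of nr_active. Condensed (a sketch; the real code also inherits WORK_STRUCT_LINKED as shown above):

        work_color = worker ? worker->current_color    /* target is running */
                            : get_work_color(*work_data_bits(target));
        pwq->nr_in_flight[work_color]++;        /* flush_workqueue() now waits for it */
        work_flags |= WORK_STRUCT_INACTIVE | work_color_to_flags(work_color);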
 /**
@@ -2922,7 +2953,7 @@ void drain_workqueue(struct workqueue_struct *wq)
                 bool drained;
 
                 spin_lock_irq(&pwq->pool->lock);
-                drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
+                drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
                 spin_unlock_irq(&pwq->pool->lock);
 
                 if (drained)
@@ -3703,7 +3734,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
  * @pwq: target pool_workqueue
  *
  * If @pwq isn't freezing, set @pwq->max_active to the associated
- * workqueue's saved_max_active and activate delayed work items
+ * workqueue's saved_max_active and activate inactive work items
  * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
  */
 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
@@ -3732,9 +3763,9 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
                 pwq->max_active = wq->saved_max_active;
 
-                while (!list_empty(&pwq->delayed_works) &&
+                while (!list_empty(&pwq->inactive_works) &&
                        pwq->nr_active < pwq->max_active) {
-                        pwq_activate_first_delayed(pwq);
+                        pwq_activate_first_inactive(pwq);
                         kick = true;
                 }
@@ -3765,7 +3796,7 @@ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
         pwq->wq = wq;
         pwq->flush_color = -1;
         pwq->refcnt = 1;
-        INIT_LIST_HEAD(&pwq->delayed_works);
+        INIT_LIST_HEAD(&pwq->inactive_works);
         INIT_LIST_HEAD(&pwq->pwqs_node);
         INIT_LIST_HEAD(&pwq->mayday_node);
         INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
@@ -4386,7 +4417,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
                 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
                     WARN_ON(pwq->nr_active) ||
-                    WARN_ON(!list_empty(&pwq->delayed_works))) {
+                    WARN_ON(!list_empty(&pwq->inactive_works))) {
                         mutex_unlock(&wq->mutex);
                         show_workqueue_state();
                         return;
@@ -4527,7 +4558,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
         else
                 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
 
-        ret = !list_empty(&pwq->delayed_works);
+        ret = !list_empty(&pwq->inactive_works);
         rcu_read_unlock_sched();
 
         return ret;
@@ -4722,11 +4753,11 @@ static void show_pwq(struct pool_workqueue *pwq)
                 pr_cont("\n");
         }
 
-        if (!list_empty(&pwq->delayed_works)) {
+        if (!list_empty(&pwq->inactive_works)) {
                 bool comma = false;
 
-                pr_info("    delayed:");
-                list_for_each_entry(work, &pwq->delayed_works, entry) {
+                pr_info("    inactive:");
+                list_for_each_entry(work, &pwq->inactive_works, entry) {
                         pr_cont_work(comma, work);
                         comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
                 }
@@ -4756,7 +4787,7 @@ void show_workqueue_state(void)
                 bool idle = true;
 
                 for_each_pwq(pwq, wq) {
-                        if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
+                        if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
                                 idle = false;
                                 break;
                         }
@@ -4768,7 +4799,7 @@ void show_workqueue_state(void)
                 for_each_pwq(pwq, wq) {
                         spin_lock_irqsave(&pwq->pool->lock, flags);
-                        if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+                        if (pwq->nr_active || !list_empty(&pwq->inactive_works))
                                 show_pwq(pwq);
                         spin_unlock_irqrestore(&pwq->pool->lock, flags);
                         /*
@@ -5143,7 +5174,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu_safe);
  * freeze_workqueues_begin - begin freezing workqueues
  *
  * Start freezing workqueues. After this function returns, all freezable
- * workqueues will queue new works to their delayed_works list instead of
+ * workqueues will queue new works to their inactive_works list instead of
  * pool->worklist.
  *
  * CONTEXT:
@@ -30,7 +30,8 @@ struct worker {
         struct work_struct      *current_work;  /* L: work being processed */
         work_func_t             current_func;   /* L: current_work's fn */
-        struct pool_workqueue   *current_pwq; /* L: current_work's pwq */
+        struct pool_workqueue   *current_pwq;   /* L: current_work's pwq */
+        unsigned int            current_color;  /* L: current_work's color */
         struct list_head        scheduled;      /* L: scheduled works */
 
         /* 64 bytes boundary on 64bit, 32 on 32bit */
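
current_color records the flush color of the item a worker is executing so that insert_wq_barrier() can color a barrier even when its target is already running; from the process_one_work() hunks above, its lifecycle is:

        worker->current_color = get_work_color(work_data);      /* item picked up */
        /* work->func(work) runs; a flush_work() barrier queued
         * meanwhile inherits this color via insert_wq_barrier() */
        worker->current_color = INT_MAX;                         /* item finished */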