提交 bcc78db6 编写于 作者: S Suren Baghdasaryan 提交者: Linus Torvalds

psi: rename psi fields in preparation for psi trigger addition

Rename psi_group structure member fields used for calculating psi totals
and averages for clear distinction between them and for trigger-related
fields that will be added by "psi: introduce psi monitor".

[surenb@google.com: v6]
  Link: http://lkml.kernel.org/r/20190319235619.260832-4-surenb@google.com
Link: http://lkml.kernel.org/r/20190124211518.244221-5-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 9289c5e6
...@@ -69,17 +69,17 @@ struct psi_group_cpu { ...@@ -69,17 +69,17 @@ struct psi_group_cpu {
}; };
struct psi_group { struct psi_group {
/* Protects data updated during an aggregation */ /* Protects data used by the aggregator */
struct mutex stat_lock; struct mutex avgs_lock;
/* Per-cpu task state & time tracking */ /* Per-cpu task state & time tracking */
struct psi_group_cpu __percpu *pcpu; struct psi_group_cpu __percpu *pcpu;
/* Periodic aggregation state */ /* Running pressure averages */
u64 total_prev[NR_PSI_STATES - 1]; u64 avg_total[NR_PSI_STATES - 1];
u64 last_update; u64 avg_last_update;
u64 next_update; u64 avg_next_update;
struct delayed_work clock_work; struct delayed_work avgs_work;
/* Total stall times and sampled pressure averages */ /* Total stall times and sampled pressure averages */
u64 total[NR_PSI_STATES - 1]; u64 total[NR_PSI_STATES - 1];
......
...@@ -165,7 +165,7 @@ static struct psi_group psi_system = { ...@@ -165,7 +165,7 @@ static struct psi_group psi_system = {
.pcpu = &system_group_pcpu, .pcpu = &system_group_pcpu,
}; };
static void psi_update_work(struct work_struct *work); static void psi_avgs_work(struct work_struct *work);
static void group_init(struct psi_group *group) static void group_init(struct psi_group *group)
{ {
...@@ -173,9 +173,9 @@ static void group_init(struct psi_group *group) ...@@ -173,9 +173,9 @@ static void group_init(struct psi_group *group)
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq); seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
group->next_update = sched_clock() + psi_period; group->avg_next_update = sched_clock() + psi_period;
INIT_DELAYED_WORK(&group->clock_work, psi_update_work); INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
mutex_init(&group->stat_lock); mutex_init(&group->avgs_lock);
} }
void __init psi_init(void) void __init psi_init(void)
...@@ -278,7 +278,7 @@ static bool update_stats(struct psi_group *group) ...@@ -278,7 +278,7 @@ static bool update_stats(struct psi_group *group)
int cpu; int cpu;
int s; int s;
mutex_lock(&group->stat_lock); mutex_lock(&group->avgs_lock);
/* /*
* Collect the per-cpu time buckets and average them into a * Collect the per-cpu time buckets and average them into a
...@@ -319,7 +319,7 @@ static bool update_stats(struct psi_group *group) ...@@ -319,7 +319,7 @@ static bool update_stats(struct psi_group *group)
/* avgX= */ /* avgX= */
now = sched_clock(); now = sched_clock();
expires = group->next_update; expires = group->avg_next_update;
if (now < expires) if (now < expires)
goto out; goto out;
if (now - expires >= psi_period) if (now - expires >= psi_period)
...@@ -332,14 +332,14 @@ static bool update_stats(struct psi_group *group) ...@@ -332,14 +332,14 @@ static bool update_stats(struct psi_group *group)
* But the deltas we sample out of the per-cpu buckets above * But the deltas we sample out of the per-cpu buckets above
* are based on the actual time elapsing between clock ticks. * are based on the actual time elapsing between clock ticks.
*/ */
group->next_update = expires + ((1 + missed_periods) * psi_period); group->avg_next_update = expires + ((1 + missed_periods) * psi_period);
period = now - (group->last_update + (missed_periods * psi_period)); period = now - (group->avg_last_update + (missed_periods * psi_period));
group->last_update = now; group->avg_last_update = now;
for (s = 0; s < NR_PSI_STATES - 1; s++) { for (s = 0; s < NR_PSI_STATES - 1; s++) {
u32 sample; u32 sample;
sample = group->total[s] - group->total_prev[s]; sample = group->total[s] - group->avg_total[s];
/* /*
* Due to the lockless sampling of the time buckets, * Due to the lockless sampling of the time buckets,
* recorded time deltas can slip into the next period, * recorded time deltas can slip into the next period,
...@@ -359,22 +359,22 @@ static bool update_stats(struct psi_group *group) ...@@ -359,22 +359,22 @@ static bool update_stats(struct psi_group *group)
*/ */
if (sample > period) if (sample > period)
sample = period; sample = period;
group->total_prev[s] += sample; group->avg_total[s] += sample;
calc_avgs(group->avg[s], missed_periods, sample, period); calc_avgs(group->avg[s], missed_periods, sample, period);
} }
out: out:
mutex_unlock(&group->stat_lock); mutex_unlock(&group->avgs_lock);
return nonidle_total; return nonidle_total;
} }
static void psi_update_work(struct work_struct *work) static void psi_avgs_work(struct work_struct *work)
{ {
struct delayed_work *dwork; struct delayed_work *dwork;
struct psi_group *group; struct psi_group *group;
bool nonidle; bool nonidle;
dwork = to_delayed_work(work); dwork = to_delayed_work(work);
group = container_of(dwork, struct psi_group, clock_work); group = container_of(dwork, struct psi_group, avgs_work);
/* /*
* If there is task activity, periodically fold the per-cpu * If there is task activity, periodically fold the per-cpu
...@@ -391,8 +391,9 @@ static void psi_update_work(struct work_struct *work) ...@@ -391,8 +391,9 @@ static void psi_update_work(struct work_struct *work)
u64 now; u64 now;
now = sched_clock(); now = sched_clock();
if (group->next_update > now) if (group->avg_next_update > now)
delay = nsecs_to_jiffies(group->next_update - now) + 1; delay = nsecs_to_jiffies(
group->avg_next_update - now) + 1;
schedule_delayed_work(dwork, delay); schedule_delayed_work(dwork, delay);
} }
} }
...@@ -546,13 +547,13 @@ void psi_task_change(struct task_struct *task, int clear, int set) ...@@ -546,13 +547,13 @@ void psi_task_change(struct task_struct *task, int clear, int set)
*/ */
if (unlikely((clear & TSK_RUNNING) && if (unlikely((clear & TSK_RUNNING) &&
(task->flags & PF_WQ_WORKER) && (task->flags & PF_WQ_WORKER) &&
wq_worker_last_func(task) == psi_update_work)) wq_worker_last_func(task) == psi_avgs_work))
wake_clock = false; wake_clock = false;
while ((group = iterate_groups(task, &iter))) { while ((group = iterate_groups(task, &iter))) {
psi_group_change(group, cpu, clear, set); psi_group_change(group, cpu, clear, set);
if (wake_clock && !delayed_work_pending(&group->clock_work)) if (wake_clock && !delayed_work_pending(&group->avgs_work))
schedule_delayed_work(&group->clock_work, PSI_FREQ); schedule_delayed_work(&group->avgs_work, PSI_FREQ);
} }
} }
...@@ -649,7 +650,7 @@ void psi_cgroup_free(struct cgroup *cgroup) ...@@ -649,7 +650,7 @@ void psi_cgroup_free(struct cgroup *cgroup)
if (static_branch_likely(&psi_disabled)) if (static_branch_likely(&psi_disabled))
return; return;
cancel_delayed_work_sync(&cgroup->psi.clock_work); cancel_delayed_work_sync(&cgroup->psi.avgs_work);
free_percpu(cgroup->psi.pcpu); free_percpu(cgroup->psi.pcpu);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册
反馈
建议
客服 返回
顶部