Commit be234044 authored by Peter Zijlstra, committed by Zheng Zengkai

sched: Trivial core scheduling cookie management

mainline inclusion
from mainline-v5.14-rc1
commit 6e33cad0
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5OOWG
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=6e33cad0af49336952e5541464bd02f5b5fd433e

--------------------------------------------------------------------------

In order to not have to use pid_struct, create a new, smaller,
structure to manage task cookies for core scheduling.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.919768100@infradead.org
Signed-off-by: Lin Shengwang <linshengwang1@huawei.com>
Reviewed-by: lihua <hucool.lihua@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 30a1426a
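A note on the idea before the diff: a task's cookie is simply the kernel address of a small refcounted allocation, so deciding whether two tasks may share an SMT core reduces to an integer comparison of their cookie values. As a minimal user-space sketch of that idea (illustrative names only, not kernel API):

    #include <stdbool.h>
    #include <stdlib.h>

    /* Illustrative stand-in for struct sched_core_cookie. */
    struct cookie {
            int refcnt;
    };

    /* The cookie a task carries is just the allocation's address; a
     * fresh allocation is unique, so it cannot collide with another
     * group's cookie. */
    static unsigned long cookie_alloc(void)
    {
            struct cookie *ck = malloc(sizeof(*ck));

            if (!ck)
                    return 0;
            ck->refcnt = 1;
            return (unsigned long)ck;
    }

    /* Two tasks may be co-scheduled iff their cookies compare equal;
     * 0 means "no cookie" and matches only other cookie-less tasks. */
    static bool cookies_match(unsigned long a, unsigned long b)
    {
            return a == b;
    }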
include/linux/sched.h
@@ -2173,6 +2173,12 @@ int sched_trace_rq_nr_running(struct rq *rq);
 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
 
+#ifdef CONFIG_SCHED_CORE
+extern void sched_core_free(struct task_struct *tsk);
+#else
+static inline void sched_core_free(struct task_struct *tsk) { }
+#endif
+
 #ifdef CONFIG_QOS_SCHED
 void sched_move_offline_task(struct task_struct *p);
 void sched_qos_offline_wait(void);
......
kernel/fork.c
@@ -739,6 +739,7 @@ void __put_task_struct(struct task_struct *tsk)
 	exit_creds(tsk);
 	delayacct_tsk_free(tsk);
 	put_signal_struct(tsk->signal);
+	sched_core_free(tsk);
 
 	if (!profile_handoff_task(tsk))
 		free_task(tsk);
......
kernel/sched/Makefile
@@ -36,3 +36,4 @@ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 obj-$(CONFIG_MEMBARRIER) += membarrier.o
 obj-$(CONFIG_CPU_ISOLATION) += isolation.o
 obj-$(CONFIG_PSI) += psi.o
+obj-$(CONFIG_SCHED_CORE) += core_sched.o
kernel/sched/core.c
@@ -158,7 +158,7 @@ static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
 	return 0;
 }
 
-static void sched_core_enqueue(struct rq *rq, struct task_struct *p)
+void sched_core_enqueue(struct rq *rq, struct task_struct *p)
 {
 	rq->core->core_task_seq++;
 
@@ -168,14 +168,15 @@ static void sched_core_enqueue(struct rq *rq, struct task_struct *p)
 	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
 }
 
-static void sched_core_dequeue(struct rq *rq, struct task_struct *p)
+void sched_core_dequeue(struct rq *rq, struct task_struct *p)
 {
 	rq->core->core_task_seq++;
 
-	if (!p->core_cookie)
+	if (!sched_core_enqueued(p))
 		return;
 
 	rb_erase(&p->core_node, &rq->core_tree);
+	RB_CLEAR_NODE(&p->core_node);
 }
 
 /*
......
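The change from testing p->core_cookie to sched_core_enqueued(), paired with the new RB_CLEAR_NODE() on dequeue, lets the rb_node itself record whether the task is in the core tree. This relies on a standard rbtree idiom: a node whose parent pointer is its own address is "not in a tree". A self-contained sketch of that idiom, mirroring the <linux/rbtree.h> definitions:

    /* Mirrors the RB_CLEAR_NODE()/RB_EMPTY_NODE() definitions from
     * <linux/rbtree.h>: a node parented to itself is not in any tree. */
    struct rb_node {
            unsigned long __rb_parent_color;
            struct rb_node *rb_right, *rb_left;
    };

    #define RB_CLEAR_NODE(node) \
            ((node)->__rb_parent_color = (unsigned long)(node))
    #define RB_EMPTY_NODE(node) \
            ((node)->__rb_parent_color == (unsigned long)(node))

    /* sched_core_enqueued() (added in kernel/sched/sched.h below) is
     * then just !RB_EMPTY_NODE(&p->core_node), so task_struct needs no
     * extra flag to track enqueue state. */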
kernel/sched/core_sched.c (new file)
// SPDX-License-Identifier: GPL-2.0-only

#include "sched.h"

/*
 * A simple wrapper around refcount. An allocated sched_core_cookie's
 * address is used to compute the cookie of the task.
 */
struct sched_core_cookie {
	refcount_t refcnt;
};

unsigned long sched_core_alloc_cookie(void)
{
	struct sched_core_cookie *ck = kmalloc(sizeof(*ck), GFP_KERNEL);
	if (!ck)
		return 0;

	refcount_set(&ck->refcnt, 1);
	sched_core_get();

	return (unsigned long)ck;
}

void sched_core_put_cookie(unsigned long cookie)
{
	struct sched_core_cookie *ptr = (void *)cookie;

	if (ptr && refcount_dec_and_test(&ptr->refcnt)) {
		kfree(ptr);
		sched_core_put();
	}
}

unsigned long sched_core_get_cookie(unsigned long cookie)
{
	struct sched_core_cookie *ptr = (void *)cookie;

	if (ptr)
		refcount_inc(&ptr->refcnt);

	return cookie;
}

/*
 * sched_core_update_cookie - replace the cookie on a task
 * @p: the task to update
 * @cookie: the new cookie
 *
 * Effectively exchange the task cookie; caller is responsible for lifetimes on
 * both ends.
 *
 * Returns: the old cookie
 */
unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie)
{
	unsigned long old_cookie;
	struct rq_flags rf;
	struct rq *rq;
	bool enqueued;

	rq = task_rq_lock(p, &rf);

	/*
	 * Since creating a cookie implies sched_core_get(), and we cannot set
	 * a cookie until after we've created it, similarly, we cannot destroy
	 * a cookie until after we've removed it, we must have core scheduling
	 * enabled here.
	 */
	SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq));

	enqueued = sched_core_enqueued(p);
	if (enqueued)
		sched_core_dequeue(rq, p);

	old_cookie = p->core_cookie;
	p->core_cookie = cookie;

	if (enqueued)
		sched_core_enqueue(rq, p);

	/*
	 * If task is currently running, it may not be compatible anymore after
	 * the cookie change, so enter the scheduler on its CPU to schedule it
	 * away.
	 */
	if (task_running(rq, p))
		resched_curr(rq);

	task_rq_unlock(rq, p, &rf);

	return old_cookie;
}

static unsigned long sched_core_clone_cookie(struct task_struct *p)
{
	unsigned long cookie, flags;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cookie = sched_core_get_cookie(p->core_cookie);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	return cookie;
}

void sched_core_free(struct task_struct *p)
{
	sched_core_put_cookie(p->core_cookie);
}
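Taken together, the new API composes as follows. The caller below is hypothetical (the prctl() plumbing that actually drives these helpers arrives later in this patch series), but it shows the intended reference discipline: sched_core_update_cookie() consumes a reference on behalf of the task and returns the old cookie, which the caller must put.

    /* Hypothetical caller, for illustration only: give two tasks a
     * shared cookie so they may be co-scheduled on one SMT core. */
    static int share_core(struct task_struct *p1, struct task_struct *p2)
    {
            unsigned long cookie = sched_core_alloc_cookie(); /* refcnt == 1 */

            if (!cookie)
                    return -ENOMEM;

            /* Each task gets its own reference; the old cookie returned
             * by sched_core_update_cookie() (0 here) is put, which is a
             * no-op for a NULL pointer. */
            sched_core_put_cookie(sched_core_update_cookie(p1,
                                    sched_core_get_cookie(cookie)));
            sched_core_put_cookie(sched_core_update_cookie(p2,
                                    sched_core_get_cookie(cookie)));

            /* Drop the allocation reference: the cookie now lives exactly
             * as long as some task still carries it, and the final
             * sched_core_free() at task teardown frees it. */
            sched_core_put_cookie(cookie);
            return 0;
    }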
kernel/sched/sched.h
@@ -1267,6 +1267,22 @@ static inline bool sched_group_cookie_match(struct rq *rq,
 
 extern void queue_core_balance(struct rq *rq);
 
+static inline bool sched_core_enqueued(struct task_struct *p)
+{
+	return !RB_EMPTY_NODE(&p->core_node);
+}
+
+extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
+extern void sched_core_dequeue(struct rq *rq, struct task_struct *p);
+
+extern void sched_core_get(void);
+extern void sched_core_put(void);
+
+extern unsigned long sched_core_alloc_cookie(void);
+extern void sched_core_put_cookie(unsigned long cookie);
+extern unsigned long sched_core_get_cookie(unsigned long cookie);
+extern unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie);
+
 #else /* !CONFIG_SCHED_CORE */
 
 static inline bool sched_core_enabled(struct rq *rq)
......