Commit ed00d398 authored by Chen Hui, committed by Zheng Zengkai

sched: Move some definitions to sched.h

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5F6X6
CVE: NA

-------------------

Move the definitions of four functions (task_of, task_cfs_rq,
cfs_rq_of, group_cfs_rq) from fair.c to sched.h, consistent with
the latest upstream version.
Signed-off-by: Chen Hui <judy.chenhui@huawei.com>
Signed-off-by: Ren Zhijie <renzhijie2@huawei.com>
Parent d93afd18
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -282,33 +282,11 @@ const struct sched_class fair_sched_class;
  */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
-	SCHED_WARN_ON(!entity_is_task(se));
-	return container_of(se, struct task_struct, se);
-}
-
 /* Walk up scheduling entities hierarchy */
 #define for_each_sched_entity(se) \
 		for (; se; se = se->parent)
 
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
-	return p->se.cfs_rq;
-}
-
-/* runqueue on which this entity is (to be) queued */
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
-	return se->cfs_rq;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
-	return grp->my_q;
-}
 
 static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
 {
 	if (!path)
@@ -469,33 +447,9 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 
 #else	/* !CONFIG_FAIR_GROUP_SCHED */
 
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
-	return container_of(se, struct task_struct, se);
-}
-
 #define for_each_sched_entity(se) \
 		for (; se; se = NULL)
 
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
-	return &task_rq(p)->cfs;
-}
-
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
-	struct task_struct *p = task_of(se);
-	struct rq *rq = task_rq(p);
-
-	return &rq->cfs;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
-	return NULL;
-}
-
 static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
 {
 	if (path)
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1176,6 +1176,58 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		raw_cpu_ptr(&runqueues)
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+	SCHED_WARN_ON(!entity_is_task(se));
+	return container_of(se, struct task_struct, se);
+}
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+	return p->se.cfs_rq;
+}
+
+/* runqueue on which this entity is (to be) queued */
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+	return se->cfs_rq;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+	return grp->my_q;
+}
+
+#else
+
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+	return container_of(se, struct task_struct, se);
+}
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+	return &task_rq(p)->cfs;
+}
+
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+	struct task_struct *p = task_of(se);
+	struct rq *rq = task_rq(p);
+
+	return &rq->cfs;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+	return NULL;
+}
+
+#endif
+
 extern void update_rq_clock(struct rq *rq);
 
 static inline u64 __rq_clock_broken(struct rq *rq)
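All four helpers hinge on the container_of() pattern: given a pointer to a
member embedded inside a larger struct, recover a pointer to the enclosing
struct. Below is a minimal, self-contained userspace sketch (not part of the
patch; the struct layouts are simplified stand-ins for the real kernel types)
showing how task_of() derives the task_struct from its embedded sched_entity.

/* Userspace demo of the container_of() pattern behind task_of().
 * The struct layouts here are simplified stand-ins, not the real
 * kernel definitions.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_entity {
	int on_rq;			/* placeholder member */
};

struct task_struct {
	int pid;
	struct sched_entity se;		/* embedded, as in the kernel */
};

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

int main(void)
{
	struct task_struct task = { .pid = 42 };
	struct sched_entity *se = &task.se;

	/* Given only the embedded entity, recover the enclosing task. */
	printf("pid = %d\n", task_of(se)->pid);
	return 0;
}

Because these are static inline functions, moving them into sched.h makes
them usable from every file that includes the scheduler's internal header,
rather than from fair.c alone.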