Commit ec840b5b authored by Lin Shengwang, committed by Zheng Zengkai

sched: fix kabi for core scheduling

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5OOWG
CVE: NA

--------------------------------------------------------------------------
Signed-off-by: Lin Shengwang <linshengwang1@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 22e41f75
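
The patch below moves the core-scheduling members out of the middle of task_struct, cfs_rq, rq and sched_class, and re-adds them at the end of each structure guarded by !defined(__GENKSYMS__), so that genksyms keeps computing the pre-core-scheduling symbol CRCs while real builds carry the new fields in slots that were already reserved. A minimal sketch of the helper macros involved, modeled on openEuler's include/linux/kabi.h (simplified; the in-tree macros carry additional size and alignment checking):

/*
 * Simplified sketch of the kABI helpers used in this patch.
 */
#ifdef __GENKSYMS__
/* genksyms computes CRCs against the original name/layout. */
#define KABI_RENAME(_orig, _new)	_orig
#else
/* The compiler builds with the new name/fields. */
#define KABI_RENAME(_orig, _new)	_new
#endif

/* A padding slot that a later field can consume without growing the struct. */
#define KABI_RESERVE(n)			unsigned long kabi_reserved##n;

/* Place a new member in an existing alignment hole of the struct. */
#define KABI_FILL_HOLE(_declaration)	_declaration;
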
@@ -729,17 +729,11 @@ struct task_struct {
	const struct sched_class	*sched_class;
	struct sched_entity		se;
	struct sched_rt_entity		rt;
	struct sched_dl_entity		dl;
#ifdef CONFIG_SCHED_CORE
	struct rb_node			core_node;
	unsigned long			core_cookie;
	unsigned int			core_occupation;
#endif
#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
#endif
	struct sched_dl_entity		dl;
#ifdef CONFIG_UCLAMP_TASK
	/*
@@ -1404,11 +1398,18 @@ struct task_struct {
	 */
	randomized_struct_fields_end
#if defined CONFIG_SCHED_CORE && !defined(__GENKSYMS__)
	struct rb_node			core_node;
	unsigned long			core_cookie;
	unsigned int			core_occupation;
	KABI_FILL_HOLE(unsigned int kabi_hole)
#else
	KABI_RESERVE(1)
	KABI_RESERVE(2)
	KABI_RESERVE(3)
	KABI_RESERVE(4)
	KABI_RESERVE(5)
#endif
	KABI_RESERVE(6)
	KABI_RESERVE(7)
	KABI_RESERVE(8)
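
The replacement above is size-neutral on 64-bit targets: struct rb_node is three pointer-sized words (24 bytes), core_cookie is 8 bytes, and core_occupation plus the explicit KABI_FILL_HOLE pad contribute 4 + 4, for 40 bytes in total, exactly the five reserved slots consumed from the #else branch. A hypothetical compile-time check of that arithmetic (not part of the patch; assumes a 64-bit build and unsigned-long-sized reserve slots):

#include <linux/rbtree.h>
#include <linux/build_bug.h>

/* Hypothetical mirror of the fields spliced into task_struct above. */
struct task_core_fields {
	struct rb_node	core_node;	/* 3 pointers: 24 bytes on 64-bit */
	unsigned long	core_cookie;	/* 8 bytes */
	unsigned int	core_occupation;/* 4 bytes */
	unsigned int	kabi_hole;	/* 4 bytes of explicit padding */
};

/* The new members must exactly cover KABI_RESERVE(1)..KABI_RESERVE(5). */
static_assert(sizeof(struct task_core_fields) == 5 * sizeof(unsigned long));
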
@@ -553,11 +553,6 @@ struct cfs_rq {
	u64			exec_clock;
	u64			min_vruntime;
#ifdef CONFIG_SCHED_CORE
	unsigned int		forceidle_seq;
	u64			min_vruntime_fi;
#endif
#ifndef CONFIG_64BIT
	u64			min_vruntime_copy;
#endif
@@ -644,8 +639,14 @@ struct cfs_rq {
	KABI_RESERVE(1)
	KABI_RESERVE(2)
#endif
#if defined CONFIG_SCHED_CORE && !defined(__GENKSYMS__)
	unsigned int		forceidle_seq;
	KABI_FILL_HOLE(unsigned int kabi_hole)
	u64			min_vruntime_fi;
#else
	KABI_RESERVE(3)
	KABI_RESERVE(4)
#endif
};

static inline int rt_bandwidth_enabled(void)
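
cfs_rq gets the same size-neutral treatment: forceidle_seq and the KABI_FILL_HOLE pad (4 + 4 bytes) plus min_vruntime_fi (8 bytes) exactly fill the two reserved slots. A hypothetical check under the same 64-bit assumptions as above:

#include <linux/types.h>
#include <linux/build_bug.h>

/* Hypothetical mirror of the fields spliced into cfs_rq above. */
struct cfs_core_fields {
	unsigned int	forceidle_seq;	/* 4 bytes */
	unsigned int	kabi_hole;	/* 4 bytes of explicit padding */
	u64		min_vruntime_fi;/* 8 bytes */
};

/* The new members must exactly cover KABI_RESERVE(3) and KABI_RESERVE(4). */
static_assert(sizeof(struct cfs_core_fields) == 2 * sizeof(unsigned long));
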
@@ -942,7 +943,7 @@ DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t		__lock;
	raw_spinlock_t		KABI_RENAME(lock, __lock);

	/*
	 * nr_running and cpu_load should be in the same cacheline because
@@ -1112,7 +1113,7 @@ struct rq {
	struct cpuidle_state	*idle_state;
#endif

#ifdef CONFIG_SCHED_CORE
#if defined(CONFIG_SCHED_CORE) && !defined(__GENKSYMS__)
	/* per rq */
	struct rq		*core;
	struct task_struct	*core_pick;
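
struct rq needs no reserved slots here: the patch only has to keep genksyms' view of it stable. The upstream rename of lock to __lock is hidden from CRC computation with KABI_RENAME, and the core-scheduling block is masked by the added !defined(__GENKSYMS__) test. A toy illustration of the rename, assuming the simplified macros sketched earlier:

#include <linux/spinlock.h>

/* Toy example: genksyms computes CRCs as if the member were still
 * called 'lock', while compiled code must use the new name '__lock'. */
struct toy_rq {
	raw_spinlock_t	KABI_RENAME(lock, __lock);
};
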
@@ -2135,9 +2136,6 @@ struct sched_class {
#ifdef CONFIG_SMP
	int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
	int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	struct task_struct * (*pick_task)(struct rq *rq);
	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);

	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
@@ -2175,7 +2173,11 @@ struct sched_class {
	void (*task_change_group)(struct task_struct *p, int type);
#endif

#if !defined(__GENKSYMS__) && defined(CONFIG_SMP)
	struct task_struct * (*pick_task)(struct rq *rq);
#else
	KABI_RESERVE(1)
#endif
	KABI_RESERVE(2)
} __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */
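
The relocated pick_task hook follows the reserved-slot pattern once more: genksyms sees KABI_RESERVE(1) and reproduces the pre-core-scheduling sched_class CRC, while real SMP builds get the function pointer (pointer-sized, so slot-compatible) in its place. A toy example of consuming a reserved slot this way, again assuming the simplified macros above:

/* Toy example (not kernel code) of retiring a reserved slot. */
struct toy_ops {
	void (*old_hook)(void);
#if !defined(__GENKSYMS__)
	void (*new_hook)(void);	/* real builds: new member in the slot */
#else
	KABI_RESERVE(1)		/* genksyms: unchanged reserved member */
#endif
	KABI_RESERVE(2)
};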