Commit 800d1646 authored by Guan Jing, committed by Zheng Zengkai

KABI: reserve space for sched structures

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4KAP1?from=project-issue
CVE: NA

--------
We reserve some fields beforehand for sched structures prone to change,
therefore, we can hot add/change features of sched with this enhancement.
After reserving, normally cache does not matter as the reserved fields
are not accessed at all.
Signed-off-by: Guan Jing <guanjing6@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Reviewed-by: Cheng Jian <cj.chengjian@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent commit: f389569e
......@@ -8,6 +8,7 @@
#define _LINUX_DELAYACCT_H
#include <uapi/linux/taskstats.h>
#include <linux/kabi.h>
/*
* Per-task flags relevant to delay accounting
......@@ -53,6 +54,9 @@ struct task_delay_info {
u32 freepages_count; /* total count of memory reclaim */
u32 thrashing_count; /* total count of thrash waits */
KABI_RESERVE(1)
KABI_RESERVE(2)
};
#endif
......
......@@ -35,6 +35,7 @@
#include <linux/seqlock.h>
#include <linux/kcsan.h>
#include <linux/thread_bits.h>
#include <linux/kabi.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
......@@ -307,6 +308,10 @@ struct sched_info {
/* When were we last queued to run? */
unsigned long long last_queued;
KABI_RESERVE(1)
KABI_RESERVE(2)
KABI_RESERVE(3)
KABI_RESERVE(4)
#endif /* CONFIG_SCHED_INFO */
};
......@@ -455,6 +460,11 @@ struct sched_statistics {
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
KABI_RESERVE(1)
KABI_RESERVE(2)
KABI_RESERVE(3)
KABI_RESERVE(4)
#endif
};
......@@ -494,6 +504,11 @@ struct sched_entity {
*/
struct sched_avg avg;
#endif
KABI_RESERVE(1)
KABI_RESERVE(2)
KABI_RESERVE(3)
KABI_RESERVE(4)
};
struct sched_rt_entity {
......@@ -512,6 +527,9 @@ struct sched_rt_entity {
/* rq "owned" by this entity/group: */
struct rt_rq *my_q;
#endif
KABI_RESERVE(1)
KABI_RESERVE(2)
} __randomize_layout;
struct sched_dl_entity {
......@@ -589,6 +607,9 @@ struct sched_dl_entity {
*/
struct sched_dl_entity *pi_se;
#endif
KABI_RESERVE(1)
KABI_RESERVE(2)
};
#ifdef CONFIG_UCLAMP_TASK
......@@ -1369,6 +1390,15 @@ struct task_struct {
*/
randomized_struct_fields_end
KABI_RESERVE(1)
KABI_RESERVE(2)
KABI_RESERVE(3)
KABI_RESERVE(4)
KABI_RESERVE(5)
KABI_RESERVE(6)
KABI_RESERVE(7)
KABI_RESERVE(8)
/* CPU-specific state of this task: */
struct thread_struct thread;
......
......@@ -12,6 +12,7 @@
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>
#include <linux/kabi.h>
/*
* Types defining task->signal and task->sighand and APIs using them:
......@@ -235,6 +236,11 @@ struct signal_struct {
* and may have inconsistent
* permissions.
*/
KABI_RESERVE(1)
KABI_RESERVE(2)
KABI_RESERVE(3)
KABI_RESERVE(4)
} __randomize_layout;
/*
......
......@@ -5,6 +5,7 @@
#include <linux/topology.h>
#include <linux/sched/idle.h>
#include <linux/kabi.h>
/*
* sched-domains (multiprocessor balancing) declarations:
......@@ -152,6 +153,9 @@ struct sched_domain {
};
struct sched_domain_shared *shared;
KABI_RESERVE(1)
KABI_RESERVE(2)
unsigned int span_weight;
/*
* Span of all CPUs in this domain.
......
......@@ -6,6 +6,7 @@
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/ratelimit.h>
#include <linux/kabi.h>
/*
* Some day this will be a full-fledged user tracking system..
......@@ -42,6 +43,9 @@ struct user_struct {
/* Miscellaneous per-user rate limit */
struct ratelimit_state ratelimit;
KABI_RESERVE(1)
KABI_RESERVE(2)
};
extern int uids_sysfs_init(void);
......
......@@ -384,6 +384,8 @@ struct cfs_bandwidth {
int nr_periods;
int nr_throttled;
u64 throttled_time;
KABI_RESERVE(1)
#endif
};
......@@ -618,6 +620,9 @@ struct cfs_rq {
struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
KABI_RESERVE(1)
KABI_RESERVE(2)
};
static inline int rt_bandwidth_enabled(void)
......@@ -843,6 +848,11 @@ struct root_domain {
* CPUs of the rd. Protected by RCU.
*/
struct perf_domain __rcu *pd;
KABI_RESERVE(1)
KABI_RESERVE(2)
KABI_RESERVE(3)
KABI_RESERVE(4)
};
extern void init_defrootdomain(void);
......@@ -1078,6 +1088,9 @@ struct rq {
/* Must be inspected within a rcu lock section */
struct cpuidle_state *idle_state;
#endif
KABI_RESERVE(1)
KABI_RESERVE(2)
};
#ifdef CONFIG_FAIR_GROUP_SCHED
......@@ -1519,6 +1532,8 @@ struct sched_group {
struct sched_group_capacity *sgc;
int asym_prefer_cpu; /* CPU of highest priority in group */
KABI_RESERVE(1)
KABI_RESERVE(2)
/*
* The CPUs this group covers.
*
......@@ -1870,6 +1885,9 @@ struct sched_class {
#ifdef CONFIG_FAIR_GROUP_SCHED
void (*task_change_group)(struct task_struct *p, int type);
#endif
KABI_RESERVE(1)
KABI_RESERVE(2)
} __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册