Commit fc78b18d authored by Ding Tianhong, committed by Yang Yingliang

ascend: sharepool: support multi-group mode

ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4EUVI
CVE: NA

-------------------------------------------------

The share pool needs to support a multi-group mode for the automotive platform;
this enhances system reliability and security.

The new multi-group mode can be enabled via the boot command line. When it is
disabled, the share pool only supports the default single-group mode. When it
is enabled, a task can be added to several groups (at most 3k), and at most
50k groups can be created in the whole system.
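
As a hedged illustration only (not taken from this patch): a boot-time switch
like this is typically registered with __setup(). The parameter name
enable_sp_multi_group, the flag sp_multi_group_mode, and the handler below are
hypothetical placeholders, not the identifiers introduced by this commit.

    #include <linux/init.h>
    #include <linux/types.h>

    /* Hypothetical flag; the real patch may use a different name and scope. */
    static bool sp_multi_group_mode;

    /* Hypothetical handler for a boot parameter enabling multi-group mode. */
    static int __init sp_multi_group_setup(char *str)
    {
            sp_multi_group_mode = true;
            return 1;
    }
    __setup("enable_sp_multi_group", sp_multi_group_setup);

Booting with such a parameter on the kernel command line would enable the
mode; omitting it keeps the default single-group behaviour.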

This patch also fixes the KABI problem for struct mm_struct.
Signed-off-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent c6c8f37f
@@ -470,11 +470,6 @@ struct mm_struct {
 #endif
         struct user_namespace *user_ns;
-#ifdef CONFIG_ASCEND_SHARE_POOL
-        struct sp_group *sp_group;
-        struct list_head sp_node;       /* link to sp_group->procs */
-        int sp_stat_id;
-#endif
         /* store ref to file /proc/<pid>/exe symlink points to */
         struct file __rcu *exe_file;
 #ifdef CONFIG_MMU_NOTIFIER
@@ -525,6 +520,10 @@ struct mm_struct {
         KABI_RESERVE(1)
 #endif
+#ifdef CONFIG_ASCEND_SHARE_POOL
+        struct sp_group_master *sp_group_master;
+#endif
         KABI_RESERVE(2)
         KABI_RESERVE(3)
         KABI_RESERVE(4)
...
@@ -80,9 +80,9 @@ struct sp_group {
         int hugepage_failures;
         struct file *file;
         struct file *file_hugetlb;
-        /* list head of processes */
+        /* list head of processes (sp_group_node, each represents a process) */
         struct list_head procs;
-        /* list of sp_area. it is protected by spin_lock sp_area_lock */
+        /* list head of sp_area. it is protected by spin_lock sp_area_lock */
         struct list_head spa_list;
         /* number of sp_area */
         atomic_t spa_num;
@@ -107,6 +107,34 @@ struct sp_group {
         struct rw_semaphore rw_lock;
 };
+
+/* a per-process(per mm) struct which manages a sp_group_node list */
+struct sp_group_master {
+        /*
+         * number of sp groups the process belongs to,
+         * a.k.a the number of sp_node in node_list
+         */
+        unsigned int count;
+        int sp_stat_id;
+        /* list head of sp_node */
+        struct list_head node_list;
+        struct mm_struct *mm;
+};
+
+/*
+ * each instance represents an sp group the process belongs to
+ * sp_group_master : sp_group_node = 1 : N
+ * sp_group_node->spg : sp_group = 1 : 1
+ * sp_group_node : sp_group->procs = N : 1
+ */
+struct sp_group_node {
+        /* list node in sp_group->procs */
+        struct list_head proc_node;
+        /* list node in sp_group_master->node_list */
+        struct list_head group_node;
+        struct sp_group_master *master;
+        struct sp_group *spg;
+};
+
 struct sp_walk_data {
         struct page **pages;
         unsigned int page_count;
@@ -150,9 +178,7 @@ struct sp_proc_stat {
 static inline void sp_init_mm(struct mm_struct *mm)
 {
-        mm->sp_group = NULL;
-        INIT_LIST_HEAD(&mm->sp_node);
-        mm->sp_stat_id = 0;
+        mm->sp_group_master = NULL;
 }
 extern int sp_group_add_task(int pid, int spg_id);
...
(The rest of this diff is collapsed.)
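
As a hedged usage sketch of the structures added above (not part of this
commit): each mm now carries a single sp_group_master pointer, the master
strings its sp_group_node entries on node_list, and every node references
exactly one sp_group. The helper below, including its name
sp_print_groups_of_mm and the assumption that the definitions live in
<linux/share_pool.h>, is hypothetical; locking of the lists is omitted for
brevity.

    #include <linux/list.h>
    #include <linux/printk.h>
    #include <linux/mm_types.h>
    #include <linux/share_pool.h>   /* assumed location of the sp_* types */

    /* Hypothetical helper: walk every share-pool group this mm was added to. */
    static void sp_print_groups_of_mm(struct mm_struct *mm)
    {
            struct sp_group_master *master = mm->sp_group_master;
            struct sp_group_node *spg_node;

            if (!master)    /* task was never added to any sp group */
                    return;

            /* node_list links sp_group_node entries via their group_node member */
            list_for_each_entry(spg_node, &master->node_list, group_node)
                    pr_info("mm %p belongs to sp_group %p\n", mm, spg_node->spg);
    }

Keeping only one pointer in struct mm_struct, with the per-group bookkeeping
moved into sp_group_master and sp_group_node, is what allows a task to join
multiple groups without further growing struct mm_struct.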