Commit 39344a28 authored by Tang Yizhou, committed by Yang Yingliang

share_pool: Calculate sp_alloc() size for a sp_group

ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4EUVI
CVE: NA

-------------------------------------------------
spg_overview() will show how much normal-page memory and hugepage memory
is allocated for each sp_group (see the sample output below the sign-off block).
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 5e06fdd6
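For illustration only (the group id and all sizes below are made-up values), the statistics output produced by the updated seq_printf() calls in this patch — one summary line from spg_overview_show() plus one line per group from idr_spg_stat_cb() — would read along these lines:

    Share pool total size: 6144 KB, spa total num: 3.
    Group      1 size: 6144 KB, spa num: 3, normal alloc: 2048 KB, huge alloc: 4096 KB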
@@ -66,6 +66,8 @@ extern bool vmap_allow_huge;
  */
 struct sp_group {
 	int id;
+	/* record the number of hugepage allocation failures */
+	int hugepage_failures;
 	struct file *file;
 	struct file *file_hugetlb;
 	/* list head of processes */
@@ -76,12 +78,14 @@ struct sp_group {
 	atomic_t spa_num;
 	/* total size of all sp_area from sp_alloc and k2u(spg) */
 	atomic64_t size;
-	/* record the number of hugepage allocation failures */
-	int hugepage_failures;
-	/* is_alive == false means it's being destroyed */
-	bool is_alive;
+	/* total size of all sp_area from sp_alloc normal page */
+	atomic64_t alloc_nsize;
+	/* total size of all sp_area from sp_alloc hugepage */
+	atomic64_t alloc_hsize;
 	/* we define the creator process of a sp_group as owner */
 	struct task_struct *owner;
+	/* is_alive == false means it's being destroyed */
+	bool is_alive;
 	/* dvpp_multi_spaces == true means multiple dvpp 16G spaces are set */
 	bool dvpp_multi_spaces;
 	unsigned long dvpp_va_start;
......
@@ -52,8 +52,8 @@
 #define spg_valid(spg) ((spg) && ((spg)->is_alive == true))
 #define ESPGMMEXIT 4000
 
-#define byte2kb(size) ((size) / 1024)
-#define byte2mb(size) ((size) / 1024 / 1024)
+#define byte2kb(size) ((size) >> 10)
+#define byte2mb(size) ((size) >> 20)
 
 /* mdc scene hack */
 int enable_mdc_default_group;
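The byte2kb()/byte2mb() rewrite above swaps division for a right shift. For the non-negative byte counts these macros are fed (values read back from the atomic64 counters), both forms give identical results; a quick stand-alone user-space check, not part of the patch, is sketched below.

#include <stdio.h>

int main(void)
{
	/* Representative non-negative byte counts; 1536 exercises rounding. */
	long sizes[] = { 0, 1536, 4096, 2 * 1024 * 1024 };
	unsigned i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		long s = sizes[i];

		/* Division truncates toward zero; for s >= 0 the shift agrees. */
		printf("%8ld bytes -> div: %ld KB, shift: %ld KB\n",
		       s, s / 1024, s >> 10);
	}
	return 0;
}

For negative signed operands the two would diverge (division truncates toward zero, while right-shifting a negative value is implementation-defined in C), but these counters only ever hold non-negative sizes.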
@@ -366,6 +366,8 @@ static struct sp_group *find_or_alloc_sp_group(int spg_id)
 		spg->id = spg_id;
 		atomic_set(&spg->spa_num, 0);
 		atomic64_set(&spg->size, 0);
+		atomic64_set(&spg->alloc_nsize, 0);
+		atomic64_set(&spg->alloc_hsize, 0);
 		spg->is_alive = true;
 		spg->hugepage_failures = 0;
 		spg->dvpp_multi_spaces = false;
@@ -925,6 +927,12 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
 	if (spa->spg) {
 		atomic_inc(&spg->spa_num);
 		atomic64_add(size, &spg->size);
+		if (type == SPA_TYPE_ALLOC) {
+			if (spa->is_hugepage)
+				atomic64_add(size, &spg->alloc_hsize);
+			else
+				atomic64_add(size, &spg->alloc_nsize);
+		}
 		atomic_inc(&spg_stat.spa_total_num);
 		atomic64_add(size, &spg_stat.spa_total_size);
 		list_add_tail(&spa->link, &spg->spa_list);
@@ -1003,6 +1011,12 @@ static void sp_free_area(struct sp_area *spa)
 	if (spa->spg) {
 		atomic_dec(&spa->spg->spa_num);
 		atomic64_sub(spa->real_size, &spa->spg->size);
+		if (spa->type == SPA_TYPE_ALLOC) {
+			if (spa->is_hugepage)
+				atomic64_sub(spa->real_size, &spa->spg->alloc_hsize);
+			else
+				atomic64_sub(spa->real_size, &spa->spg->alloc_nsize);
+		}
 		atomic_dec(&spg_stat.spa_total_num);
 		atomic64_sub(spa->real_size, &spg_stat.spa_total_size);
 		list_del(&spa->link);
@@ -2509,16 +2523,18 @@ static int idr_spg_stat_cb(int id, void *p, void *data)
 	struct sp_group *spg = p;
 	struct seq_file *seq = data;
 
-	seq_printf(seq, "Group %-10d size: %13ld KB, spa num: %d.\n",
-		   id, byte2kb(atomic64_read(&spg->size)),
-		   atomic_read(&spg->spa_num));
+	seq_printf(seq, "Group %6d size: %ld KB, spa num: %d, normal alloc: %ld KB, "
+		   "huge alloc: %ld KB\n",
+		   id, byte2kb(atomic64_read(&spg->size)), atomic_read(&spg->spa_num),
+		   byte2kb(atomic64_read(&spg->alloc_nsize)),
+		   byte2kb(atomic64_read(&spg->alloc_hsize)));
 
 	return 0;
 }
 
 static void spg_overview_show(struct seq_file *seq)
 {
-	seq_printf(seq, "Share pool total size: %13ld KB, spa total num: %d.\n",
+	seq_printf(seq, "Share pool total size: %ld KB, spa total num: %d.\n",
 		   byte2kb(atomic64_read(&spg_stat.spa_total_size)),
 		   atomic_read(&spg_stat.spa_total_num));
 	mutex_lock(&sp_mutex);
......