Commit d83dcc99 authored by Wang Wensheng, committed by Zheng Zengkai

share_pool: Add proc interfaces to show sp info

ascend inclusion
category: Feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4NDAW
CVE: NA

-------------------

1. The /proc/sharepool/* interfaces show system-wide share pool state:
   all share pool groups, plus every process that belongs to one.
2. /proc/<pid>/sp_group exposes the per-task sp_group state.
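
For illustration (not part of the commit): a minimal userspace sketch that
dumps the new interfaces, assuming a kernel built with
CONFIG_ASCEND_SHARE_POOL. The paths come from proc_sharepool_init() and the
new "sp_group" pid entry in the diff below.

/* sp_dump.c - print the share pool proc interfaces */
#include <stdio.h>

static void dump(const char *path)
{
	char buf[4096];
	size_t n;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);  /* absent if share pool is disabled */
		return;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
}

int main(int argc, char **argv)
{
	char path[64];

	dump("/proc/sharepool/proc_stat");
	dump("/proc/sharepool/spa_stat");
	dump("/proc/sharepool/proc_overview");
	if (argc > 1) {  /* optionally pass a pid for the per-task view */
		snprintf(path, sizeof(path), "/proc/%s/sp_group", argv[1]);
		dump(path);
	}
	return 0;
}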
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 4f92a284
@@ -96,6 +96,7 @@
#include <linux/posix-timers.h>
#include <linux/time_namespace.h>
#include <linux/resctrl.h>
#include <linux/share_pool.h>
#include <trace/events/oom.h>
#include "internal.h"
#include "fd.h"
@@ -3297,6 +3298,9 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_SECCOMP_CACHE_DEBUG
ONE("seccomp_cache", S_IRUSR, proc_pid_seccomp_cache),
#endif
#ifdef CONFIG_ASCEND_SHARE_POOL
ONE("sp_group", 0444, proc_sp_group_state),
#endif
};
static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
@@ -3631,6 +3635,9 @@ static const struct pid_entry tid_base_stuff[] = {
#ifdef CONFIG_SECCOMP_CACHE_DEBUG
ONE("seccomp_cache", S_IRUSR, proc_pid_seccomp_cache),
#endif
#ifdef CONFIG_ASCEND_SHARE_POOL
ONE("sp_group", 0444, proc_sp_group_state),
#endif
};
static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
@@ -20,6 +20,37 @@
#define pr_fmt(fmt) "share pool: " fmt
#include <linux/share_pool.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/mm_types.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/printk.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/atomic.h>
#include <linux/lockdep.h>
#include <linux/kernel.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rmap.h>
#include <linux/compaction.h>
#include <linux/preempt.h>
#include <linux/swapops.h>
#include <linux/mmzone.h>
#include <linux/timekeeping.h>
#include <linux/time64.h>
/* access control mode macros */
#define AC_NONE 0
@@ -63,14 +94,406 @@ static bool is_sp_dev_addr_enabled(int device_id)
return sp_dev_va_size[device_id];
}
/* idr of all sp_groups */
static DEFINE_IDR(sp_group_idr);
/* rw semaphore for sp_group_idr and mm->sp_group_master */
static DECLARE_RWSEM(sp_group_sem);
static BLOCKING_NOTIFIER_HEAD(sp_notifier_chain);
static DEFINE_IDA(sp_group_id_ida);
/*** Statistical and maintenance tools ***/
/* idr of all sp_proc_stats */
static DEFINE_IDR(sp_proc_stat_idr);
/* rw semaphore for sp_proc_stat_idr */
static DECLARE_RWSEM(sp_proc_stat_sem);
/* idr of all sp_spg_stats */
static DEFINE_IDR(sp_spg_stat_idr);
/* rw semaphore for sp_spg_stat_idr */
static DECLARE_RWSEM(sp_spg_stat_sem);
/* for kthread buff_module_guard_work */
static struct sp_proc_stat kthread_stat;
/* The caller must hold sp_group_sem */
static struct sp_group_master *sp_init_group_master_locked(
struct mm_struct *mm, bool *exist)
{
struct sp_group_master *master = mm->sp_group_master;
if (master) {
*exist = true;
return master;
}
master = kmalloc(sizeof(struct sp_group_master), GFP_KERNEL);
if (master == NULL)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&master->node_list);
master->count = 0;
master->stat = NULL;
master->mm = mm;
mm->sp_group_master = master;
*exist = false;
return master;
}
static struct sp_proc_stat *create_proc_stat(struct mm_struct *mm,
struct task_struct *tsk)
{
struct sp_proc_stat *stat;
stat = kmalloc(sizeof(*stat), GFP_KERNEL);
if (stat == NULL)
return ERR_PTR(-ENOMEM);
atomic_set(&stat->use_count, 1);
atomic64_set(&stat->alloc_size, 0);
atomic64_set(&stat->k2u_size, 0);
stat->tgid = tsk->tgid;
stat->mm = mm;
mutex_init(&stat->lock);
hash_init(stat->hash);
get_task_comm(stat->comm, tsk);
return stat;
}
static struct sp_proc_stat *sp_init_proc_stat(struct sp_group_master *master,
struct mm_struct *mm, struct task_struct *tsk)
{
struct sp_proc_stat *stat;
int alloc_id, tgid = tsk->tgid;
down_write(&sp_proc_stat_sem);
stat = master->stat;
if (stat) {
up_write(&sp_proc_stat_sem);
return stat;
}
stat = create_proc_stat(mm, tsk);
if (IS_ERR(stat)) {
up_write(&sp_proc_stat_sem);
return stat;
}
alloc_id = idr_alloc(&sp_proc_stat_idr, stat, tgid, tgid + 1, GFP_KERNEL);
if (alloc_id < 0) {
up_write(&sp_proc_stat_sem);
pr_err_ratelimited("proc stat idr alloc failed %d\n", alloc_id);
kfree(stat);
return ERR_PTR(alloc_id);
}
master->stat = stat;
up_write(&sp_proc_stat_sem);
return stat;
}
static void update_spg_stat_alloc(unsigned long size, bool inc,
bool huge, struct sp_spg_stat *stat)
{
if (inc) {
atomic_inc(&stat->spa_num);
atomic64_add(size, &stat->size);
atomic64_add(size, &stat->alloc_size);
if (huge)
atomic64_add(size, &stat->alloc_hsize);
else
atomic64_add(size, &stat->alloc_nsize);
} else {
atomic_dec(&stat->spa_num);
atomic64_sub(size, &stat->size);
atomic64_sub(size, &stat->alloc_size);
if (huge)
atomic64_sub(size, &stat->alloc_hsize);
else
atomic64_sub(size, &stat->alloc_nsize);
}
}
static void update_spg_stat_k2u(unsigned long size, bool inc,
struct sp_spg_stat *stat)
{
if (inc) {
atomic_inc(&stat->spa_num);
atomic64_add(size, &stat->size);
atomic64_add(size, &stat->k2u_size);
} else {
atomic_dec(&stat->spa_num);
atomic64_sub(size, &stat->size);
atomic64_sub(size, &stat->k2u_size);
}
}
/* per process/sp-group memory usage statistics */
struct spg_proc_stat {
int tgid;
int spg_id; /* 0 for non-group data, such as k2u_task */
struct hlist_node pnode; /* hlist node in sp_proc_stat->hash */
struct hlist_node gnode; /* hlist node in sp_spg_stat->hash */
struct sp_proc_stat *proc_stat;
struct sp_spg_stat *spg_stat;
/*
* alloc amount minus free amount, may be negative when freed by
* another task in the same sp group.
*/
atomic64_t alloc_size;
atomic64_t k2u_size;
};
static void update_spg_proc_stat_alloc(unsigned long size, bool inc,
struct spg_proc_stat *stat)
{
struct sp_proc_stat *proc_stat = stat->proc_stat;
if (inc) {
atomic64_add(size, &stat->alloc_size);
atomic64_add(size, &proc_stat->alloc_size);
} else {
atomic64_sub(size, &stat->alloc_size);
atomic64_sub(size, &proc_stat->alloc_size);
}
}
static void update_spg_proc_stat_k2u(unsigned long size, bool inc,
struct spg_proc_stat *stat)
{
struct sp_proc_stat *proc_stat = stat->proc_stat;
if (inc) {
atomic64_add(size, &stat->k2u_size);
atomic64_add(size, &proc_stat->k2u_size);
} else {
atomic64_sub(size, &stat->k2u_size);
atomic64_sub(size, &proc_stat->k2u_size);
}
}
static struct spg_proc_stat *find_spg_proc_stat(
struct sp_proc_stat *proc_stat, int tgid, int spg_id)
{
struct spg_proc_stat *stat = NULL;
mutex_lock(&proc_stat->lock);
hash_for_each_possible(proc_stat->hash, stat, pnode, spg_id) {
if (stat->spg_id == spg_id)
break;
}
mutex_unlock(&proc_stat->lock);
return stat;
}
static struct spg_proc_stat *create_spg_proc_stat(int tgid, int spg_id)
{
struct spg_proc_stat *stat;
stat = kmalloc(sizeof(struct spg_proc_stat), GFP_KERNEL);
if (stat == NULL)
return ERR_PTR(-ENOMEM);
stat->tgid = tgid;
stat->spg_id = spg_id;
atomic64_set(&stat->alloc_size, 0);
atomic64_set(&stat->k2u_size, 0);
return stat;
}
static struct spg_proc_stat *sp_init_spg_proc_stat(
struct sp_proc_stat *proc_stat, int tgid, struct sp_group *spg)
{
struct spg_proc_stat *stat;
int spg_id = spg->id; /* visit spg id locklessly */
struct sp_spg_stat *spg_stat = spg->stat;
stat = find_spg_proc_stat(proc_stat, tgid, spg_id);
if (stat)
return stat;
stat = create_spg_proc_stat(tgid, spg_id);
if (IS_ERR(stat))
return stat;
stat->proc_stat = proc_stat;
stat->spg_stat = spg_stat;
mutex_lock(&proc_stat->lock);
hash_add(proc_stat->hash, &stat->pnode, stat->spg_id);
mutex_unlock(&proc_stat->lock);
mutex_lock(&spg_stat->lock);
hash_add(spg_stat->hash, &stat->gnode, stat->tgid);
mutex_unlock(&spg_stat->lock);
return stat;
}
/*
* The caller must
* 1. ensure there is no concurrent access to task_struct and mm_struct.
* 2. hold sp_group_sem to protect sp_group_master (beware of ABBA deadlock)
*/
static struct spg_proc_stat *sp_init_process_stat(struct task_struct *tsk,
struct mm_struct *mm, struct sp_group *spg)
{
struct sp_group_master *master;
bool exist;
struct sp_proc_stat *proc_stat;
struct spg_proc_stat *spg_proc_stat;
master = sp_init_group_master_locked(mm, &exist);
if (IS_ERR(master))
return (struct spg_proc_stat *)master;
proc_stat = sp_init_proc_stat(master, mm, tsk);
if (IS_ERR(proc_stat))
return (struct spg_proc_stat *)proc_stat;
spg_proc_stat = sp_init_spg_proc_stat(proc_stat, tsk->tgid, spg);
return spg_proc_stat;
}
static struct sp_spg_stat *create_spg_stat(int spg_id)
{
struct sp_spg_stat *stat;
stat = kmalloc(sizeof(*stat), GFP_KERNEL);
if (stat == NULL)
return ERR_PTR(-ENOMEM);
stat->spg_id = spg_id;
atomic_set(&stat->hugepage_failures, 0);
atomic_set(&stat->spa_num, 0);
atomic64_set(&stat->size, 0);
atomic64_set(&stat->alloc_nsize, 0);
atomic64_set(&stat->alloc_hsize, 0);
atomic64_set(&stat->alloc_size, 0);
mutex_init(&stat->lock);
hash_init(stat->hash);
return stat;
}
static int sp_init_spg_stat(struct sp_group *spg)
{
struct sp_spg_stat *stat;
int ret, spg_id = spg->id;
stat = create_spg_stat(spg_id);
if (IS_ERR(stat))
return PTR_ERR(stat);
down_write(&sp_spg_stat_sem);
ret = idr_alloc(&sp_spg_stat_idr, stat, spg_id, spg_id + 1,
GFP_KERNEL);
up_write(&sp_spg_stat_sem);
if (ret < 0) {
pr_err_ratelimited("group %d idr alloc failed, ret %d\n",
spg_id, ret);
kfree(stat);
}
spg->stat = stat;
return ret;
}
static void free_spg_stat(int spg_id)
{
struct sp_spg_stat *stat;
down_write(&sp_spg_stat_sem);
stat = idr_remove(&sp_spg_stat_idr, spg_id);
up_write(&sp_spg_stat_sem);
WARN_ON(!stat);
kfree(stat);
}
/*
* Group '0' is for k2u_task and pass-through. No process will actually
* be added to it.
*/
static struct sp_group *spg_none;
/* statistics of all sp area, protected by sp_area_lock */
struct sp_spa_stat {
unsigned int total_num;
unsigned int alloc_num;
unsigned int k2u_task_num;
unsigned int k2u_spg_num;
unsigned long total_size;
unsigned long alloc_size;
unsigned long k2u_task_size;
unsigned long k2u_spg_size;
unsigned long dvpp_size;
unsigned long dvpp_va_size;
};
static struct sp_spa_stat spa_stat;
/* statistics of all sp group born from sp_alloc and k2u(spg) */
struct sp_overall_stat {
atomic_t spa_total_num;
atomic64_t spa_total_size;
};
static struct sp_overall_stat sp_overall_stat;
/*** Global share pool VA allocator ***/
enum spa_type {
SPA_TYPE_ALLOC = 1,
SPA_TYPE_K2TASK,
SPA_TYPE_K2SPG,
};
/*
* We bump the reference when each mmap succeeds, and it will be dropped
* when the vma is about to be released, so the sp_area object will be
* automatically freed when all tasks in the sp group have exited.
*/
struct sp_area {
unsigned long va_start;
unsigned long va_end; /* va_end always align to hugepage */
unsigned long real_size; /* real size with alignment */
unsigned long region_vstart; /* belong to normal region or DVPP region */
unsigned long flags;
bool is_hugepage;
bool is_dead;
atomic_t use_count; /* How many vmas use this VA region */
struct rb_node rb_node; /* address sorted rbtree */
struct list_head link; /* link to the spg->head */
struct sp_group *spg;
enum spa_type type; /* where spa born from */
struct mm_struct *mm; /* owner of k2u(task) */
unsigned long kva; /* shared kva */
pid_t applier; /* the original applier process */
int node_id; /* memory node */
int device_id;
};
static DEFINE_SPINLOCK(sp_area_lock);
static struct rb_root sp_area_root = RB_ROOT;
static unsigned long spa_size(struct sp_area *spa)
{
return spa->real_size;
}
static struct file *spa_file(struct sp_area *spa)
{
if (spa->is_hugepage)
return spa->spg->file_hugetlb;
else
return spa->spg->file;
}
static struct sp_group *create_spg(int spg_id)
{
return NULL;
@@ -137,6 +560,10 @@ int sp_group_add_task(int pid, int spg_id)
}
EXPORT_SYMBOL_GPL(sp_group_add_task);
static void __sp_area_drop_locked(struct sp_area *spa)
{
}
/**
* mg_sp_group_del_task() - delete a process from a sp group.
* @pid: the pid of the task to be deleted
@@ -414,6 +841,508 @@ static int __init enable_sp_multi_group_mode(char *s)
}
__setup("enable_sp_multi_group_mode", enable_sp_multi_group_mode);
/*** Statistical and maintenance functions ***/
static void free_process_spg_proc_stat(struct sp_proc_stat *proc_stat)
{
int i;
struct spg_proc_stat *stat;
struct hlist_node *tmp;
struct sp_spg_stat *spg_stat;
/* traverse proc_stat->hash locklessly as process is exiting */
hash_for_each_safe(proc_stat->hash, i, tmp, stat, pnode) {
spg_stat = stat->spg_stat;
mutex_lock(&spg_stat->lock);
hash_del(&stat->gnode);
mutex_unlock(&spg_stat->lock);
hash_del(&stat->pnode);
kfree(stat);
}
}
static void free_sp_proc_stat(struct sp_proc_stat *stat)
{
free_process_spg_proc_stat(stat);
down_write(&sp_proc_stat_sem);
stat->mm->sp_group_master->stat = NULL;
idr_remove(&sp_proc_stat_idr, stat->tgid);
up_write(&sp_proc_stat_sem);
kfree(stat);
}
/* the caller must make sure stat is not NULL */
void sp_proc_stat_drop(struct sp_proc_stat *stat)
{
if (atomic_dec_and_test(&stat->use_count))
free_sp_proc_stat(stat);
}
static void get_mm_rss_info(struct mm_struct *mm, unsigned long *anon,
unsigned long *file, unsigned long *shmem, unsigned long *total_rss)
{
*anon = get_mm_counter(mm, MM_ANONPAGES);
*file = get_mm_counter(mm, MM_FILEPAGES);
*shmem = get_mm_counter(mm, MM_SHMEMPAGES);
*total_rss = *anon + *file + *shmem;
}
static long get_proc_alloc(struct sp_proc_stat *stat)
{
return byte2kb(atomic64_read(&stat->alloc_size));
}
static long get_proc_k2u(struct sp_proc_stat *stat)
{
return byte2kb(atomic64_read(&stat->k2u_size));
}
static long get_spg_alloc(struct sp_spg_stat *stat)
{
return byte2kb(atomic64_read(&stat->alloc_size));
}
static long get_spg_alloc_nsize(struct sp_spg_stat *stat)
{
return byte2kb(atomic64_read(&stat->alloc_nsize));
}
static long get_spg_proc_alloc(struct spg_proc_stat *stat)
{
return byte2kb(atomic64_read(&stat->alloc_size));
}
static long get_spg_proc_k2u(struct spg_proc_stat *stat)
{
return byte2kb(atomic64_read(&stat->k2u_size));
}
static void get_process_sp_res(struct sp_proc_stat *stat,
long *sp_res_out, long *sp_res_nsize_out)
{
int i;
struct spg_proc_stat *spg_proc_stat;
struct sp_spg_stat *spg_stat;
long sp_res = 0, sp_res_nsize = 0;
mutex_lock(&stat->lock);
hash_for_each(stat->hash, i, spg_proc_stat, pnode) {
spg_stat = spg_proc_stat->spg_stat;
sp_res += get_spg_alloc(spg_stat);
sp_res_nsize += get_spg_alloc_nsize(spg_stat);
}
mutex_unlock(&stat->lock);
*sp_res_out = sp_res;
*sp_res_nsize_out = sp_res_nsize;
}
/*
* RSS statistics may deviate by at most 64 pages (256KB);
* see check_sync_rss_stat().
*/
static void get_process_non_sp_res(unsigned long total_rss, unsigned long shmem,
long sp_res_nsize, long *non_sp_res_out, long *non_sp_shm_out)
{
long non_sp_res, non_sp_shm;
non_sp_res = page2kb(total_rss) - sp_res_nsize;
non_sp_res = non_sp_res < 0 ? 0 : non_sp_res;
non_sp_shm = page2kb(shmem) - sp_res_nsize;
non_sp_shm = non_sp_shm < 0 ? 0 : non_sp_shm;
*non_sp_res_out = non_sp_res;
*non_sp_shm_out = non_sp_shm;
}
static long get_sp_res_by_spg_proc(struct spg_proc_stat *stat)
{
return byte2kb(atomic64_read(&stat->spg_stat->alloc_size));
}
static unsigned long get_process_prot_locked(int spg_id, struct mm_struct *mm)
{
unsigned long prot = 0;
struct sp_group_node *spg_node;
struct sp_group_master *master = mm->sp_group_master;
list_for_each_entry(spg_node, &master->node_list, group_node) {
if (spg_node->spg->id == spg_id) {
prot = spg_node->prot;
break;
}
}
return prot;
}
static void print_process_prot(struct seq_file *seq, unsigned long prot)
{
if (prot == PROT_READ)
seq_puts(seq, "R");
else if (prot == (PROT_READ | PROT_WRITE))
seq_puts(seq, "RW");
else /* e.g. spg_none */
seq_puts(seq, "-");
}
int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
struct mm_struct *mm = task->mm;
struct sp_group_master *master;
struct sp_proc_stat *proc_stat;
struct spg_proc_stat *spg_proc_stat;
int i;
unsigned long anon, file, shmem, total_rss, prot;
long sp_res, sp_res_nsize, non_sp_res, non_sp_shm;
if (!mm)
return 0;
master = mm->sp_group_master;
if (!master)
return 0;
get_mm_rss_info(mm, &anon, &file, &shmem, &total_rss);
proc_stat = master->stat;
get_process_sp_res(proc_stat, &sp_res, &sp_res_nsize);
get_process_non_sp_res(total_rss, shmem, sp_res_nsize,
&non_sp_res, &non_sp_shm);
seq_puts(m, "Share Pool Aggregate Data of This Process\n\n");
seq_printf(m, "%-8s %-16s %-9s %-9s %-9s %-10s %-10s %-8s\n",
"PID", "COMM", "SP_ALLOC", "SP_K2U", "SP_RES", "Non-SP_RES",
"Non-SP_Shm", "VIRT");
seq_printf(m, "%-8d %-16s %-9ld %-9ld %-9ld %-10ld %-10ld %-8ld\n",
proc_stat->tgid, proc_stat->comm,
get_proc_alloc(proc_stat),
get_proc_k2u(proc_stat),
sp_res, non_sp_res, non_sp_shm,
page2kb(mm->total_vm));
seq_puts(m, "\n\nProcess in Each SP Group\n\n");
seq_printf(m, "%-8s %-9s %-9s %-9s %-4s\n",
"Group_ID", "SP_ALLOC", "SP_K2U", "SP_RES", "PROT");
/* to prevent ABBA deadlock, first hold sp_group_sem */
down_read(&sp_group_sem);
mutex_lock(&proc_stat->lock);
hash_for_each(proc_stat->hash, i, spg_proc_stat, pnode) {
prot = get_process_prot_locked(spg_proc_stat->spg_id, mm);
seq_printf(m, "%-8d %-9ld %-9ld %-9ld ",
spg_proc_stat->spg_id,
get_spg_proc_alloc(spg_proc_stat),
get_spg_proc_k2u(spg_proc_stat),
get_sp_res_by_spg_proc(spg_proc_stat));
print_process_prot(m, prot);
seq_putc(m, '\n');
}
mutex_unlock(&proc_stat->lock);
up_read(&sp_group_sem);
return 0;
}
static void rb_spa_stat_show(struct seq_file *seq)
{
struct rb_node *node;
struct sp_area *spa, *prev = NULL;
spin_lock(&sp_area_lock);
for (node = rb_first(&sp_area_root); node; node = rb_next(node)) {
__sp_area_drop_locked(prev);
spa = rb_entry(node, struct sp_area, rb_node);
prev = spa;
atomic_inc(&spa->use_count);
spin_unlock(&sp_area_lock);
if (spa->spg == spg_none) /* k2u to task */
seq_printf(seq, "%-10s ", "None");
else {
down_read(&spa->spg->rw_lock);
if (spg_valid(spa->spg)) /* k2u to group */
seq_printf(seq, "%-10d ", spa->spg->id);
else /* spg is dead */
seq_printf(seq, "%-10s ", "Dead");
up_read(&spa->spg->rw_lock);
}
seq_printf(seq, "%2s%-14lx %2s%-14lx %-10ld ",
"0x", spa->va_start,
"0x", spa->va_end,
byte2kb(spa->real_size));
switch (spa->type) {
case SPA_TYPE_ALLOC:
seq_printf(seq, "%-7s ", "ALLOC");
break;
case SPA_TYPE_K2TASK:
seq_printf(seq, "%-7s ", "TASK");
break;
case SPA_TYPE_K2SPG:
seq_printf(seq, "%-7s ", "SPG");
break;
default:
/* usually impossible, perhaps a developer's mistake */
break;
}
if (spa->is_hugepage)
seq_printf(seq, "%-5s ", "Y");
else
seq_printf(seq, "%-5s ", "N");
seq_printf(seq, "%-8d ", spa->applier);
seq_printf(seq, "%-8d\n", atomic_read(&spa->use_count));
spin_lock(&sp_area_lock);
}
__sp_area_drop_locked(prev);
spin_unlock(&sp_area_lock);
}
void spa_overview_show(struct seq_file *seq)
{
unsigned int total_num, alloc_num, k2u_task_num, k2u_spg_num;
unsigned long total_size, alloc_size, k2u_task_size, k2u_spg_size;
unsigned long dvpp_size, dvpp_va_size;
if (!sp_is_enabled())
return;
spin_lock(&sp_area_lock);
total_num = spa_stat.total_num;
alloc_num = spa_stat.alloc_num;
k2u_task_num = spa_stat.k2u_task_num;
k2u_spg_num = spa_stat.k2u_spg_num;
total_size = spa_stat.total_size;
alloc_size = spa_stat.alloc_size;
k2u_task_size = spa_stat.k2u_task_size;
k2u_spg_size = spa_stat.k2u_spg_size;
dvpp_size = spa_stat.dvpp_size;
dvpp_va_size = spa_stat.dvpp_va_size;
spin_unlock(&sp_area_lock);
if (seq != NULL) {
seq_printf(seq, "Spa total num %u.\n", total_num);
seq_printf(seq, "Spa alloc num %u, k2u(task) num %u, k2u(spg) num %u.\n",
alloc_num, k2u_task_num, k2u_spg_num);
seq_printf(seq, "Spa total size: %13lu KB\n", byte2kb(total_size));
seq_printf(seq, "Spa alloc size: %13lu KB\n", byte2kb(alloc_size));
seq_printf(seq, "Spa k2u(task) size: %13lu KB\n", byte2kb(k2u_task_size));
seq_printf(seq, "Spa k2u(spg) size: %13lu KB\n", byte2kb(k2u_spg_size));
seq_printf(seq, "Spa dvpp size: %13lu KB\n", byte2kb(dvpp_size));
seq_printf(seq, "Spa dvpp va size: %13lu MB\n", byte2mb(dvpp_va_size));
seq_puts(seq, "\n");
} else {
pr_info("Spa total num %u.\n", total_num);
pr_info("Spa alloc num %u, k2u(task) num %u, k2u(spg) num %u.\n",
alloc_num, k2u_task_num, k2u_spg_num);
pr_info("Spa total size: %13lu KB\n", byte2kb(total_size));
pr_info("Spa alloc size: %13lu KB\n", byte2kb(alloc_size));
pr_info("Spa k2u(task) size: %13lu KB\n", byte2kb(k2u_task_size));
pr_info("Spa k2u(spg) size: %13lu KB\n", byte2kb(k2u_spg_size));
pr_info("Spa dvpp size: %13lu KB\n", byte2kb(dvpp_size));
pr_info("Spa dvpp va size: %13lu MB\n", byte2mb(dvpp_va_size));
pr_info("\n");
}
}
/* the caller must hold sp_group_sem */
static int idr_spg_stat_cb(int id, void *p, void *data)
{
struct sp_spg_stat *s = p;
struct seq_file *seq = data;
if (seq != NULL) {
if (id == 0)
seq_puts(seq, "Non Group ");
else
seq_printf(seq, "Group %6d ", id);
seq_printf(seq, "size: %lld KB, spa num: %d, total alloc: %lld KB, normal alloc: %lld KB, huge alloc: %lld KB\n",
byte2kb(atomic64_read(&s->size)),
atomic_read(&s->spa_num),
byte2kb(atomic64_read(&s->alloc_size)),
byte2kb(atomic64_read(&s->alloc_nsize)),
byte2kb(atomic64_read(&s->alloc_hsize)));
} else {
if (id == 0)
pr_info("Non Group ");
else
pr_info("Group %6d ", id);
pr_info("size: %lld KB, spa num: %d, total alloc: %lld KB, normal alloc: %lld KB, huge alloc: %lld KB\n",
byte2kb(atomic64_read(&s->size)),
atomic_read(&s->spa_num),
byte2kb(atomic64_read(&s->alloc_size)),
byte2kb(atomic64_read(&s->alloc_nsize)),
byte2kb(atomic64_read(&s->alloc_hsize)));
}
return 0;
}
void spg_overview_show(struct seq_file *seq)
{
if (!sp_is_enabled())
return;
if (seq != NULL) {
seq_printf(seq, "Share pool total size: %lld KB, spa total num: %d.\n",
byte2kb(atomic64_read(&sp_overall_stat.spa_total_size)),
atomic_read(&sp_overall_stat.spa_total_num));
} else {
pr_info("Share pool total size: %lld KB, spa total num: %d.\n",
byte2kb(atomic64_read(&sp_overall_stat.spa_total_size)),
atomic_read(&sp_overall_stat.spa_total_num));
}
down_read(&sp_group_sem);
idr_for_each(&sp_spg_stat_idr, idr_spg_stat_cb, seq);
up_read(&sp_group_sem);
if (seq != NULL)
seq_puts(seq, "\n");
else
pr_info("\n");
}
static int spa_stat_show(struct seq_file *seq, void *offset)
{
spg_overview_show(seq);
spa_overview_show(seq);
/* print the file header */
seq_printf(seq, "%-10s %-16s %-16s %-10s %-7s %-5s %-8s %-8s\n",
"Group ID", "va_start", "va_end", "Size(KB)", "Type", "Huge", "PID", "Ref");
rb_spa_stat_show(seq);
return 0;
}
static int idr_proc_stat_cb(int id, void *p, void *data)
{
struct sp_spg_stat *spg_stat = p;
struct seq_file *seq = data;
int i, tgid;
struct sp_proc_stat *proc_stat;
struct spg_proc_stat *spg_proc_stat;
struct mm_struct *mm;
unsigned long anon, file, shmem, total_rss, prot;
/*
* non_sp_res: resident memory size excluding share pool memory
* sp_res: resident memory size of share pool, including normal
* page and hugepage memory
* non_sp_shm: resident shared memory size excluding share pool
* memory
*/
long sp_res, sp_res_nsize, non_sp_res, non_sp_shm;
/* to prevent ABBA deadlock, first hold sp_group_sem */
down_read(&sp_group_sem);
mutex_lock(&spg_stat->lock);
hash_for_each(spg_stat->hash, i, spg_proc_stat, gnode) {
proc_stat = spg_proc_stat->proc_stat;
tgid = proc_stat->tgid;
mm = proc_stat->mm;
get_mm_rss_info(mm, &anon, &file, &shmem, &total_rss);
get_process_sp_res(proc_stat, &sp_res, &sp_res_nsize);
get_process_non_sp_res(total_rss, shmem, sp_res_nsize,
&non_sp_res, &non_sp_shm);
prot = get_process_prot_locked(id, mm);
seq_printf(seq, "%-8d ", tgid);
if (id == 0)
seq_printf(seq, "%-8c ", '-');
else
seq_printf(seq, "%-8d ", id);
seq_printf(seq, "%-9ld %-9ld %-9ld %-10ld %-10ld %-8ld %-7ld %-7ld %-10ld ",
get_spg_proc_alloc(spg_proc_stat),
get_spg_proc_k2u(spg_proc_stat),
get_sp_res_by_spg_proc(spg_proc_stat),
sp_res, non_sp_res,
page2kb(mm->total_vm), page2kb(total_rss),
page2kb(shmem), non_sp_shm);
print_process_prot(seq, prot);
seq_putc(seq, '\n');
}
mutex_unlock(&spg_stat->lock);
up_read(&sp_group_sem);
return 0;
}
static int proc_stat_show(struct seq_file *seq, void *offset)
{
spg_overview_show(seq);
spa_overview_show(seq);
/* print the file header */
seq_printf(seq, "%-8s %-8s %-9s %-9s %-9s %-10s %-10s %-8s %-7s %-7s %-10s %-4s\n",
"PID", "Group_ID", "SP_ALLOC", "SP_K2U", "SP_RES", "SP_RES_T",
"Non-SP_RES", "VIRT", "RES", "Shm", "Non-SP_Shm", "PROT");
/* print kthread buff_module_guard_work */
seq_printf(seq, "%-8s %-8s %-9lld %-9lld\n",
"guard", "-",
byte2kb(atomic64_read(&kthread_stat.alloc_size)),
byte2kb(atomic64_read(&kthread_stat.k2u_size)));
/* pay attention to potential ABBA deadlock */
down_read(&sp_spg_stat_sem);
idr_for_each(&sp_spg_stat_idr, idr_proc_stat_cb, seq);
up_read(&sp_spg_stat_sem);
return 0;
}
static int idr_proc_overview_cb(int id, void *p, void *data)
{
struct sp_proc_stat *proc_stat = p;
struct seq_file *seq = data;
struct mm_struct *mm = proc_stat->mm;
unsigned long anon, file, shmem, total_rss;
long sp_res, sp_res_nsize, non_sp_res, non_sp_shm;
get_mm_rss_info(mm, &anon, &file, &shmem, &total_rss);
get_process_sp_res(proc_stat, &sp_res, &sp_res_nsize);
get_process_non_sp_res(total_rss, shmem, sp_res_nsize,
&non_sp_res, &non_sp_shm);
seq_printf(seq, "%-8d %-16s %-9ld %-9ld %-9ld %-10ld %-10ld %-8ld\n",
id, proc_stat->comm,
get_proc_alloc(proc_stat),
get_proc_k2u(proc_stat),
sp_res, non_sp_res, non_sp_shm,
page2kb(mm->total_vm));
return 0;
}
static int proc_overview_show(struct seq_file *seq, void *offset)
{
seq_printf(seq, "%-8s %-16s %-9s %-9s %-9s %-10s %-10s %-8s\n",
"PID", "COMM", "SP_ALLOC", "SP_K2U", "SP_RES", "Non-SP_RES",
"Non-SP_Shm", "VIRT");
down_read(&sp_proc_stat_sem);
idr_for_each(&sp_proc_stat_idr, idr_proc_overview_cb, seq);
up_read(&sp_proc_stat_sem);
return 0;
}
static void __init proc_sharepool_init(void)
{
if (!proc_mkdir("sharepool", NULL))
return;
proc_create_single_data("sharepool/proc_stat", 0400, NULL, proc_stat_show, NULL);
proc_create_single_data("sharepool/spa_stat", 0400, NULL, spa_stat_show, NULL);
proc_create_single_data("sharepool/proc_overview", 0400, NULL, proc_overview_show, NULL);
}
/*** End of statistical and maintenance functions ***/
DEFINE_STATIC_KEY_FALSE(share_pool_enabled_key);
static int __init enable_share_pool(char *s)
@@ -446,6 +1375,7 @@ static int __init share_pool_init(void)
goto fail;
sp_device_number_detect();
proc_sharepool_init();
return 0;
fail:
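
Putting the pieces together: given the seq_printf() headers above,
/proc/sharepool/proc_overview would produce rows shaped roughly like the
following (values are hypothetical; sizes are in KB via byte2kb()/page2kb()):

PID      COMM             SP_ALLOC  SP_K2U    SP_RES    Non-SP_RES Non-SP_Shm VIRT
1234     sample_task      8192      0         8192      1024       0          45056

/proc/<pid>/sp_group prints the same aggregate row for the task, then one
line per group the task belongs to (Group_ID, SP_ALLOC, SP_K2U, SP_RES, PROT).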