Unverified commit 3feff602, authored by openeuler-ci-bot, committed by Gitee

!1550 Sync patches for sharepool

Merge Pull Request from: @ci-robot 
 
PR sync from: Zhang Zekun <zhangzekun11@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/NB6FQIVTRNQEWM4RJWP274HPUWYTVNX4/ 
Sync patches for sharepool
- Fix some bugs in the sharepool code
- Add a new feature for sharepool

v2:
- Fix some errors in patch 1; some code was missing because git
format-patch was used incorrectly.

Chen Jun (1):
  mm/sharepool: Add mg_sp_alloc_nodemask

Xu Qiang (5):
  mm/sharepool: Change data type of members in sp_spa_stat to atomic64.
  mm/sharepool: Delete SPG_FLAG_NON_DVPP.
  mm/sharepool: Add sp_group_sem protection.
  mm/sharepool: Delete total_num and total_size in sp_spa_stat
  mm/sharepool: Delete unused flag in sp_group.
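For readers of the series: the new interface (declared in the share_pool.h hunk below) takes the nodemask by value and, like mg_sp_alloc(), returns either the starting VA or an ERR_PTR-encoded -errno. A minimal, hypothetical caller sketch — demo_alloc_on_nodes and the node numbers are placeholders, not part of the patches:

#include <linux/err.h>
#include <linux/nodemask.h>
#include <linux/share_pool.h>
#include <linux/sizes.h>

/* Sketch: allocate 2 MB of share-pool memory restricted to nodes 0-1. */
static int demo_alloc_on_nodes(int spg_id)
{
	nodemask_t nodes;
	void *va;

	nodes_clear(nodes);
	node_set(0, nodes);
	node_set(1, nodes);

	va = mg_sp_alloc_nodemask(SZ_2M, SP_HUGEPAGE, spg_id, nodes);
	if (IS_ERR(va))
		return PTR_ERR(va);

	/* ... use the mapping ... */

	return mg_sp_free((unsigned long)va, spg_id);
}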


-- 
2.17.1
 
https://gitee.com/openeuler/kernel/issues/I6GI0X 
 
Link: https://gitee.com/openeuler/kernel/pulls/1550 

Reviewed-by: Weilong Chen <chenweilong@huawei.com> 
Reviewed-by: Jialin Zhang <zhangjialin11@huawei.com> 
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com> 
@@ -629,6 +629,9 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 const struct hstate *hugetlb_get_hstate(void);
 struct page *hugetlb_alloc_hugepage(int nid, int flag);
+struct page *hugetlb_alloc_hugepage_vma(struct vm_area_struct *vma,
+					unsigned long address, int flag);
 int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
 				pgprot_t prot, struct page *hpage);
 int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
@@ -645,6 +648,12 @@ static inline struct page *hugetlb_alloc_hugepage(int nid, int flag)
 	return NULL;
 }
+static inline struct page *hugetlb_alloc_hugepage_vma(struct vm_area_struct *vma,
+						      unsigned long address, int flag)
+{
+	return NULL;
+}
 static inline int hugetlb_insert_hugepage_pte(struct mm_struct *mm,
 		unsigned long addr, pgprot_t prot, struct page *hpage)
 {
@@ -1091,6 +1100,12 @@ static inline struct page *hugetlb_alloc_hugepage(int nid, int flag)
 	return NULL;
 }
+static inline struct page *hugetlb_alloc_hugepage_vma(struct vm_area_struct *vma,
+						      unsigned long address, int flag)
+{
+	return NULL;
+}
 static inline int hugetlb_insert_hugepage_pte(struct mm_struct *mm,
 		unsigned long addr, pgprot_t prot, struct page *hpage)
 {
......
@@ -12,6 +12,8 @@
 #include <linux/jump_label.h>
 #include <linux/kabi.h>
+#include <linux/share_pool_interface.h>
 #define SP_HUGEPAGE		(1 << 0)
 #define SP_HUGEPAGE_ONLY	(1 << 1)
 #define SP_DVPP			(1 << 2)
@@ -48,8 +50,6 @@
 #define SPG_ID_LOCAL_MIN	200001
 #define SPG_ID_LOCAL_MAX	299999
-#define SPG_FLAG_NON_DVPP	(1 << 0)
 #define MAX_DEVID 8	/* the max num of Da-vinci devices */
 extern struct static_key_false share_pool_enabled_key;
@@ -256,6 +256,8 @@ extern int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns,
 		struct pid *pid, struct task_struct *task);
 extern void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id);
+extern void *mg_sp_alloc_nodemask(unsigned long size, unsigned long sp_flags, int spg_id,
+		nodemask_t nodemask);
 extern int mg_sp_free(unsigned long addr, int id);
 extern void *mg_sp_make_share_k2u(unsigned long kva, unsigned long size,
@@ -286,7 +288,6 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
 		unsigned long address, pte_t *ptep, unsigned int flags);
 extern bool sp_check_addr(unsigned long addr);
 extern bool sp_check_mmap_addr(unsigned long addr, unsigned long flags);
-extern int sp_node_id(struct vm_area_struct *vma);
 static inline bool sp_is_enabled(void)
 {
@@ -452,11 +453,6 @@ static inline bool is_vmalloc_sharepool(unsigned long vm_flags)
 	return NULL;
 }
-static inline int sp_node_id(struct vm_area_struct *vma)
-{
-	return numa_node_id();
-}
 static inline bool sp_check_addr(unsigned long addr)
 {
 	return false;
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_SHARE_POOL_INTERFACE_H
#define LINUX_SHARE_POOL_INTERFACE_H
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/numa.h>
#include <linux/kabi.h>
#ifdef CONFIG_ASCEND_SHARE_POOL
extern int sp_node_id(struct vm_area_struct *vma);
#else
static inline int sp_node_id(struct vm_area_struct *vma)
{
return numa_node_id();
}
#endif /* !CONFIG_ASCEND_SHARE_POOL */
#endif /* LINUX_SHARE_POOL_INTERFACE_H */
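The new header above gives core mm code, such as mm/mempolicy.c, a way to query the share pool for a VMA's preferred NUMA node without pulling in all of share_pool.h; with CONFIG_ASCEND_SHARE_POOL disabled, the inline stub keeps call sites building and preserves the old numa_node_id() behavior. A minimal sketch of a call site (demo_pick_node is a hypothetical name):

#include <linux/share_pool_interface.h>

/* Behaves the same whether or not CONFIG_ASCEND_SHARE_POOL is set:
 * share-pool VMAs report their preferred node, everything else falls
 * back to the local node. */
static int demo_pick_node(struct vm_area_struct *vma)
{
	return sp_node_id(vma);
}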
@@ -6312,7 +6312,7 @@ static struct page *hugetlb_alloc_hugepage_normal(struct hstate *h,
 /*
  * Allocate hugepage without reserve
  */
-struct page *hugetlb_alloc_hugepage(int nid, int flag)
+struct page *hugetlb_alloc_hugepage_nodemask(int nid, int flag, nodemask_t *nodemask)
 {
 	struct hstate *h = &default_hstate;
 	gfp_t gfp_mask = htlb_alloc_mask(h);
@@ -6327,7 +6327,6 @@ struct page *hugetlb_alloc_hugepage(int nid, int flag)
 	if (flag & ~HUGETLB_ALLOC_MASK)
 		return NULL;
-	gfp_mask |= __GFP_THISNODE;
 	if (enable_charge_mighp)
 		gfp_mask |= __GFP_ACCOUNT;
@@ -6337,12 +6336,22 @@ struct page *hugetlb_alloc_hugepage(int nid, int flag)
 	if (flag & HUGETLB_ALLOC_NORMAL)
 		page = hugetlb_alloc_hugepage_normal(h, gfp_mask, nid);
 	else if (flag & HUGETLB_ALLOC_BUDDY)
-		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
+		page = alloc_migrate_huge_page(h, gfp_mask, nid, nodemask);
 	else
-		page = alloc_huge_page_nodemask(h, nid, NULL, gfp_mask);
+		page = alloc_huge_page_nodemask(h, nid, nodemask, gfp_mask);
 	return page;
 }
+
+struct page *hugetlb_alloc_hugepage(int nid, int flag)
+{
+	nodemask_t nodemask;
+
+	nodes_clear(nodemask);
+	node_set(nid, nodemask);
+
+	return hugetlb_alloc_hugepage_nodemask(nid, flag, &nodemask);
+}
 EXPORT_SYMBOL_GPL(hugetlb_alloc_hugepage);
 static pte_t *hugetlb_huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
@@ -6364,6 +6373,19 @@ static pte_t *hugetlb_huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
 	return ptep;
 }
+struct page *hugetlb_alloc_hugepage_vma(struct vm_area_struct *vma, unsigned long address, int flag)
+{
+	int nid;
+	struct hstate *h = hstate_vma(vma);
+	struct mempolicy *mpol;
+	nodemask_t *nodemask;
+	gfp_t gfp_mask;
+
+	gfp_mask = htlb_alloc_mask(h);
+	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
+
+	return hugetlb_alloc_hugepage_nodemask(nid, flag, nodemask);
+}
 static int __hugetlb_insert_hugepage(struct mm_struct *mm, unsigned long addr,
 		pgprot_t prot, unsigned long pfn)
 {
......
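The refactor above keeps the old entry point as a thin wrapper: hugetlb_alloc_hugepage(nid, flag) now builds a single-node mask and delegates to hugetlb_alloc_hugepage_nodemask(). A hedged sketch of a caller that widens the candidate set to two nodes — assuming the _nodemask helper is visible to the caller; the node numbers are placeholders:

#include <linux/hugetlb.h>
#include <linux/nodemask.h>

/* Sketch: prefer node 0 but let the allocator fall back to node 1,
 * instead of the strict one-node behavior of hugetlb_alloc_hugepage(). */
static struct page *demo_alloc_huge_two_nodes(int flag)
{
	nodemask_t mask;

	nodes_clear(mask);
	node_set(0, mask);
	node_set(1, mask);

	return hugetlb_alloc_hugepage_nodemask(0, flag, &mask);
}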
@@ -103,6 +103,8 @@
 #include <linux/printk.h>
 #include <linux/swapops.h>
+#include <linux/share_pool_interface.h>
 #include <asm/tlbflush.h>
 #include <linux/uaccess.h>
@@ -2198,7 +2200,7 @@ int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
 		nid = interleave_nid(*mpol, vma, addr,
 				huge_page_shift(hstate_vma(vma)));
 	} else {
-		nid = policy_node(gfp_flags, *mpol, numa_node_id());
+		nid = policy_node(gfp_flags, *mpol, sp_node_id(vma));
 		if ((*mpol)->mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
 			*nodemask = &(*mpol)->v.nodes;
 	}
......
@@ -165,7 +165,6 @@ struct sp_mapping {
  */
 struct sp_group {
 	int id;
-	unsigned long flag;
 	struct file *file;
 	struct file *file_hugetlb;
 	/* number of process in this group */
@@ -452,7 +451,7 @@ static int sp_mapping_group_setup(struct mm_struct *mm, struct sp_group *spg)
 	local_dvpp_mapping = mm->sp_group_master->local->mapping[SP_MAPPING_DVPP];
 	spg_dvpp_mapping = spg->mapping[SP_MAPPING_DVPP];
-	if (!list_empty(&spg->procs) && !(spg->flag & SPG_FLAG_NON_DVPP)) {
+	if (!list_empty(&spg->procs)) {
 		/*
 		 * Don't return an error when the mappings' address range conflict.
 		 * As long as the mapping is unused, we can drop the empty mapping.
@@ -476,9 +475,8 @@ static int sp_mapping_group_setup(struct mm_struct *mm, struct sp_group *spg)
 			return -EINVAL;
 		}
 	} else {
-		if (!(spg->flag & SPG_FLAG_NON_DVPP))
-			/* the mapping of local group is always set */
-			sp_mapping_attach(spg, local_dvpp_mapping);
+		/* the mapping of local group is always set */
+		sp_mapping_attach(spg, local_dvpp_mapping);
 		if (!spg->mapping[SP_MAPPING_NORMAL])
 			sp_mapping_attach(spg, sp_mapping_normal);
 		if (!spg->mapping[SP_MAPPING_RO])
@@ -500,7 +498,7 @@ static struct sp_mapping *sp_mapping_find(struct sp_group *spg,
 	return spg->mapping[SP_MAPPING_DVPP];
 }
-static struct sp_group *create_spg(int spg_id, unsigned long flag);
+static struct sp_group *create_spg(int spg_id);
 static void free_new_spg_id(bool new, int spg_id);
 static void free_sp_group_locked(struct sp_group *spg);
 static struct sp_group_node *group_add_task(struct mm_struct *mm, struct sp_group *spg,
@@ -520,7 +518,7 @@ static int init_local_group(struct mm_struct *mm)
 		return spg_id;
 	}
-	spg = create_spg(spg_id, 0);
+	spg = create_spg(spg_id);
 	if (IS_ERR(spg)) {
 		free_new_spg_id(true, spg_id);
 		return PTR_ERR(spg);
@@ -645,18 +643,15 @@ static void update_mem_usage_k2u(unsigned long size, bool inc,
 	}
 }
-/* statistics of all sp area, protected by sp_area_lock */
 struct sp_spa_stat {
-	unsigned int total_num;
-	unsigned int alloc_num;
-	unsigned int k2u_task_num;
-	unsigned int k2u_spg_num;
-	unsigned long total_size;
-	unsigned long alloc_size;
-	unsigned long k2u_task_size;
-	unsigned long k2u_spg_size;
-	unsigned long dvpp_size;
-	unsigned long dvpp_va_size;
+	atomic64_t alloc_num;
+	atomic64_t k2u_task_num;
+	atomic64_t k2u_spg_num;
+	atomic64_t alloc_size;
+	atomic64_t k2u_task_size;
+	atomic64_t k2u_spg_size;
+	atomic64_t dvpp_size;
+	atomic64_t dvpp_va_size;
 };
 static struct sp_spa_stat spa_stat;
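With every counter now an atomic64_t, updates no longer need sp_area_lock (the series also moves spa_inc_usage() outside the lock in sp_alloc_area(), below), and the deleted total_num/total_size fields become values derived at read time. A sketch of the read-side arithmetic that spa_overview_show() now performs — demo_spa_total_num is a hypothetical helper, not in the patch:

/* Totals are computed from the per-type counters at read time; no lock
 * is taken, so a reader may see a transiently inconsistent snapshot,
 * which is acceptable for statistics output. */
static s64 demo_spa_total_num(void)
{
	return atomic64_read(&spa_stat.alloc_num) +
	       atomic64_read(&spa_stat.k2u_task_num) +
	       atomic64_read(&spa_stat.k2u_spg_num);
}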
@@ -700,7 +695,7 @@ struct sp_area {
 	struct mm_struct *mm;		/* owner of k2u(task) */
 	unsigned long kva;		/* shared kva */
 	pid_t applier;			/* the original applier process */
-	int node_id;			/* memory node */
+	int preferred_node_id;		/* memory node */
 	int device_id;
 };
 static DEFINE_SPINLOCK(sp_area_lock);
@@ -728,18 +723,18 @@ static void spa_inc_usage(struct sp_area *spa)
 	switch (type) {
 	case SPA_TYPE_ALLOC:
-		spa_stat.alloc_num += 1;
-		spa_stat.alloc_size += size;
+		atomic64_inc(&spa_stat.alloc_num);
+		atomic64_add(size, &spa_stat.alloc_size);
 		meminfo_inc_usage(size, is_huge, &spa->spg->meminfo);
 		break;
 	case SPA_TYPE_K2TASK:
-		spa_stat.k2u_task_num += 1;
-		spa_stat.k2u_task_size += size;
+		atomic64_inc(&spa_stat.k2u_task_num);
+		atomic64_add(size, &spa_stat.k2u_task_size);
 		meminfo_inc_k2u(size, &spa->spg->meminfo);
 		break;
 	case SPA_TYPE_K2SPG:
-		spa_stat.k2u_spg_num += 1;
-		spa_stat.k2u_spg_size += size;
+		atomic64_inc(&spa_stat.k2u_spg_num);
+		atomic64_add(size, &spa_stat.k2u_spg_size);
 		meminfo_inc_k2u(size, &spa->spg->meminfo);
 		break;
 	default:
@@ -747,17 +742,11 @@ static void spa_inc_usage(struct sp_area *spa)
 	}
 	if (is_dvpp) {
-		spa_stat.dvpp_size += size;
-		spa_stat.dvpp_va_size += ALIGN(size, PMD_SIZE);
+		atomic64_add(size, &spa_stat.dvpp_size);
+		atomic64_add(ALIGN(size, PMD_SIZE), &spa_stat.dvpp_va_size);
 	}
 	atomic_inc(&spa->spg->spa_num);
-	/*
-	 * all the calculations won't overflow due to system limitation and
-	 * parameter checking in sp_alloc_area()
-	 */
-	spa_stat.total_num += 1;
-	spa_stat.total_size += size;
 	if (!is_local_group(spa->spg->id)) {
 		atomic_inc(&sp_overall_stat.spa_total_num);
@@ -775,18 +764,18 @@ static void spa_dec_usage(struct sp_area *spa)
 	switch (type) {
 	case SPA_TYPE_ALLOC:
-		spa_stat.alloc_num -= 1;
-		spa_stat.alloc_size -= size;
+		atomic64_dec(&spa_stat.alloc_num);
+		atomic64_sub(size, &spa_stat.alloc_size);
 		meminfo_dec_usage(size, is_huge, &spa->spg->meminfo);
 		break;
 	case SPA_TYPE_K2TASK:
-		spa_stat.k2u_task_num -= 1;
-		spa_stat.k2u_task_size -= size;
+		atomic64_dec(&spa_stat.k2u_task_num);
+		atomic64_sub(size, &spa_stat.k2u_task_size);
 		meminfo_dec_k2u(size, &spa->spg->meminfo);
 		break;
 	case SPA_TYPE_K2SPG:
-		spa_stat.k2u_spg_num -= 1;
-		spa_stat.k2u_spg_size -= size;
+		atomic64_dec(&spa_stat.k2u_spg_num);
+		atomic64_sub(size, &spa_stat.k2u_spg_size);
 		meminfo_dec_k2u(size, &spa->spg->meminfo);
 		break;
 	default:
@@ -794,13 +783,11 @@ static void spa_dec_usage(struct sp_area *spa)
 	}
 	if (is_dvpp) {
-		spa_stat.dvpp_size -= size;
-		spa_stat.dvpp_va_size -= ALIGN(size, PMD_SIZE);
+		atomic64_sub(size, &spa_stat.dvpp_size);
+		atomic64_sub(ALIGN(size, PMD_SIZE), &spa_stat.dvpp_va_size);
 	}
 	atomic_dec(&spa->spg->spa_num);
-	spa_stat.total_num -= 1;
-	spa_stat.total_size -= size;
 	if (!is_local_group(spa->spg->id)) {
 		atomic_dec(&sp_overall_stat.spa_total_num);
@@ -843,8 +830,11 @@ static void sp_update_process_stat(struct task_struct *tsk, bool inc,
 	unsigned long size = spa->real_size;
 	enum spa_type type = spa->type;
+	down_read(&sp_group_sem);
 	spg_node = find_spg_node_by_spg(tsk->mm, spa->spg);
-	update_mem_usage(size, inc, spa->is_hugepage, spg_node, type);
+	if (spg_node != NULL)
+		update_mem_usage(size, inc, spa->is_hugepage, spg_node, type);
+	up_read(&sp_group_sem);
 }
 static inline void check_interrupt_context(void)
@@ -1090,10 +1080,9 @@ static bool is_online_node_id(int node_id)
 	return node_id >= 0 && node_id < MAX_NUMNODES && node_online(node_id);
 }
-static void sp_group_init(struct sp_group *spg, int spg_id, unsigned long flag)
+static void sp_group_init(struct sp_group *spg, int spg_id)
 {
 	spg->id = spg_id;
-	spg->flag = flag;
 	spg->is_alive = true;
 	spg->proc_num = 0;
 	atomic_set(&spg->use_count, 1);
@@ -1105,7 +1094,7 @@ static void sp_group_init(struct sp_group *spg, int spg_id, unsigned long flag)
 	meminfo_init(&spg->meminfo);
 }
-static struct sp_group *create_spg(int spg_id, unsigned long flag)
+static struct sp_group *create_spg(int spg_id)
 {
 	int ret;
 	struct sp_group *spg;
@@ -1140,7 +1129,7 @@ static struct sp_group *create_spg(int spg_id, unsigned long flag)
 		goto out_fput;
 	}
-	sp_group_init(spg, spg_id, flag);
+	sp_group_init(spg, spg_id);
 	ret = idr_alloc(&sp_group_idr, spg, spg_id, spg_id + 1, GFP_KERNEL);
 	if (ret < 0) {
@@ -1163,14 +1152,14 @@ static struct sp_group *create_spg(int spg_id, unsigned long flag)
 }
 /* the caller must hold sp_group_sem */
-static struct sp_group *find_or_alloc_sp_group(int spg_id, unsigned long flag)
+static struct sp_group *find_or_alloc_sp_group(int spg_id)
 {
 	struct sp_group *spg;
 	spg = sp_group_get_locked(current->tgid, spg_id);
 	if (!spg) {
-		spg = create_spg(spg_id, flag);
+		spg = create_spg(spg_id);
 	} else {
 		down_read(&spg->rw_lock);
 		if (!spg_valid(spg)) {
@@ -1341,7 +1330,6 @@ static struct sp_group_node *group_add_task(struct mm_struct *mm, struct sp_group *spg,
 */
 int mg_sp_group_add_task(int tgid, unsigned long prot, int spg_id)
 {
-	unsigned long flag = 0;
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct sp_group *spg;
@@ -1442,7 +1430,7 @@ int mg_sp_group_add_task(int tgid, unsigned long prot, int spg_id)
 		goto out_put_mm;
 	}
-	spg = find_or_alloc_sp_group(spg_id, flag);
+	spg = find_or_alloc_sp_group(spg_id);
 	if (IS_ERR(spg)) {
 		up_write(&sp_group_sem);
 		ret = PTR_ERR(spg);
@@ -1892,16 +1880,16 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
 	spa->mm = NULL;
 	spa->kva = 0;	/* NULL pointer */
 	spa->applier = applier;
-	spa->node_id = node_id;
+	spa->preferred_node_id = node_id;
 	spa->device_id = device_id;
-	spa_inc_usage(spa);
 	insert_sp_area(mapping, spa);
 	mapping->free_area_cache = &spa->rb_node;
 	list_add_tail(&spa->link, &spg->spa_list);
 	spin_unlock(&sp_area_lock);
+	spa_inc_usage(spa);
 	return spa;
 error:
@@ -2191,7 +2179,9 @@ static int sp_free_get_spa(struct sp_free_context *fc)
 }
 /**
- * mg_sp_free() - Free the memory allocated by mg_sp_alloc().
+ * mg_sp_free() - Free the memory allocated by mg_sp_alloc() or
+ * mg_sp_alloc_nodemask().
+ *
  * @addr: the starting VA of the memory.
  * @id: Address space identifier, which is used to distinguish the addr.
  *
@@ -2448,18 +2438,15 @@ static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa,
 }
 static long sp_mbind(struct mm_struct *mm, unsigned long start, unsigned long len,
-		unsigned long node)
+		nodemask_t *nodemask)
 {
-	nodemask_t nmask;
-
-	nodes_clear(nmask);
-	node_set(node, nmask);
-
 	return __do_mbind(start, len, MPOL_BIND, MPOL_F_STATIC_NODES,
-			&nmask, MPOL_MF_STRICT, mm);
+			nodemask, MPOL_MF_STRICT, mm);
 }
 static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
-		struct sp_group_node *spg_node, struct sp_alloc_context *ac)
+		struct sp_group_node *spg_node, struct sp_alloc_context *ac,
+		nodemask_t *nodemask)
 {
 	int ret;
@@ -2468,10 +2455,10 @@ static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
 		return ret;
 	if (!ac->have_mbind) {
-		ret = sp_mbind(mm, spa->va_start, spa->real_size, spa->node_id);
+		ret = sp_mbind(mm, spa->va_start, spa->real_size, nodemask);
 		if (ret < 0) {
-			pr_err("cannot bind the memory range to specified node:%d, err:%d\n",
-					spa->node_id, ret);
+			pr_err("cannot bind the memory range to node[%*pbl], err:%d\n",
+					nodemask_pr_args(nodemask), ret);
 			return ret;
 		}
 		ac->have_mbind = true;
@@ -2490,17 +2477,25 @@ static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
 }
 static int sp_alloc_mmap_populate(struct sp_area *spa,
-		struct sp_alloc_context *ac)
+		struct sp_alloc_context *ac,
+		nodemask_t *nodemask)
 {
 	int ret = -EINVAL;
 	int mmap_ret = 0;
 	struct mm_struct *mm, *end_mm = NULL;
 	struct sp_group_node *spg_node;
+	nodemask_t __nodemask;
+
+	if (!nodemask) {	/* mg_sp_alloc */
+		nodes_clear(__nodemask);
+		node_set(spa->preferred_node_id, __nodemask);
+	} else			/* mg_sp_alloc_nodemask */
+		__nodemask = *nodemask;
 	/* create mapping for each process in the group */
 	list_for_each_entry(spg_node, &spa->spg->procs, proc_node) {
 		mm = spg_node->master->mm;
-		mmap_ret = __sp_alloc_mmap_populate(mm, spa, spg_node, ac);
+		mmap_ret = __sp_alloc_mmap_populate(mm, spa, spg_node, ac, &__nodemask);
 		if (mmap_ret) {
 			/*
@@ -2563,19 +2558,8 @@ static void sp_alloc_finish(int result, struct sp_area *spa,
 	sp_group_put(spg);
 }
-/**
- * mg_sp_alloc() - Allocate shared memory for all the processes in a sp_group.
- * @size: the size of memory to allocate.
- * @sp_flags: how to allocate the memory.
- * @spg_id: the share group that the memory is allocated to.
- *
- * Use pass through allocation if spg_id == SPG_ID_DEFAULT in multi-group mode.
- *
- * Return:
- * * if succeed, return the starting address of the shared memory.
- * * if fail, return the pointer of -errno.
- */
-void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
+void *__mg_sp_alloc_nodemask(unsigned long size, unsigned long sp_flags, int spg_id,
+		nodemask_t *nodemask)
 {
 	struct sp_area *spa = NULL;
 	int ret = 0;
@@ -2598,7 +2582,7 @@ void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
 		goto out;
 	}
-	ret = sp_alloc_mmap_populate(spa, &ac);
+	ret = sp_alloc_mmap_populate(spa, &ac, nodemask);
 	if (ret && ac.state == ALLOC_RETRY) {
 		/*
 		 * The mempolicy for shared memory is located at backend file, which varies
@@ -2616,6 +2600,30 @@ void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
 	else
 		return (void *)(spa->va_start);
 }
+
+void *mg_sp_alloc_nodemask(unsigned long size, unsigned long sp_flags, int spg_id,
+		nodemask_t nodemask)
+{
+	return __mg_sp_alloc_nodemask(size, sp_flags, spg_id, &nodemask);
+}
+EXPORT_SYMBOL_GPL(mg_sp_alloc_nodemask);
+
+/**
+ * mg_sp_alloc() - Allocate shared memory for all the processes in a sp_group.
+ * @size: the size of memory to allocate.
+ * @sp_flags: how to allocate the memory.
+ * @spg_id: the share group that the memory is allocated to.
+ *
+ * Use pass through allocation if spg_id == SPG_ID_DEFAULT in multi-group mode.
+ *
+ * Return:
+ * * if succeed, return the starting address of the shared memory.
+ * * if fail, return the pointer of -errno.
+ */
+void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
+{
+	return __mg_sp_alloc_nodemask(size, sp_flags, spg_id, NULL);
+}
 EXPORT_SYMBOL_GPL(mg_sp_alloc);
 /**
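Both exported allocators now funnel into __mg_sp_alloc_nodemask(): mg_sp_alloc() passes a NULL mask, which sp_alloc_mmap_populate() expands into a one-node mask built from spa->preferred_node_id, while mg_sp_alloc_nodemask() forwards the caller's mask. The old single-node behavior can therefore be reproduced explicitly, roughly as in this sketch (demo_alloc_single_node is hypothetical; it matches mg_sp_alloc() only when nid equals the node the allocator would have preferred):

/* Sketch: pin a share-pool allocation to one explicit node. */
static void *demo_alloc_single_node(unsigned long size, unsigned long sp_flags,
				    int spg_id, int nid)
{
	nodemask_t mask;

	nodes_clear(mask);
	node_set(nid, mask);

	return mg_sp_alloc_nodemask(size, sp_flags, spg_id, mask);
}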
@@ -3599,7 +3607,7 @@ int sp_node_id(struct vm_area_struct *vma)
 	if (vma && (vma->vm_flags & VM_SHARE_POOL) && vma->vm_private_data) {
 		spa = vma->vm_private_data;
-		node_id = spa->node_id;
+		node_id = spa->preferred_node_id;
 	}
 	return node_id;
@@ -3797,35 +3805,33 @@ static void spa_dvpp_stat_show(struct seq_file *seq)
 static void spa_overview_show(struct seq_file *seq)
 {
-	unsigned int total_num, alloc_num, k2u_task_num, k2u_spg_num;
-	unsigned long total_size, alloc_size, k2u_task_size, k2u_spg_size;
-	unsigned long dvpp_size, dvpp_va_size;
+	s64 total_num, alloc_num, k2u_task_num, k2u_spg_num;
+	s64 total_size, alloc_size, k2u_task_size, k2u_spg_size;
+	s64 dvpp_size, dvpp_va_size;
 	if (!sp_is_enabled())
 		return;
-	spin_lock(&sp_area_lock);
-	total_num = spa_stat.total_num;
-	alloc_num = spa_stat.alloc_num;
-	k2u_task_num = spa_stat.k2u_task_num;
-	k2u_spg_num = spa_stat.k2u_spg_num;
-	total_size = spa_stat.total_size;
-	alloc_size = spa_stat.alloc_size;
-	k2u_task_size = spa_stat.k2u_task_size;
-	k2u_spg_size = spa_stat.k2u_spg_size;
-	dvpp_size = spa_stat.dvpp_size;
-	dvpp_va_size = spa_stat.dvpp_va_size;
-	spin_unlock(&sp_area_lock);
+	alloc_num = atomic64_read(&spa_stat.alloc_num);
+	k2u_task_num = atomic64_read(&spa_stat.k2u_task_num);
+	k2u_spg_num = atomic64_read(&spa_stat.k2u_spg_num);
+	alloc_size = atomic64_read(&spa_stat.alloc_size);
+	k2u_task_size = atomic64_read(&spa_stat.k2u_task_size);
+	k2u_spg_size = atomic64_read(&spa_stat.k2u_spg_size);
+	dvpp_size = atomic64_read(&spa_stat.dvpp_size);
+	dvpp_va_size = atomic64_read(&spa_stat.dvpp_va_size);
+	total_num = alloc_num + k2u_task_num + k2u_spg_num;
+	total_size = alloc_size + k2u_task_size + k2u_spg_size;
-	SEQ_printf(seq, "Spa total num %u.\n", total_num);
-	SEQ_printf(seq, "Spa alloc num %u, k2u(task) num %u, k2u(spg) num %u.\n",
+	SEQ_printf(seq, "Spa total num %lld.\n", total_num);
+	SEQ_printf(seq, "Spa alloc num %lld, k2u(task) num %lld, k2u(spg) num %lld.\n",
 		alloc_num, k2u_task_num, k2u_spg_num);
-	SEQ_printf(seq, "Spa total size:     %13lu KB\n", byte2kb(total_size));
-	SEQ_printf(seq, "Spa alloc size:     %13lu KB\n", byte2kb(alloc_size));
-	SEQ_printf(seq, "Spa k2u(task) size: %13lu KB\n", byte2kb(k2u_task_size));
-	SEQ_printf(seq, "Spa k2u(spg) size:  %13lu KB\n", byte2kb(k2u_spg_size));
-	SEQ_printf(seq, "Spa dvpp size:      %13lu KB\n", byte2kb(dvpp_size));
-	SEQ_printf(seq, "Spa dvpp va size:   %13lu MB\n", byte2mb(dvpp_va_size));
+	SEQ_printf(seq, "Spa total size:     %13lld KB\n", byte2kb(total_size));
+	SEQ_printf(seq, "Spa alloc size:     %13lld KB\n", byte2kb(alloc_size));
+	SEQ_printf(seq, "Spa k2u(task) size: %13lld KB\n", byte2kb(k2u_task_size));
+	SEQ_printf(seq, "Spa k2u(spg) size:  %13lld KB\n", byte2kb(k2u_spg_size));
+	SEQ_printf(seq, "Spa dvpp size:      %13lld KB\n", byte2kb(dvpp_size));
+	SEQ_printf(seq, "Spa dvpp va size:   %13lld MB\n", byte2mb(dvpp_va_size));
 	SEQ_printf(seq, "\n");
 }
@@ -4028,7 +4034,6 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
 	unsigned long haddr = address & huge_page_mask(h);
 	bool new_page = false;
 	int err;
-	int node_id;
 	struct sp_area *spa;
 	bool charge_hpage;
@@ -4037,7 +4042,6 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
 		pr_err("share pool: vma is invalid, not from sp mmap\n");
 		return ret;
 	}
-	node_id = spa->node_id;
 retry:
 	page = find_lock_page(mapping, idx);
@@ -4049,7 +4053,7 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
 		charge_hpage = false;
 		page = alloc_huge_page(vma, haddr, 0);
 		if (IS_ERR(page)) {
-			page = hugetlb_alloc_hugepage(node_id,
+			page = hugetlb_alloc_hugepage_vma(vma, haddr,
 				HUGETLB_ALLOC_BUDDY | HUGETLB_ALLOC_NORECLAIM);
 			if (!page)
 				page = ERR_PTR(-ENOMEM);
......