Commit cc1092a7 authored by Tang Yizhou, committed by Yang Yingliang

share_pool: Rename buff_vzalloc_user and buff_vzalloc_hugepage_user

ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4EUVI
CVE: NA

-------------------------------------------------

Make the function names more general:
Rename buff_vzalloc_user to vzalloc_user_account.
Rename buff_vzalloc_hugepage_user to vzalloc_hugepage_user_account.

To support NUMA configuration, we also introduce a new parameter *node*,
which specifies the NUMA node id.
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent a97a5859
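As an illustration (hypothetical call sites, not part of this diff), existing callers migrate by appending a NUMA node id; passing NUMA_NO_NODE preserves the previous placement behavior, while numa_node_id() requests the current node:

    /* before: buf = buff_vzalloc_user(size); */
    buf = vzalloc_user_account(size, NUMA_NO_NODE);

    /* before: hbuf = buff_vzalloc_hugepage_user(size); */
    hbuf = vzalloc_hugepage_user_account(size, numa_node_id());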
@@ -253,8 +253,8 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
 extern void *vmalloc_hugepage(unsigned long size);
 extern void *vmalloc_hugepage_user(unsigned long size);
-extern void *buff_vzalloc_user(unsigned long size);
-extern void *buff_vzalloc_hugepage_user(unsigned long size);
+extern void *vzalloc_user_account(unsigned long size, int node);
+extern void *vzalloc_hugepage_user_account(unsigned long size, int node);
 
 void sp_exit_mm(struct mm_struct *mm);
@@ -456,12 +456,12 @@ static inline void *vmalloc_hugepage_user(unsigned long size)
 	return NULL;
 }
 
-static inline void *buff_vzalloc_user(unsigned long size)
+static inline void *vzalloc_user_account(unsigned long size, int node)
 {
 	return NULL;
 }
 
-static inline void *buff_vzalloc_hugepage_user(unsigned long size)
+static inline void *vzalloc_hugepage_user_account(unsigned long size, int node)
 {
 	return NULL;
 }
......
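The static inline stubs above (presumably the fallbacks when the share-pool feature is compiled out) return NULL, and the real allocators can fail as well, so callers must always check the result. A minimal sketch (hypothetical caller, assumes <linux/vmalloc.h>; not part of this commit):

    static int example_alloc(unsigned long len)
    {
    	void *buf = vzalloc_user_account(len, NUMA_NO_NODE);
    
    	if (!buf)
    		return -ENOMEM;	/* feature compiled out, or allocation failed */
    
    	/* ... use buf, e.g. remap it to userspace ... */
    	vfree(buf);
    	return 0;
    }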
@@ -3043,9 +3043,10 @@ void *vmalloc_hugepage_user(unsigned long size)
 EXPORT_SYMBOL(vmalloc_hugepage_user);
 
 /**
- * buff_vzalloc_user - allocate zeroed virtually contiguous memory
+ * vzalloc_user_account - allocate zeroed virtually contiguous memory
  * for userspace
  * @size: allocation size
+ * @node: NUMA node id
  *
  * The resulting memory area is zeroed so it can be mapped to userspace
  * without leaking data.
@@ -3053,19 +3054,20 @@ EXPORT_SYMBOL(vmalloc_hugepage_user);
  * Compare to vmalloc_user(), this is a customized function because
  * __GFP_ACCOUNT is used to limit memory usage.
  */
-void *buff_vzalloc_user(unsigned long size)
+void *vzalloc_user_account(unsigned long size, int node)
 {
 	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
 				    GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT, PAGE_KERNEL,
-				    VM_USERMAP, NUMA_NO_NODE,
+				    VM_USERMAP, node,
 				    __builtin_return_address(0));
 }
-EXPORT_SYMBOL(buff_vzalloc_user);
+EXPORT_SYMBOL(vzalloc_user_account);
 
 /**
- * buff_vzalloc_hugepage_user - allocate virtually contiguous hugetlb memory
+ * vzalloc_hugepage_user_account - allocate virtually contiguous hugetlb memory
  * for userspace
  * @size: allocation size
+ * @node: NUMA node id
  *
  * Allocate enough huge pages to cover @size and map them into
  * contiguous kernel virtual space. The resulting memory area
@@ -3076,17 +3078,17 @@ EXPORT_SYMBOL(buff_vzalloc_user);
  * Compare to vmalloc_hugepage_user(), this is a customized function because
  * __GFP_ACCOUNT is used to limit memory usage.
  */
-void *buff_vzalloc_hugepage_user(unsigned long size)
+void *vzalloc_hugepage_user_account(unsigned long size, int node)
 {
 	/* PMD hugepage aligned */
 	size = PMD_ALIGN(size);
 
 	return __vmalloc_node_range(size, PMD_SIZE, VMALLOC_START, VMALLOC_END,
 				    GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT, PAGE_KERNEL,
-				    VM_HUGE_PAGES | VM_USERMAP, NUMA_NO_NODE,
+				    VM_HUGE_PAGES | VM_USERMAP, node,
 				    __builtin_return_address(0));
 }
-EXPORT_SYMBOL(buff_vzalloc_hugepage_user);
+EXPORT_SYMBOL(vzalloc_hugepage_user_account);
 
 int enable_ascend_share_pool;
......
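For reference, a sketch of how the hugepage variant sizes its allocation (hypothetical values; PMD_SIZE is 2 MiB on common arm64/x86_64 configurations, so PMD_ALIGN() rounds a 3 MiB request up to 4 MiB; SZ_1M comes from <linux/sizes.h>):

    /*
     * Hypothetical example: request 3 MiB on NUMA node 1; the size is
     * rounded up to two 2 MiB huge pages. VM_USERMAP permits a later
     * remap_vmalloc_range() to map the area into userspace.
     */
    void *hbuf = vzalloc_hugepage_user_account(3 * SZ_1M, 1);
    
    if (!hbuf)
    	return -ENOMEM;
    vfree(hbuf);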