提交 0886320b 编写于 作者: Z Zhou Guanghui 提交者: Yongqiang Liu

mm/sharepool: Charge Buddy hugepage to memcg

ascend inclusion
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I67NC1
CVE: NA

------------------------------------------

Charge Buddy hugepage to memcg when kmemcg is disabled. If kmemcg
is enabled, we can also use kmemcg to charge buddy hugepages.
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
上级 97e4e6f4
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
#include <linux/printk.h> #include <linux/printk.h>
#include <linux/hashtable.h> #include <linux/hashtable.h>
#include <linux/numa.h> #include <linux/numa.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#define SP_HUGEPAGE (1 << 0) #define SP_HUGEPAGE (1 << 0)
#define SP_HUGEPAGE_ONLY (1 << 1) #define SP_HUGEPAGE_ONLY (1 << 1)
...@@ -411,6 +413,26 @@ extern bool sp_check_mmap_addr(unsigned long addr, unsigned long flags); ...@@ -411,6 +413,26 @@ extern bool sp_check_mmap_addr(unsigned long addr, unsigned long flags);
extern int sp_id_of_current(void); extern int sp_id_of_current(void);
extern int mg_sp_id_of_current(void); extern int mg_sp_id_of_current(void);
/*
 * Uncharge a share-pool huge page from kmemcg.
 *
 * Only acts when the share pool feature is enabled and the page was
 * actually charged as kernel memory (kmemcg active and PageKmemcg set);
 * otherwise it is a no-op.
 */
static inline void sp_kmemcg_uncharge_hpage(struct page *page)
{
	int order;

	if (!sp_is_enabled())
		return;

	if (!memcg_kmem_enabled() || !PageKmemcg(page))
		return;

	/* order of the hugepage is derived from its hstate */
	order = huge_page_order(page_hstate(page));
	__memcg_kmem_uncharge(page, order);
}
/*
 * Drop the memcg charge taken when a share-pool huge page was
 * allocated from the buddy allocator. No-op when the share pool
 * feature is disabled.
 */
static inline void sp_memcg_uncharge_hpage(struct page *page)
{
	if (sp_is_enabled())
		mem_cgroup_uncharge(page);
}
#else #else
static inline int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id) static inline int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id)
...@@ -684,6 +706,14 @@ static inline int mg_sp_id_of_current(void) ...@@ -684,6 +706,14 @@ static inline int mg_sp_id_of_current(void)
return -EPERM; return -EPERM;
} }
/* Stub for kernels built without the share pool feature: nothing to uncharge. */
static inline void sp_kmemcg_uncharge_hpage(struct page *page)
{
}
/* Stub for kernels built without the share pool feature: nothing to uncharge. */
static inline void sp_memcg_uncharge_hpage(struct page *page)
{
}
#endif #endif
#endif /* LINUX_SHARE_POOL_H */ #endif /* LINUX_SHARE_POOL_H */
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include <linux/node.h> #include <linux/node.h>
#include <linux/userfaultfd_k.h> #include <linux/userfaultfd_k.h>
#include <linux/page_owner.h> #include <linux/page_owner.h>
#include <linux/share_pool.h>
#include "internal.h" #include "internal.h"
int hugetlb_max_hstate __read_mostly; int hugetlb_max_hstate __read_mostly;
...@@ -1311,6 +1312,7 @@ void free_huge_page(struct page *page) ...@@ -1311,6 +1312,7 @@ void free_huge_page(struct page *page)
(struct hugepage_subpool *)page_private(page); (struct hugepage_subpool *)page_private(page);
bool restore_reserve; bool restore_reserve;
sp_kmemcg_uncharge_hpage(page);
set_page_private(page, 0); set_page_private(page, 0);
page->mapping = NULL; page->mapping = NULL;
VM_BUG_ON_PAGE(page_count(page), page); VM_BUG_ON_PAGE(page_count(page), page);
...@@ -1345,6 +1347,7 @@ void free_huge_page(struct page *page) ...@@ -1345,6 +1347,7 @@ void free_huge_page(struct page *page)
h->resv_huge_pages++; h->resv_huge_pages++;
if (PageHugeTemporary(page)) { if (PageHugeTemporary(page)) {
sp_memcg_uncharge_hpage(page);
list_del(&page->lru); list_del(&page->lru);
ClearPageHugeTemporary(page); ClearPageHugeTemporary(page);
update_and_free_page(h, page); update_and_free_page(h, page);
......
...@@ -4555,6 +4555,8 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm, ...@@ -4555,6 +4555,8 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
int err; int err;
int node_id; int node_id;
struct sp_area *spa; struct sp_area *spa;
bool charge_hpage = false;
struct mem_cgroup *memcg;
spa = vma->vm_private_data; spa = vma->vm_private_data;
if (!spa) { if (!spa) {
...@@ -4572,10 +4574,11 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm, ...@@ -4572,10 +4574,11 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
page = alloc_huge_page(vma, haddr, 0); page = alloc_huge_page(vma, haddr, 0);
if (IS_ERR(page)) { if (IS_ERR(page)) {
page = alloc_huge_page_node(hstate_file(vma->vm_file), page = hugetlb_alloc_hugepage(node_id, HUGETLB_ALLOC_BUDDY);
node_id);
if (!page) if (!page)
page = ERR_PTR(-ENOMEM); page = ERR_PTR(-ENOMEM);
else if (!PageKmemcg(page))
charge_hpage = true;
} }
if (IS_ERR(page)) { if (IS_ERR(page)) {
ptl = huge_pte_lock(h, mm, ptep); ptl = huge_pte_lock(h, mm, ptep);
...@@ -4588,12 +4591,24 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm, ...@@ -4588,12 +4591,24 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
ret = vmf_error(PTR_ERR(page)); ret = vmf_error(PTR_ERR(page));
goto out; goto out;
} }
if (charge_hpage &&
mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg, true)) {
put_page(page);
ret = vmf_error(-ENOMEM);
goto out;
}
__SetPageUptodate(page); __SetPageUptodate(page);
new_page = true; new_page = true;
/* sharepool pages are all shared */ /* sharepool pages are all shared */
err = huge_add_to_page_cache(page, mapping, idx); err = huge_add_to_page_cache(page, mapping, idx);
if (err) { if (err) {
if (charge_hpage) {
mem_cgroup_cancel_charge(page, memcg, true);
charge_hpage = false;
}
put_page(page); put_page(page);
if (err == -EEXIST) if (err == -EEXIST)
goto retry; goto retry;
...@@ -4601,7 +4616,6 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm, ...@@ -4601,7 +4616,6 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
} }
} }
ptl = huge_pte_lock(h, mm, ptep); ptl = huge_pte_lock(h, mm, ptep);
size = i_size_read(mapping->host) >> huge_page_shift(h); size = i_size_read(mapping->host) >> huge_page_shift(h);
if (idx >= size) if (idx >= size)
...@@ -4618,11 +4632,13 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm, ...@@ -4618,11 +4632,13 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
hugetlb_count_add(pages_per_huge_page(h), mm); hugetlb_count_add(pages_per_huge_page(h), mm);
if (charge_hpage)
mem_cgroup_commit_charge(page, memcg, false, true);
spin_unlock(ptl); spin_unlock(ptl);
if (new_page) { if (new_page)
SetPagePrivate(&page[1]); SetPagePrivate(&page[1]);
}
unlock_page(page); unlock_page(page);
out: out:
...@@ -4631,6 +4647,8 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm, ...@@ -4631,6 +4647,8 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
backout: backout:
spin_unlock(ptl); spin_unlock(ptl);
unlock_page(page); unlock_page(page);
if (charge_hpage)
mem_cgroup_cancel_charge(page, memcg, true);
put_page(page); put_page(page);
goto out; goto out;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册