Commit e3f73e77 authored by Muchun Song, committed by Zheng Zengkai

mm: memcontrol: introduce obj_cgroup_{un}charge_pages

mainline inclusion
from mainline-v5.13-rc1
commit e74d2259
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4C0GB
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=e74d225910ec3a9999f06934afa068b6a30babf8

----------------------------------------------------------------------

We know that the unit of slab object charging is bytes, while the unit of kmem
page charging is PAGE_SIZE.  If we want to reuse the obj_cgroup APIs to
charge kmem pages, we would have to pass PAGE_SIZE (as the third parameter) to
obj_cgroup_charge().  Because the size is already PAGE_SIZE, we can skip
touching the objcg stock.  So obj_cgroup_{un}charge_pages() are introduced
to charge in units of pages.
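As a worked illustration of that byte/page split (not part of the patch; it assumes PAGE_SIZE = 4096 and simply traces the arithmetic of obj_cgroup_charge() shown in the diff below):

	/*
	 * Illustrative example only: charging an 8704-byte object
	 * (2 pages + 512 bytes) through obj_cgroup_charge(),
	 * assuming PAGE_SIZE = 4096.
	 */
	size_t size = 2 * 4096 + 512;			/* 8704 bytes */
	unsigned int nr_pages = size >> 12;		/* size >> PAGE_SHIFT = 2 */
	unsigned int nr_bytes = size & (4096 - 1);	/* remainder = 512 */

	/*
	 * nr_bytes != 0, so nr_pages is bumped to 3: three whole pages are
	 * charged, and the unused part of the last page (PAGE_SIZE - nr_bytes
	 * = 3584 bytes) is returned to the per-cpu objcg stock via
	 * refill_obj_stock().  When the size is already a whole number of
	 * pages, as with kmem pages, nr_bytes is 0 and the stock is never
	 * touched.
	 */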

In a later patch, we can also reuse these two helpers to charge or
uncharge a number of kernel pages to or from an object cgroup.  This patch is
just code movement without any functional changes.
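For context, a minimal sketch (not part of this patch) of how such a follow-up could route kmem page charging through the new helpers; the example_*() wrappers and their order-based interface are assumptions made up for illustration:

	/*
	 * Hypothetical wrappers, for illustration only: charge and uncharge
	 * 2^order kernel pages through an obj_cgroup using the helpers
	 * introduced below, bypassing the byte-based objcg stock entirely.
	 */
	static int example_charge_kmem_pages(struct obj_cgroup *objcg,
					     gfp_t gfp, int order)
	{
		return obj_cgroup_charge_pages(objcg, gfp, 1 << order);
	}

	static void example_uncharge_kmem_pages(struct obj_cgroup *objcg,
						int order)
	{
		obj_cgroup_uncharge_pages(objcg, 1 << order);
	}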

Link: https://lkml.kernel.org/r/20210319163821.20704-3-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Roman Gushchin <guro@fb.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Chen Huang <chenhuang5@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Chen Wandun <chenwandun@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 492cf0b0
@@ -2844,6 +2844,20 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg)
 	page->memcg_data = (unsigned long)memcg;
 }
 
+static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
+{
+	struct mem_cgroup *memcg;
+
+	rcu_read_lock();
+retry:
+	memcg = obj_cgroup_memcg(objcg);
+	if (unlikely(!css_tryget(&memcg->css)))
+		goto retry;
+	rcu_read_unlock();
+
+	return memcg;
+}
+
 #ifdef CONFIG_MEMCG_KMEM
 /*
  * The allocated objcg pointers array is not accounted directly.
@@ -2988,6 +3002,29 @@ static void memcg_free_cache_id(int id)
 	ida_simple_remove(&memcg_cache_ida, id);
 }
 
+static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
+				      unsigned int nr_pages)
+{
+	struct mem_cgroup *memcg;
+
+	memcg = get_mem_cgroup_from_objcg(objcg);
+	__memcg_kmem_uncharge(memcg, nr_pages);
+	css_put(&memcg->css);
+}
+
+static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
+				   unsigned int nr_pages)
+{
+	struct mem_cgroup *memcg;
+	int ret;
+
+	memcg = get_mem_cgroup_from_objcg(objcg);
+	ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
+	css_put(&memcg->css);
+
+	return ret;
+}
+
 /**
  * __memcg_kmem_charge: charge a number of kernel pages to a memcg
  * @memcg: memory cgroup to charge
@@ -3112,19 +3149,8 @@ static void drain_obj_stock(struct memcg_stock_pcp *stock)
 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
 
-		if (nr_pages) {
-			struct mem_cgroup *memcg;
-
-			rcu_read_lock();
-retry:
-			memcg = obj_cgroup_memcg(old);
-			if (unlikely(!css_tryget(&memcg->css)))
-				goto retry;
-			rcu_read_unlock();
-
-			__memcg_kmem_uncharge(memcg, nr_pages);
-			css_put(&memcg->css);
-		}
+		if (nr_pages)
+			obj_cgroup_uncharge_pages(old, nr_pages);
 
 		/*
 		 * The leftover is flushed to the centralized per-memcg value.
@@ -3182,7 +3208,6 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
 
 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
 {
-	struct mem_cgroup *memcg;
 	unsigned int nr_pages, nr_bytes;
 	int ret;
 
@@ -3199,24 +3224,16 @@ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
 	 * refill_obj_stock(), called from this function or
 	 * independently later.
 	 */
-	rcu_read_lock();
-retry:
-	memcg = obj_cgroup_memcg(objcg);
-	if (unlikely(!css_tryget(&memcg->css)))
-		goto retry;
-	rcu_read_unlock();
 
 	nr_pages = size >> PAGE_SHIFT;
 	nr_bytes = size & (PAGE_SIZE - 1);
 	if (nr_bytes)
 		nr_pages += 1;
 
-	ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
+	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
 	if (!ret && nr_bytes)
 		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);
 
-	css_put(&memcg->css);
-
 	return ret;
 }
 