Commit df406551 authored by Vladimir Davydov, committed by Linus Torvalds

memcg: simplify and inline __mem_cgroup_from_kmem

Before the previous patch ("memcg: unify slab and other kmem pages
charging"), __mem_cgroup_from_kmem had to handle two types of kmem:
slab pages, which reference their memcg through page->slab_cache, and
pages allocated with alloc_kmem_pages, which store the memcg directly
in the page struct.  Now that both kinds are charged the same way, the
lookup can be unified.  And since that makes the function tiny, we can
fold it into mem_cgroup_from_kmem.

[hughd@google.com: move mem_cgroup_from_kmem into list_lru.c]
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent f3ccb2c4
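
At a glance, the change collapses a two-way memcg lookup into a single
load of page->mem_cgroup.  A condensed view of the before and after
(both versions appear in full in the diff below):

	/* Before: branch on the page type to find the owning memcg. */
	page = virt_to_head_page(ptr);
	if (PageSlab(page)) {
		if (!is_root_cache(page->slab_cache))
			memcg = page->slab_cache->memcg_params.memcg;
	} else
		memcg = page->mem_cgroup;

	/* After: every charged kmem page records its memcg in the page struct. */
	memcg = virt_to_head_page(ptr)->mem_cgroup;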
...@@ -770,8 +770,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) ...@@ -770,8 +770,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
void __memcg_kmem_put_cache(struct kmem_cache *cachep); void __memcg_kmem_put_cache(struct kmem_cache *cachep);
struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
static inline bool __memcg_kmem_bypass(gfp_t gfp) static inline bool __memcg_kmem_bypass(gfp_t gfp)
{ {
if (!memcg_kmem_enabled()) if (!memcg_kmem_enabled())
...@@ -830,13 +828,6 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) ...@@ -830,13 +828,6 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
if (memcg_kmem_enabled()) if (memcg_kmem_enabled())
__memcg_kmem_put_cache(cachep); __memcg_kmem_put_cache(cachep);
} }
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
if (!memcg_kmem_enabled())
return NULL;
return __mem_cgroup_from_kmem(ptr);
}
#else #else
#define for_each_memcg_cache_index(_idx) \ #define for_each_memcg_cache_index(_idx) \
for (; NULL; ) for (; NULL; )
...@@ -882,11 +873,5 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) ...@@ -882,11 +873,5 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{ {
} }
static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
return NULL;
}
#endif /* CONFIG_MEMCG_KMEM */ #endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */ #endif /* _LINUX_MEMCONTROL_H */
mm/list_lru.c

@@ -63,6 +63,16 @@ list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
 	return &nlru->lru;
 }
 
+static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
+{
+	struct page *page;
+
+	if (!memcg_kmem_enabled())
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+	return page->mem_cgroup;
+}
+
 static inline struct list_lru_one *
 list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
 {
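
For context, the helper lands in this file because its only caller is
list_lru_from_kmem() directly below; at the time of this commit that
caller read roughly as follows (reconstructed for illustration, not
part of this diff):

	static inline struct list_lru_one *
	list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
	{
		struct mem_cgroup *memcg;

		if (!nlru->memcg_lrus)
			return &nlru->lru;

		/* Resolve the object's memcg to pick the matching per-memcg LRU. */
		memcg = mem_cgroup_from_kmem(ptr);
		if (!memcg)
			return &nlru->lru;

		return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	}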
mm/memcontrol.c

@@ -2430,24 +2430,6 @@ void __memcg_kmem_uncharge(struct page *page, int order)
 	page->mem_cgroup = NULL;
 	css_put_many(&memcg->css, nr_pages);
 }
-
-struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr)
-{
-	struct mem_cgroup *memcg = NULL;
-	struct kmem_cache *cachep;
-	struct page *page;
-
-	page = virt_to_head_page(ptr);
-	if (PageSlab(page)) {
-		cachep = page->slab_cache;
-		if (!is_root_cache(cachep))
-			memcg = cachep->memcg_params.memcg;
-	} else
-		/* page allocated by alloc_kmem_pages */
-		memcg = page->mem_cgroup;
-
-	return memcg;
-}
-
 #endif /* CONFIG_MEMCG_KMEM */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
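
Dropping the PageSlab() branch is safe because the parent commit
f3ccb2c4 ("memcg: unify slab and other kmem pages charging") routes
slab allocations through the same charging path that commits the memcg
to page->mem_cgroup.  A rough sketch of that charging helper from
mm/slab.h (reconstructed for illustration, not part of this diff):

	static __always_inline int memcg_charge_slab(struct page *page,
						     gfp_t gfp, int order,
						     struct kmem_cache *s)
	{
		if (!memcg_kmem_enabled())
			return 0;
		if (is_root_cache(s))
			return 0;
		/* Charges the cache's memcg and sets page->mem_cgroup. */
		return __memcg_kmem_charge_memcg(page, gfp, order,
						 s->memcg_params.memcg);
	}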