Commit b347aa7b    Author: Vasily Averin    Committer: Vlastimil Babka

mm/tracing: add 'accounted' entry into output of allocation tracepoints

Slab caches marked with SLAB_ACCOUNT force accounting for every
allocation from such a cache even if the __GFP_ACCOUNT flag is not passed.
Unfortunately, this is currently not visible in ftrace output, which makes
it difficult to analyze the accounted allocations.

This patch adds a boolean "accounted" entry to the trace output. It is set
to 'true' for calls that use the __GFP_ACCOUNT flag and for allocations from
caches marked with SLAB_ACCOUNT, and to 'false' when kernel memory accounting
is disabled in the kernel config.
Signed-off-by: Vasily Averin <vvs@openvz.org>
Acked-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Link: https://lore.kernel.org/r/c418ed25-65fe-f623-fbf8-1676528859ed@openvz.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Parent 0c7e0d69
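For readers analyzing the new field, here is a minimal, hypothetical kernel-module sketch (not part of this patch; the names demo_cache, demo_obj and the module boilerplate are invented for illustration) showing the cases the "accounted" entry distinguishes when CONFIG_MEMCG_KMEM is enabled:

#include <linux/module.h>
#include <linux/slab.h>

/* Illustrative object type; not taken from the patch. */
struct demo_obj {
	int payload;
};

static struct kmem_cache *demo_cache;

static int __init demo_init(void)
{
	struct demo_obj *obj;
	void *buf;

	/* SLAB_ACCOUNT: every allocation from this cache is charged to the
	 * memory cgroup, so the tracepoint reports accounted=true for it. */
	demo_cache = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
				       0, SLAB_ACCOUNT, NULL);
	if (!demo_cache)
		return -ENOMEM;

	obj = kmem_cache_alloc(demo_cache, GFP_KERNEL);	/* accounted=true */
	if (obj)
		kmem_cache_free(demo_cache, obj);

	buf = kmalloc(64, GFP_KERNEL | __GFP_ACCOUNT);	/* accounted=true */
	kfree(buf);

	buf = kmalloc(64, GFP_KERNEL);			/* accounted=false */
	kfree(buf);

	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With CONFIG_MEMCG_KMEM disabled, all three allocations would be reported with accounted=false.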
@@ -13,11 +13,12 @@ DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
+		 struct kmem_cache *s,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field( unsigned long, call_site )
@@ -25,6 +26,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
		__field( size_t, bytes_req )
		__field( size_t, bytes_alloc )
		__field( unsigned long, gfp_flags )
+		__field( bool, accounted )
	),

	TP_fast_assign(
@@ -33,42 +35,47 @@ DECLARE_EVENT_CLASS(kmem_alloc,
		__entry->bytes_req = bytes_req;
		__entry->bytes_alloc = bytes_alloc;
		__entry->gfp_flags = (__force unsigned long)gfp_flags;
+		__entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+				     ((gfp_flags & __GFP_ACCOUNT) ||
+				     (s && s->flags & SLAB_ACCOUNT)) : false;
	),

-	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
+	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
-		show_gfp_flags(__entry->gfp_flags))
+		show_gfp_flags(__entry->gfp_flags),
+		__entry->accounted ? "true" : "false")
);

DEFINE_EVENT(kmem_alloc, kmalloc,

-	TP_PROTO(unsigned long call_site, const void *ptr,
+	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

-	TP_PROTO(unsigned long call_site, const void *ptr,
+	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
);

DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
+		 struct kmem_cache *s,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field( unsigned long, call_site )
@@ -77,6 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
		__field( size_t, bytes_alloc )
		__field( unsigned long, gfp_flags )
		__field( int, node )
+		__field( bool, accounted )
	),

	TP_fast_assign(
@@ -86,33 +94,37 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
		__entry->bytes_alloc = bytes_alloc;
		__entry->gfp_flags = (__force unsigned long)gfp_flags;
		__entry->node = node;
+		__entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+				     ((gfp_flags & __GFP_ACCOUNT) ||
+				     (s && s->flags & SLAB_ACCOUNT)) : false;
	),

-	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
-		__entry->node)
+		__entry->node,
+		__entry->accounted ? "true" : "false")
);

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
-		 size_t bytes_req, size_t bytes_alloc,
+		 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
-		 size_t bytes_req, size_t bytes_alloc,
+		 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);

TRACE_EVENT(kfree,
...
@@ -3478,7 +3478,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
{
	void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_);

-	trace_kmem_cache_alloc(_RET_IP_, ret,
+	trace_kmem_cache_alloc(_RET_IP_, ret, cachep,
			       cachep->object_size, cachep->size, flags);

	return ret;
@@ -3567,7 +3567,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
	ret = slab_alloc(cachep, NULL, flags, size, _RET_IP_);

	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmalloc(_RET_IP_, ret,
+	trace_kmalloc(_RET_IP_, ret, cachep,
		      size, cachep->size, flags);
	return ret;
}
@@ -3592,7 +3592,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_);

-	trace_kmem_cache_alloc_node(_RET_IP_, ret,
+	trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep,
				    cachep->object_size, cachep->size,
				    flags, nodeid);
@@ -3611,7 +3611,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
	ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_);

	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmalloc_node(_RET_IP_, ret,
+	trace_kmalloc_node(_RET_IP_, ret, cachep,
			   size, cachep->size,
			   flags, nodeid);
	return ret;
@@ -3694,7 +3694,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
	ret = slab_alloc(cachep, NULL, flags, size, caller);

	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmalloc(caller, ret,
+	trace_kmalloc(caller, ret, cachep,
		      size, cachep->size, flags);

	return ret;
...
@@ -26,13 +26,12 @@
#include <linux/memcontrol.h>
#include <linux/stackdepot.h>

-#define CREATE_TRACE_POINTS
-#include <trace/events/kmem.h>
-
#include "internal.h"
-
#include "slab.h"

+#define CREATE_TRACE_POINTS
+#include <trace/events/kmem.h>
+
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
@@ -959,7 +958,7 @@ EXPORT_SYMBOL(kmalloc_order);
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
-	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
...
@@ -507,7 +507,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
		*m = size;
		ret = (void *)m + minalign;

-		trace_kmalloc_node(caller, ret,
+		trace_kmalloc_node(caller, ret, NULL,
				   size, size + minalign, gfp, node);
	} else {
		unsigned int order = get_order(size);
@@ -516,7 +516,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);

-		trace_kmalloc_node(caller, ret,
+		trace_kmalloc_node(caller, ret, NULL,
				   size, PAGE_SIZE << order, gfp, node);
	}
@@ -616,12 +616,12 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node, 0);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+		trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+		trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}
...
@@ -3257,7 +3257,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
{
	void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);

-	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+	trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size,
			       s->size, gfpflags);

	return ret;
@@ -3280,7 +3280,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_lru);
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
-	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags);
	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
@@ -3292,7 +3292,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);

-	trace_kmem_cache_alloc_node(_RET_IP_, ret,
+	trace_kmem_cache_alloc_node(_RET_IP_, ret, s,
				    s->object_size, s->size, gfpflags, node);

	return ret;
@@ -3306,7 +3306,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
{
	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);

-	trace_kmalloc_node(_RET_IP_, ret,
+	trace_kmalloc_node(_RET_IP_, ret, s,
			   size, s->size, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
@@ -4441,7 +4441,7 @@ void *__kmalloc(size_t size, gfp_t flags)
	ret = slab_alloc(s, NULL, flags, _RET_IP_, size);

-	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
+	trace_kmalloc(_RET_IP_, ret, s, size, s->size, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
@@ -4475,7 +4475,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
		ret = kmalloc_large_node(size, flags, node);

-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(_RET_IP_, ret, NULL,
				   size, PAGE_SIZE << get_order(size),
				   flags, node);
@@ -4489,7 +4489,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
	ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size);

-	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
+	trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, flags, node);

	ret = kasan_kmalloc(s, ret, size, flags);
@@ -4946,7 +4946,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
	ret = slab_alloc(s, NULL, gfpflags, caller, size);

	/* Honor the call site pointer we received. */
-	trace_kmalloc(caller, ret, size, s->size, gfpflags);
+	trace_kmalloc(caller, ret, s, size, s->size, gfpflags);

	return ret;
}
@@ -4962,7 +4962,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
		ret = kmalloc_large_node(size, gfpflags, node);

-		trace_kmalloc_node(caller, ret,
+		trace_kmalloc_node(caller, ret, NULL,
				   size, PAGE_SIZE << get_order(size),
				   gfpflags, node);
@@ -4977,7 +4977,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
	ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size);

	/* Honor the call site pointer we received. */
-	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
+	trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);

	return ret;
}
...
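After this change, the new field appears at the end of each allocation event. Assuming the kmem events are enabled via tracefs (for example by writing 1 to /sys/kernel/debug/tracing/events/kmem/kmem_cache_alloc/enable), a trace line follows the TP_printk format above; the call site, pointer, sizes and flags below are purely illustrative:

	kmem_cache_alloc: call_site=some_function+0x32/0x1a0 ptr=000000001f2a3b4c bytes_req=256 bytes_alloc=256 gfp_flags=GFP_KERNEL_ACCOUNT accounted=true

An allocation is reported as accounted=true when __GFP_ACCOUNT is set in gfp_flags or when the backing cache carries SLAB_ACCOUNT; if CONFIG_MEMCG_KMEM is not configured, the field is always false.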