/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>

DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 struct kmem_cache *s,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	unsigned long,	gfp_flags	)
		__field(	bool,		accounted	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG_KMEM) ?
					  ((gfp_flags & __GFP_ACCOUNT) ||
					  (s && s->flags & SLAB_ACCOUNT)) : false;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->accounted ? "true" : "false")
);
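
/*
 * A hypothetical sample line, as rendered by the TP_printk() format above
 * (wrapped here for readability; the values are made up, and %p hashes
 * the pointer by default):
 *
 *   kmalloc: call_site=__alloc_skb+0x8c/0x1a0 ptr=00000000abcd1234
 *            bytes_req=232 bytes_alloc=256 gfp_flags=GFP_KERNEL accounted=false
 *
 * bytes_alloc can exceed bytes_req because the request is rounded up to
 * the object size of the backing slab cache.
 */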

DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
);
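
/*
 * Usage sketch (not part of this header): these events can be enabled at
 * run time through tracefs, assuming it is mounted in the usual place:
 *
 *   echo 1 > /sys/kernel/tracing/events/kmem/kmalloc/enable
 *   echo 1 > /sys/kernel/tracing/events/kmem/kmem_cache_alloc/enable
 *   cat /sys/kernel/tracing/trace_pipe
 */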

DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 struct kmem_cache *s,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	unsigned long,	gfp_flags	)
		__field(	int,		node		)
		__field(	bool,		accounted	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->node		= node;
		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG_KMEM) ?
					  ((gfp_flags & __GFP_ACCOUNT) ||
					  (s && s->flags & SLAB_ACCOUNT)) : false;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node,
		__entry->accounted ? "true" : "false")
);
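
/*
 * Same fields as kmem_alloc plus the NUMA node the caller asked for;
 * node=-1 (NUMA_NO_NODE) means no node preference. Hypothetical sample:
 *
 *   kmalloc_node: call_site=... ptr=... bytes_req=64 bytes_alloc=64
 *                 gfp_flags=GFP_KERNEL node=0 accounted=false
 */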

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);

TRACE_EVENT(kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%pS ptr=%p",
		  (void *)__entry->call_site, __entry->ptr)
);

TRACE_EVENT(kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr, const char *name),

	TP_ARGS(call_site, ptr, name),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__string(	name,	name	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__assign_str(name, name);
	),

	TP_printk("call_site=%pS ptr=%p name=%s",
		  (void *)__entry->call_site, __entry->ptr, __get_str(name))
);
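
/*
 * Unlike the allocation events above, the cache name is captured with
 * __string()/__assign_str(), so each ring-buffer entry carries its own
 * copy of the name; the string stays readable even if the cache is
 * destroyed before the event is consumed.
 */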

TRACE_EVENT(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=0x%lx order=%d",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->order)
);
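
/*
 * Only the pfn is recorded in the ring buffer; the page pointer in the
 * output is recomputed with pfn_to_page() at print time.
 */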

TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page),

	TP_ARGS(page),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
	),

	TP_printk("page=%p pfn=0x%lx order=0",
			pfn_to_page(__entry->pfn),
			__entry->pfn)
);

TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	unsigned long,	gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);
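
/*
 * Hypothetical sample for a single order-0 page from the movable free
 * list (MIGRATE_MOVABLE == 1):
 *
 *   mm_page_alloc: page=00000000deadbeef pfn=0x1a2b3c order=0
 *                  migratetype=1 gfp_flags=GFP_HIGHUSER_MOVABLE
 *
 * pfn is set to -1UL as a sentinel when the allocation failed
 * (page == NULL).
 */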

DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype,
		 int percpu_refill),

	TP_ARGS(page, order, migratetype, percpu_refill),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
		__field(	int,		percpu_refill	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
		__entry->percpu_refill	= percpu_refill;
	),

	TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		__entry->percpu_refill)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype,
		 int percpu_refill),

	TP_ARGS(page, order, migratetype, percpu_refill)
);

TRACE_EVENT(mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d",
		pfn_to_page(__entry->pfn), __entry->pfn,
		__entry->order, __entry->migratetype)
);

TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		int alloc_order, int fallback_order,
		int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
		__field(	int,		change_ownership	)
	),

	TP_fast_assign(
		__entry->pfn			= page_to_pfn(page);
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
		__entry->change_ownership	= (alloc_migratetype ==
					get_pageblock_migratetype(page));
	),

	TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);
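
/*
 * Two of the printed values are derived rather than stored: fragmenting=1
 * means the fallback was smaller than a pageblock (fallback_order <
 * pageblock_order) and therefore splits a pageblock of another
 * migratetype; change_ownership=1 means the whole pageblock was converted
 * to the allocating migratetype instead.
 */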

/*
 * Required for uniquely and securely identifying mm in rss_stat tracepoint.
 */
#ifndef __PTR_TO_HASHVAL
static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
{
	int ret;
	unsigned long hashval;

	ret = ptr_to_hashval(ptr, &hashval);
	if (ret)
		return 0;

	/* The hashed value is only 32-bit */
	return (unsigned int)hashval;
}
#define __PTR_TO_HASHVAL
#endif
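
/*
 * ptr_to_hashval() returns the same hashed value that a plain "%p" would
 * print, so mm_id is stable for the lifetime of the mm without exposing
 * the raw kernel address.
 */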

#define TRACE_MM_PAGES		\
	EM(MM_FILEPAGES)	\
	EM(MM_ANONPAGES)	\
	EM(MM_SWAPENTS)		\
	EMe(MM_SHMEMPAGES)

#undef EM
#undef EMe

#define EM(a)	TRACE_DEFINE_ENUM(a);
#define EMe(a)	TRACE_DEFINE_ENUM(a);

TRACE_MM_PAGES

#undef EM
#undef EMe

#define EM(a)	{ a, #a },
#define EMe(a)	{ a, #a }
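
/*
 * The double expansion above is the usual tracepoint idiom: the first
 * pass (TRACE_DEFINE_ENUM) exports the enum values to user space, the
 * second builds the { value, "name" } pairs that __print_symbolic() in
 * rss_stat below consumes.
 */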

TRACE_EVENT(rss_stat,

	TP_PROTO(struct mm_struct *mm,
		int member,
		long count),

	TP_ARGS(mm, member, count),

	TP_STRUCT__entry(
		__field(unsigned int, mm_id)
		__field(unsigned int, curr)
		__field(int, member)
		__field(long, size)
	),

	TP_fast_assign(
		__entry->mm_id = mm_ptr_to_hash(mm);
		__entry->curr = !!(current->mm == mm);
		__entry->member = member;
		__entry->size = (count << PAGE_SHIFT);
	),

	TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
		__entry->mm_id,
		__entry->curr,
		__print_symbolic(__entry->member, TRACE_MM_PAGES),
		__entry->size)
);
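
/*
 * count is in pages, size in bytes (count << PAGE_SHIFT): with a
 * hypothetical 4K page size, count=3 prints as size=12288B. mm_id comes
 * from mm_ptr_to_hash() above, so a given mm can be tracked across
 * events without leaking its address.
 */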
#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>