/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size  */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#else /* !CONFIG_SLOB */

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches.  While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};
#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];
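
/*
 * Illustrative sketch (not kernel code): kmalloc_info[] is indexed by the
 * kmalloc cache index, so a consumer that already has such an index "idx"
 * (hypothetical here) could look up the slot size and the per-type cache
 * name roughly like this:
 *
 *	unsigned int sz  = kmalloc_info[idx].size;
 *	const char *name = kmalloc_info[idx].name[KMALLOC_NORMAL];
 */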

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));

#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
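
/*
 * Illustrative sketch (not the actual cache-creation code, which lives in
 * the common slab code): a creation-time flags argument is typically
 * validated against this mask and rejected if it carries anything else,
 * e.g.:
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		return NULL;
 */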

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
void kmem_cache_shrink_all(struct kmem_cache *s);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}
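
/*
 * Illustrative sketch (hypothetical caller): a debug-only slow path can be
 * gated on one of the SLAB_DEBUG_FLAGS bits without paying for the check
 * when slub_debug is disabled, e.g.:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		print_tracking(s, object);
 */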

#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
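
/*
 * Illustrative sketch (hypothetical caller): walking every memcg child
 * cache of a root cache must be done under slab_mutex, e.g.:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		pr_info("child cache %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */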

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
{
	/*
	 * page->mem_cgroup and page->obj_cgroups are sharing the same
	 * space. To distinguish between them in case we don't know for sure
	 * that the page is a slab page (e.g. page_cgroup_ino()), let's
	 * always set the lowest bit of obj_cgroups.
	 */
	return (struct obj_cgroup **)
		((unsigned long)page->obj_cgroups & ~0x1UL);
}

/*
 * Expects a pointer to a slab page. Please note that the PageSlab() check
 * isn't sufficient, as it returns true also for tail compound slab pages,
 * which do not have the slab_cache pointer set.
 * So this function assumes that the page can pass the PageSlab() &&
 * !PageTail() check.
 *
 * The kmem_cache can be reparented asynchronously. The caller must ensure
 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
 */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	struct kmem_cache *s;

	s = READ_ONCE(page->slab_cache);
	if (s && !is_root_cache(s))
		return READ_ONCE(s->memcg_params.memcg);

	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp)
{
	unsigned int objects = objs_per_slab_page(s, page);
	void *vec;

	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
			   page_to_nid(page));
	if (!vec)
		return -ENOMEM;

	kmemleak_not_leak(vec);
	page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL);
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
	kfree(page_obj_cgroups(page));
	page->obj_cgroups = NULL;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
						struct obj_cgroup **objcgp,
						size_t objects, gfp_t flags)
{
	struct kmem_cache *cachep;

	cachep = memcg_kmem_get_cache(s, objcgp);
	if (is_root_cache(cachep))
		return s;

	if (obj_cgroup_charge(*objcgp, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(*objcgp);
		memcg_kmem_put_cache(cachep);
		cachep = NULL;
	}

	return cachep;
}

static inline void mod_objcg_state(struct obj_cgroup *objcg,
				   struct pglist_data *pgdat,
				   int idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      size_t size, void **p)
{
	struct page *page;
	unsigned long off;
	size_t i;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			page = virt_to_head_page(p[i]);
			off = obj_to_index(s, page, p[i]);
			obj_cgroup_get(objcg);
			page_obj_cgroups(page)[off] = objcg;
			mod_objcg_state(objcg, page_pgdat(page),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
	memcg_kmem_put_cache(s);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
					void *p)
{
	struct obj_cgroup *objcg;
	unsigned int off;

	if (!memcg_kmem_enabled() || is_root_cache(s))
		return;

	off = obj_to_index(s, page, p);
	objcg = page_obj_cgroups(page)[off];
	page_obj_cgroups(page)[off] = NULL;

	obj_cgroup_uncharge(objcg, obj_full_size(s));
	mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
			-obj_full_size(s));

	obj_cgroup_put(objcg);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return s == p;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
					       struct kmem_cache *s, gfp_t gfp)
{
	return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
						struct obj_cgroup **objcgp,
						size_t objects, gfp_t flags)
{
	return NULL;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      size_t size, void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
					void *p)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s,
				    struct mem_cgroup *memcg)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline int charge_slab_page(struct page *page,
					    gfp_t gfp, int order,
					    struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG_KMEM
	if (memcg_kmem_enabled() && !is_root_cache(s)) {
		int ret;

		ret = memcg_alloc_page_obj_cgroups(page, s, gfp);
		if (ret)
			return ret;

		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
	}
#endif
	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
	return 0;
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG_KMEM
	if (memcg_kmem_enabled() && !is_root_cache(s)) {
		memcg_free_page_obj_cgroups(page);
		percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
	}
#endif
	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !memcg_kmem_enabled() &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && !slab_equal_or_root(cachep, s),
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_slab_pre_alloc_hook(s, objcgp, size, flags);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg,
					gfp_t flags, size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled() && !is_root_cache(s))
		memcg_slab_post_alloc_hook(s, objcg, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
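
/*
 * Illustrative sketch (hypothetical caller): summing the partial-slab
 * counts of a SLUB cache across all nodes, e.g.:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */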

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

765 766
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
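
/*
 * Illustrative sketch (not the allocators' actual code): these helpers are
 * meant to be consulted on the allocation and free paths, roughly:
 *
 *	if (slab_want_init_on_alloc(flags, s))
 *		memset(object, 0, s->object_size);
 *	...
 *	if (slab_want_init_on_free(s))
 *		memset(object, 0, s->object_size);
 */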

#endif /* MM_SLAB_H */