/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and dynamic
 * areas.  Percpu areas are allocated in chunks.  Each chunk consists
 * of a boot-time determined number of units and the first chunk is
 * used for static percpu variables in the kernel image
 * (special boot time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  That is,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps a hint of its maximum contiguous area size which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  Each entry is the byte offset of an area in the
 * unit space; the low bit of an entry marks the area starting at that
 * offset as in use, and a clear low bit marks it as free.  Allocation
 * inside a chunk is done by scanning this map and serving the first
 * matching area.
 *
 * Chunks can be determined from the address using the index field
 * in the page struct.  The index field contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
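/*
 * Illustrative example of the map encoding (values chosen for the
 * example only): with chunk->map = { 0, 128 | 1, 512, 4096 | 1 } and
 * map_used == 3, the chunk describes a free area at [0, 128), an
 * allocated area at [128, 512) and a free area at [512, 4096);
 * map[map_used] holds the end offset of the last area.
 */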

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
#define PCPU_ATOMIC_MAP_MARGIN_LOW	32
#define PCPU_ATOMIC_MAP_MARGIN_HIGH	64
#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr	+		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
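/*
 * With the default definitions above, a percpu pointer is simply the
 * address rebased from pcpu_base_addr onto __per_cpu_start; accessors
 * such as per_cpu_ptr() later add the target cpu's unit offset back to
 * obtain a dereferenceable address.
 */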

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The offset limit of
 * the reserved area is kept in pcpu_reserved_chunk_limit.  When the
 * reserved area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
static int pcpu_reserved_chunk_limit __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
static int pcpu_nr_empty_pop_pages;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
		addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
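/*
 * Example (illustrative): with PCPU_SLOT_BASE_SHIFT == 5, a 4-byte
 * request maps to slot 1 (fls(4) == 3), a 512-byte request to slot 7
 * (fls(512) == 10), and a request for a whole free unit to the last
 * slot.
 */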

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
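/*
 * Example usage (illustrative, roughly mirroring pcpu_alloc() and
 * pcpu_balance_workfn() below): populate every currently unpopulated
 * page region of a chunk.
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		pcpu_populate_chunk(chunk, rs, re);
 */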

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

/**
 * pcpu_count_occupied_pages - count the number of pages an area occupies
 * @chunk: chunk of interest
 * @i: index of the area in question
 *
 * Count the number of pages chunk's @i'th area occupies.  When the area's
 * start and/or end address isn't aligned to page boundary, the straddled
 * page is included in the count iff the rest of the page is free.
 */
static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
{
	int off = chunk->map[i] & ~1;
	int end = chunk->map[i + 1] & ~1;

	if (!PAGE_ALIGNED(off) && i > 0) {
		int prev = chunk->map[i - 1];

		if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
			off = round_down(off, PAGE_SIZE);
	}

	if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
		int next = chunk->map[i + 1];
		int nend = chunk->map[i + 2] & ~1;

		if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
			end = round_up(end, PAGE_SIZE);
	}

	return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
}
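/*
 * For example, an area covering [0, 3.5 * PAGE_SIZE) counts as three
 * pages when the following area is allocated, but as four when the
 * following area is free and extends past the next page boundary.
 */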

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 * @is_atomic: the allocation context
 *
 * Determine whether the area map of @chunk needs to be extended.  If
 * @is_atomic, only the amount necessary for a new allocation is
 * considered; however, async extension is scheduled if the amount left is
 * low.  If !@is_atomic, it aims for more empty space.  Combined, this
 * ensures that the map is likely to have enough available space to
 * accommodate atomic allocations which can't extend maps directly.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
{
	int margin, new_alloc;

	lockdep_assert_held(&pcpu_lock);

	if (is_atomic) {
		margin = 3;

		if (chunk->map_alloc <
		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
			if (list_empty(&chunk->map_extend_list)) {
				list_add_tail(&chunk->map_extend_list,
					      &pcpu_map_extend_chunks);
				pcpu_schedule_balance_work();
			}
		}
	} else {
		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
	}

	if (chunk->map_alloc >= chunk->map_used + margin)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + margin)
		new_alloc *= 2;

	return new_alloc;
}
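/*
 * For example (illustrative numbers), a non-atomic allocation in a chunk
 * with map_used == 40 and map_alloc == 64 needs room for 40 +
 * PCPU_ATOMIC_MAP_MARGIN_HIGH entries, so the target map length is grown
 * by doubling from PCPU_DFL_MAP_ALLOC (16) up to 128.
 */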

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	lockdep_assert_held(&pcpu_alloc_mutex);

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old);
	pcpu_mem_free(new);

	return 0;
}

/**
 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
 * @chunk: chunk the candidate area belongs to
 * @off: the offset to the start of the candidate area
 * @this_size: the size of the candidate area
 * @size: the size of the target allocation
 * @align: the alignment of the target allocation
 * @pop_only: only allocate from already populated region
 *
 * We're trying to allocate @size bytes aligned at @align.  @chunk's area
 * at @off sized @this_size is a candidate.  This function determines
 * whether the target allocation fits in the candidate area and returns the
 * number of bytes to pad after @off.  If the target area doesn't fit, -1
 * is returned.
 *
 * If @pop_only is %true, this function only considers the already
 * populated part of the candidate area.
 */
static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
			    int size, int align, bool pop_only)
{
	int cand_off = off;

	while (true) {
		int head = ALIGN(cand_off, align) - off;
		int page_start, page_end, rs, re;

		if (this_size < head + size)
			return -1;

		if (!pop_only)
			return head;

		/*
		 * If the first unpopulated page is beyond the end of the
		 * allocation, the whole allocation is populated;
		 * otherwise, retry from the end of the unpopulated area.
		 */
		page_start = PFN_DOWN(head + off);
		page_end = PFN_UP(head + off + size);

		rs = page_start;
		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
		if (rs >= page_end)
			return head;
		cand_off = re * PAGE_SIZE;
	}
}

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 * @pop_only: allocate only from the populated area
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
			   bool pop_only, int *occ_pages_p)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;
	bool seen_free = false;
	int *p;

	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
		int head, tail;
		int this_size;

		off = *p;
		if (off & 1)
			continue;

		this_size = (p[1] & ~1) - off;

		head = pcpu_fit_in_area(chunk, off, this_size, size, align,
					pop_only);
		if (head < 0) {
			if (!seen_free) {
				chunk->first_free = i;
				seen_free = true;
			}
			max_contig = max(this_size, max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
			*p = off += head;
			if (p[-1] & 1)
				chunk->free_size -= head;
			else
				max_contig = max(*p - p[-1], max_contig);
			this_size -= head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = this_size - head - size;
		if (tail < sizeof(int)) {
			tail = 0;
			size = this_size - head;
		}

		/* split if warranted */
		if (head || tail) {
			int nr_extra = !!head + !!tail;

			/* insert new subblocks */
			memmove(p + nr_extra + 1, p + 1,
				sizeof(chunk->map[0]) * (chunk->map_used - i));
			chunk->map_used += nr_extra;

			if (head) {
				if (!seen_free) {
					chunk->first_free = i;
					seen_free = true;
				}
				*++p = off += head;
				++i;
				max_contig = max(head, max_contig);
			}
			if (tail) {
				p[1] = off + size;
				max_contig = max(tail, max_contig);
			}
		}

		if (!seen_free)
			chunk->first_free = i + 1;

		/* update hint and mark allocated */
		if (i + 1 == chunk->map_used)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= size;
		*p |= 1;

		*occ_pages_p = pcpu_count_occupied_pages(chunk, i);
		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
644
 * @occ_pages_p: out param for the number of pages the area occupies
645 646 647 648
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
649 650 651
 *
 * CONTEXT:
 * pcpu_lock.
652
 */
653 654
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
			   int *occ_pages_p)
655 656
{
	int oslot = pcpu_chunk_slot(chunk);
657 658 659 660 661
	int off = 0;
	unsigned i, j;
	int to_free = 0;
	int *p;

662
	lockdep_assert_held(&pcpu_lock);
663
	pcpu_stats_area_dealloc(chunk);
664

665 666 667 668 669 670 671 672 673 674 675 676 677 678
	freeme |= 1;	/* we are searching for <given offset, in use> pair */

	i = 0;
	j = chunk->map_used;
	while (i != j) {
		unsigned k = (i + j) / 2;
		off = chunk->map[k];
		if (off < freeme)
			i = k + 1;
		else if (off > freeme)
			j = k;
		else
			i = j = k;
	}
	BUG_ON(off != freeme);

	if (i < chunk->first_free)
		chunk->first_free = i;

	p = chunk->map + i;
	*p = off &= ~1;
	chunk->free_size += (p[1] & ~1) - off;

	*occ_pages_p = pcpu_count_occupied_pages(chunk, i);

	/* merge with next? */
	if (!(p[1] & 1))
		to_free++;
	/* merge with previous? */
	if (i > 0 && !(p[-1] & 1)) {
		to_free++;
		i--;
		p--;
	}
	if (to_free) {
		chunk->map_used -= to_free;
		memmove(p + 1, p + 1 + to_free,
			(chunk->map_used - i) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
	if (!chunk->map) {
		pcpu_mem_free(chunk);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[0] = 0;
	chunk->map[1] = pcpu_unit_size | 1;
	chunk->map_used = 1;
	chunk->has_reserved = false;

	INIT_LIST_HEAD(&chunk->list);
	INIT_LIST_HEAD(&chunk->map_extend_list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map);
	pcpu_mem_free(chunk);
}

/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;
	pcpu_nr_empty_pop_pages += nr;
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	pcpu_nr_empty_pop_pages -= nr;
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to physical address
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
				 gfp_t gfp)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
	int occ_pages = 0;
	int slot, off, new_alloc, cpu, ret;
	unsigned long flags;
	void __percpu *ptr;

	/*
	 * We want the lowest bit of offset available for in-use/free
	 * indicator, so force >= 16bit alignment and make size even.
	 */
	if (unlikely(align < 2))
		align = 2;

	size = ALIGN(size, 2);

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
		     !is_power_of_2(align))) {
		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
		     size, align);
		return NULL;
	}

	if (!is_atomic)
		mutex_lock(&pcpu_alloc_mutex);

	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (is_atomic ||
			    pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align, is_atomic,
				      &occ_pages);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk, is_atomic);
			if (new_alloc) {
				if (is_atomic)
					continue;
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align, is_atomic,
					      &occ_pages);
			if (off >= 0)
				goto area_found;
		}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * No space left.  Create a new chunk.  We don't want multiple
	 * tasks to create chunks simultaneously.  Serialize and create iff
	 * there's still no empty chunk after grabbing the mutex.
	 */
	if (is_atomic)
		goto fail;

	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
		chunk = pcpu_create_chunk();
		if (!chunk) {
			err = "failed to allocate new chunk";
			goto fail;
		}

		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_chunk_relocate(chunk, -1);
	} else {
		spin_lock_irqsave(&pcpu_lock, flags);
	}

	goto restart;

area_found:
	pcpu_stats_area_alloc(chunk, size);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate if not all pages are already there */
	if (!is_atomic) {
		int page_start, page_end, rs, re;

		page_start = PFN_DOWN(off);
		page_end = PFN_UP(off + size);

		pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
			WARN_ON(chunk->immutable);

			ret = pcpu_populate_chunk(chunk, rs, re);

			spin_lock_irqsave(&pcpu_lock, flags);
			if (ret) {
				pcpu_free_area(chunk, off, &occ_pages);
				err = "failed to populate";
				goto fail_unlock;
			}
			pcpu_chunk_populated(chunk, rs, re);
			spin_unlock_irqrestore(&pcpu_lock, flags);
		}

		mutex_unlock(&pcpu_alloc_mutex);
	}

	if (chunk != pcpu_reserved_chunk) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_nr_empty_pop_pages -= occ_pages;
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}

	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
		pcpu_schedule_balance_work();

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size, gfp);

	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
			chunk->base_addr, off, ptr);

	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);

	if (!is_atomic && warn_limit) {
		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
			size, align, is_atomic, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("limit reached, disable warning\n");
	}
	if (is_atomic) {
		/* see the flag handling in pcpu_balance_workfn() */
		pcpu_atomic_alloc_failed = true;
		pcpu_schedule_balance_work();
	} else {
		mutex_unlock(&pcpu_alloc_mutex);
	}
	return NULL;
}

/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
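/*
 * Typical usage (illustrative sketch only):
 *
 *	int __percpu *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *
 *	if (cnt)
 *		this_cpu_inc(*cnt);
 *	...
 *	free_percpu(cnt);
 */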

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true, GFP_KERNEL);
}

/**
 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 */
static void pcpu_balance_workfn(struct work_struct *work)
{
	LIST_HEAD(to_free);
	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;
	int slot, nr_to_pop, ret;

	/*
	 * There's no reason to keep around multiple unused chunks and VM
	 * areas can be scarce.  Destroy all free chunks except for one.
	 */
	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, free_head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
			continue;

		list_del_init(&chunk->map_extend_list);
		list_move(&chunk->list, &to_free);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &to_free, list) {
		int rs, re;

		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
			pcpu_depopulate_chunk(chunk, rs, re);
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_depopulated(chunk, rs, re);
			spin_unlock_irq(&pcpu_lock);
		}
		pcpu_destroy_chunk(chunk);
	}

	/* service chunks which requested async area map extension */
	do {
		int new_alloc = 0;

		spin_lock_irq(&pcpu_lock);

		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
					struct pcpu_chunk, map_extend_list);
		if (chunk) {
			list_del_init(&chunk->map_extend_list);
			new_alloc = pcpu_need_to_extend(chunk, false);
		}

		spin_unlock_irq(&pcpu_lock);

		if (new_alloc)
			pcpu_extend_area_map(chunk, new_alloc);
	} while (chunk);

	/*
	 * Ensure there are certain number of free populated pages for
	 * atomic allocs.  Fill up from the most packed so that atomic
	 * allocs don't increase fragmentation.  If atomic allocation
	 * failed previously, always populate the maximum amount.  This
	 * should prevent atomic allocs larger than PAGE_SIZE from
	 * failing indefinitely; however, large atomic allocs are not
	 * something we support properly and can be highly unreliable and
	 * inefficient.
	 */
retry_pop:
	if (pcpu_atomic_alloc_failed) {
		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
		/* best effort anyway, don't worry about synchronization */
		pcpu_atomic_alloc_failed = false;
	} else {
		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
				  pcpu_nr_empty_pop_pages,
				  0, PCPU_EMPTY_POP_PAGES_HIGH);
	}

	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
		int nr_unpop = 0, rs, re;

		if (!nr_to_pop)
			break;

		spin_lock_irq(&pcpu_lock);
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			nr_unpop = pcpu_unit_pages - chunk->nr_populated;
			if (nr_unpop)
				break;
		}
		spin_unlock_irq(&pcpu_lock);

		if (!nr_unpop)
			continue;

		/* @chunk can't go away while pcpu_alloc_mutex is held */
		pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
			int nr = min(re - rs, nr_to_pop);

			ret = pcpu_populate_chunk(chunk, rs, rs + nr);
			if (!ret) {
				nr_to_pop -= nr;
				spin_lock_irq(&pcpu_lock);
				pcpu_chunk_populated(chunk, rs, rs + nr);
				spin_unlock_irq(&pcpu_lock);
			} else {
				nr_to_pop = 0;
			}

			if (!nr_to_pop)
				break;
		}
	}

	if (nr_to_pop) {
		/* ran out of chunks to populate, create a new one and retry */
		chunk = pcpu_create_chunk();
		if (chunk) {
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_relocate(chunk, -1);
			spin_unlock_irq(&pcpu_lock);
			goto retry_pop;
		}
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
1245
 */
1246
void free_percpu(void __percpu *ptr)
1247
{
1248
	void *addr;
1249
	struct pcpu_chunk *chunk;
1250
	unsigned long flags;
1251
	int off, occ_pages;
1252 1253 1254 1255

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off, &occ_pages);

	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages += occ_pages;

	/* if there is more than one fully free chunk, wake up the grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				pcpu_schedule_balance_work();
				break;
			}
	}

	trace_percpu_free_percpu(chunk->base_addr, off, ptr);

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);
		void *va = (void *)addr;

		if (va >= start && va < start + static_size) {
			if (can_addr) {
				*can_addr = (unsigned long) (va - start);
				*can_addr += (unsigned long)
					per_cpu_ptr(base, get_boot_cpu_id());
			}
			return true;
		}
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
	return __is_kernel_percpu_address(addr, NULL);
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * percpu allocator has special setup for the first chunk, which currently
 * supports either embedding in linear address space or vmalloc mapping,
 * and, from the second one, the backing allocator (currently either vm or
 * km) provides translation.
 *
1342
 * The addr can be translated simply without checking if it falls into the
1343 1344 1345 1346 1347
 * first chunk. But the current code reflects better how percpu allocator
 * actually works, and the verification can discover both bugs in percpu
 * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current
 * code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 */
	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
				     pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	memblock_free_early(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				pr_cont("\n");
				printk("%spcpu-alloc: ", lvl);
			}
			pr_cont("[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					pr_cont("%0*d ",
						cpu_width, gi->cpu_map[unit]);
				else
					pr_cont("%s ", empty_str);
		}
	}
	pr_cont("\n");
}

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how to percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but uses different area allocation map to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	size_t dyn_size = ai->dyn_size;
	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("failed to initialize, %s\n", #cond);		\
		pr_emerg("cpu_possible_mask=%*pb\n",			\
			 cpumask_pr_args(cpu_possible_mask));		\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
#endif
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

	/* process group information and build config tables accordingly */
	group_offsets = memblock_virt_alloc(ai->nr_groups *
					     sizeof(group_offsets[0]), 0);
	group_sizes = memblock_virt_alloc(ai->nr_groups *
					   sizeof(group_sizes[0]), 0);
	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;

	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
		}
	}
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_DEBUG, ai);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	pcpu_stats_save_ai(ai);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = memblock_virt_alloc(
			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
	INIT_LIST_HEAD(&schunk->list);
	INIT_LIST_HEAD(&schunk->map_extend_list);
	schunk->base_addr = base_addr;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->immutable = true;
	bitmap_fill(schunk->populated, pcpu_unit_pages);
	schunk->nr_populated = pcpu_unit_pages;

	if (ai->reserved_size) {
		schunk->free_size = ai->reserved_size;
1690
		pcpu_reserved_chunk = schunk;
1691
		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1692 1693 1694 1695
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
1696
	schunk->contig_hint = schunk->free_size;
1697

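	/*
	 * Each ->map[] entry is a byte offset into the unit; bit 0 of an
	 * entry marks the area starting at that offset as allocated.  Below,
	 * [0, static_size) is recorded as in use, followed by one free area
	 * (reserved or dynamic), with the final entry marking its end.
	 */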
	schunk->map[0] = 1;
	schunk->map[1] = ai->static_size;
	schunk->map_used = 1;
	if (schunk->free_size)
		schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size;
	schunk->map[schunk->map_used] |= 1;
	schunk->has_reserved = true;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
		INIT_LIST_HEAD(&dchunk->list);
		INIT_LIST_HEAD(&dchunk->map_extend_list);
		dchunk->base_addr = base_addr;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->immutable = true;
		bitmap_fill(dchunk->populated, pcpu_unit_pages);
		dchunk->nr_populated = pcpu_unit_pages;

		dchunk->contig_hint = dchunk->free_size = dyn_size;
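		/*
		 * Mark the head of the unit (static + reserved, i.e. the
		 * first pcpu_reserved_chunk_limit bytes) as allocated so the
		 * dynamic chunk only serves offsets above it.
		 */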
		dchunk->map[0] = 1;
		dchunk->map[1] = pcpu_reserved_chunk_limit;
		dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
		dchunk->map_used = 2;
		dchunk->has_reserved = true;
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_nr_empty_pop_pages +=
		pcpu_count_occupied_pages(pcpu_first_chunk, 1);
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(base_addr);

	/* we're done */
	pcpu_base_addr = base_addr;
	return 0;
}

#ifdef CONFIG_SMP

const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

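/*
 * Pick the first chunk allocator from the kernel command line, e.g. boot
 * with "percpu_alloc=page" to force the page-based first chunk.  Unknown
 * values only warn and leave the compiled-in default ("auto") in place.
 */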
static int __init percpu_alloc_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warn("unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);

/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if needed by the arch config or the generic setup is going
 * to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
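	/*
	 * Hypothetical example: with atom_size = 2MB and min_unit_size =
	 * 640KB, alloc_size is 2MB and upa starts at 3; 2MB isn't evenly
	 * divisible by 3, so the loop below settles on upa = 2, i.e. two
	 * page-aligned 1MB units per 2MB allocation.
	 */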
	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
	}

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */

#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size;
	unsigned long max_distance;
	int group, i, highest_group, rc;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = memblock_virt_alloc_nopanic(areas_size, 0);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address & max_distance */
	highest_group = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_free(ptr);
		areas[group] = ptr;

		base = min(ptr, base);
		if (ptr > areas[highest_group])
			highest_group = group;
	}
	max_distance = areas[highest_group] - base;
	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
				max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free_areas;
#endif
	}

	/*
	 * Copy data and free unused parts.  This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
	}

	pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		if (areas[group])
			free_fn(areas[group],
				ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		memblock_free_early(__pa(areas), areas_size);
	return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc;
	int upa;
	int nr_g0_units;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
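	/*
	 * With a NULL @cpu_distance_fn all possible CPUs land in group 0,
	 * so group 0 must hold num_possible_cpus() rounded up to upa units.
	 */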
	upa = ai->alloc_size / ai->unit_size;
	nr_g0_units = roundup(num_possible_cpus(), upa);
	if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) {
		pcpu_free_alloc_info(ai);
		return -EINVAL;
	}

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = memblock_virt_alloc(pages_size, 0);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned int cpu = ai->groups[0].cpu_map[unit];

		for (i = 0; i < unit_pages; i++) {
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warn("failed to allocate %s page for cpu%u\n",
					psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_free(ptr);
			pages[j++] = virt_to_page(ptr);
		}
	}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
		unit_pages, psize_str, vm.addr, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	memblock_free_early(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */

#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

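/* default first chunk alloc/free callbacks, backed by memblock */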
static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return memblock_virt_alloc_from_nopanic(
			size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

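	/*
	 * First chunk units sit at pcpu_base_addr + pcpu_unit_offsets[cpu];
	 * record the delta from the static per-cpu image so that a static
	 * percpu address plus __per_cpu_offset[cpu] lands in cpu's unit.
	 */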
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else	/* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = memblock_virt_alloc_from_nopanic(unit_size,
					      PAGE_SIZE,
					      __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_free(fc);

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	if (pcpu_setup_first_chunk(ai, fc) < 0)
		panic("Failed to initialize percpu areas.");
}

#endif	/* CONFIG_SMP */

/*
 * First and reserved chunks are initialized with temporary allocation
 * map in initdata so that they can be used before slab is online.
 * This function is called after slab is brought up and replaces those
 * with properly allocated maps.
 */
void __init percpu_init_late(void)
{
	struct pcpu_chunk *target_chunks[] =
		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int i;

	for (i = 0; (chunk = target_chunks[i]); i++) {
		int *map;
		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);

		BUILD_BUG_ON(size > PAGE_SIZE);

		map = pcpu_mem_zalloc(size);
		BUG_ON(!map);

		spin_lock_irqsave(&pcpu_lock, flags);
		memcpy(map, chunk->map, size);
		chunk->map = map;
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}
}

/*
 * Percpu allocator is initialized early during boot when neither slab or
 * workqueue is available.  Plug async management until everything is up
 * and running.
 */
static int __init percpu_enable_async(void)
{
	pcpu_async_enabled = true;
	return 0;
}
subsys_initcall(percpu_enable_async);