/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 license.
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  Each map entry holds the byte offset at which an
 * area begins; bit 0 of the entry is set if that area is allocated and
 * clear if it is free.  Allocation inside a chunk is done by scanning
 * this map sequentially and serving the first matching entry.  This is
 * mostly copied from the percpu_modalloc() allocator.
 *
 * Chunks can be determined from the address using the index field
 * in the page struct.  The index field contains a pointer to the chunk.
 *
 * These chunks are organized into lists according to free_size and the
 * allocator tries to allocate from the fullest chunk first.  Each chunk
 * maintains a maximum contiguous area size hint which is guaranteed to
 * be equal to or larger than the maximum contiguous area in the chunk.
 * This helps prevent the allocator from iterating over chunks
 * unnecessarily.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
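/*
 * Illustrative usage sketch (not part of the allocator itself): a typical
 * consumer of the dynamic percpu API allocates an area, accesses the
 * per-cpu copies through this_cpu_*() / per_cpu_ptr() and frees it again.
 * The variable name below is made up for illustration only.
 *
 *	unsigned long __percpu *counters = alloc_percpu(unsigned long);
 *
 *	if (!counters)
 *		return -ENOMEM;
 *	this_cpu_inc(*counters);
 *	pr_info("cpu0 count: %lu\n", *per_cpu_ptr(counters, 0));
 *	free_percpu(counters);
 */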

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
#define PCPU_ATOMIC_MAP_MARGIN_LOW	32
#define PCPU_ATOMIC_MAP_MARGIN_HIGH	64
#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr	+		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
		addr < first_start + pcpu_first_chunk->start_offset;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

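/*
 * Slot math sketch (the numbers below are worked examples, not tunables):
 * with PCPU_SLOT_BASE_SHIFT == 5, free sizes of 1-15 bytes land in slot 1,
 * 16-31 in slot 2, 32-63 in slot 3, 64-127 in slot 4, and so on.  A chunk
 * whose free_size equals pcpu_unit_size always goes to the last slot.
 */
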
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))

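/*
 * Illustrative iteration sketch (hypothetical local variables): callers
 * such as pcpu_alloc() walk the unpopulated page regions of a chunk
 * between two page indexes roughly like this.
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end)
 *		pcpu_populate_chunk(chunk, rs, re);
 */
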
/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

/**
 * pcpu_count_occupied_pages - count the number of pages an area occupies
 * @chunk: chunk of interest
 * @i: index of the area in question
 *
 * Count the number of pages chunk's @i'th area occupies.  When the area's
 * start and/or end address isn't aligned to page boundary, the straddled
 * page is included in the count iff the rest of the page is free.
 */
static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
{
	int off = chunk->map[i] & ~1;
	int end = chunk->map[i + 1] & ~1;

	if (!PAGE_ALIGNED(off) && i > 0) {
		int prev = chunk->map[i - 1];

		if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
			off = round_down(off, PAGE_SIZE);
	}

	if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
		int next = chunk->map[i + 1];
		int nend = chunk->map[i + 2] & ~1;

		if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
			end = round_up(end, PAGE_SIZE);
	}

	return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 * @is_atomic: the allocation context
 *
 * Determine whether area map of @chunk needs to be extended.  If
 * @is_atomic, only the amount necessary for a new allocation is
 * considered; however, async extension is scheduled if the left amount is
 * low.  If !@is_atomic, it aims for more empty space.  Combined, this
 * ensures that the map is likely to have enough available space to
 * accommodate atomic allocations which can't extend maps directly.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
{
	int margin, new_alloc;

	lockdep_assert_held(&pcpu_lock);

	if (is_atomic) {
		margin = 3;

		if (chunk->map_alloc <
		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
			if (list_empty(&chunk->map_extend_list)) {
				list_add_tail(&chunk->map_extend_list,
					      &pcpu_map_extend_chunks);
				pcpu_schedule_balance_work();
			}
		}
	} else {
		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
	}

	if (chunk->map_alloc >= chunk->map_used + margin)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + margin)
		new_alloc *= 2;

	return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	lockdep_assert_held(&pcpu_alloc_mutex);

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old);
	pcpu_mem_free(new);

	return 0;
}

/**
 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
 * @chunk: chunk the candidate area belongs to
 * @off: the offset to the start of the candidate area
 * @this_size: the size of the candidate area
 * @size: the size of the target allocation
 * @align: the alignment of the target allocation
 * @pop_only: only allocate from already populated region
 *
 * We're trying to allocate @size bytes aligned at @align.  @chunk's area
 * at @off sized @this_size is a candidate.  This function determines
 * whether the target allocation fits in the candidate area and returns the
 * number of bytes to pad after @off.  If the target area doesn't fit, -1
 * is returned.
 *
 * If @pop_only is %true, this function only considers the already
 * populated part of the candidate area.
 */
static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
			    int size, int align, bool pop_only)
{
	int cand_off = off;

	while (true) {
		int head = ALIGN(cand_off, align) - off;
		int page_start, page_end, rs, re;

		if (this_size < head + size)
			return -1;

		if (!pop_only)
			return head;

		/*
		 * If the first unpopulated page is beyond the end of the
		 * allocation, the whole allocation is populated;
		 * otherwise, retry from the end of the unpopulated area.
		 */
		page_start = PFN_DOWN(head + off);
		page_end = PFN_UP(head + off + size);

		rs = page_start;
		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
		if (rs >= page_end)
			return head;
		cand_off = re * PAGE_SIZE;
	}
}

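/*
 * Area map sketch (hypothetical numbers): a chunk whose map contains
 *
 *	map[] = { 0 | 1, 256, 1024 | 1, pcpu_unit_size | 1 }, map_used = 3
 *
 * describes an allocated area at [0, 256), a free area at [256, 1024) and
 * an allocated area covering the rest of the unit; map[map_used] is the
 * end-of-chunk boundary.  Bit 0 of each entry flags the area starting at
 * that offset as in use.
 */
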
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 * @pop_only: allocate only from the populated area
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
			   bool pop_only, int *occ_pages_p)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;
	bool seen_free = false;
	int *p;

	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
		int head, tail;
		int this_size;

		off = *p;
		if (off & 1)
			continue;

		this_size = (p[1] & ~1) - off;

		head = pcpu_fit_in_area(chunk, off, this_size, size, align,
					pop_only);
		if (head < 0) {
			if (!seen_free) {
				chunk->first_free = i;
				seen_free = true;
			}
			max_contig = max(this_size, max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
			*p = off += head;
			if (p[-1] & 1)
				chunk->free_size -= head;
			else
				max_contig = max(*p - p[-1], max_contig);
			this_size -= head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = this_size - head - size;
		if (tail < sizeof(int)) {
			tail = 0;
			size = this_size - head;
		}

		/* split if warranted */
		if (head || tail) {
			int nr_extra = !!head + !!tail;

			/* insert new subblocks */
			memmove(p + nr_extra + 1, p + 1,
				sizeof(chunk->map[0]) * (chunk->map_used - i));
			chunk->map_used += nr_extra;

			if (head) {
				if (!seen_free) {
					chunk->first_free = i;
					seen_free = true;
				}
				*++p = off += head;
				++i;
				max_contig = max(head, max_contig);
			}
			if (tail) {
				p[1] = off + size;
				max_contig = max(tail, max_contig);
			}
		}

		if (!seen_free)
			chunk->first_free = i + 1;

		/* update hint and mark allocated */
		if (i + 1 == chunk->map_used)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= size;
		*p |= 1;

		*occ_pages_p = pcpu_count_occupied_pages(chunk, i);
		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
			   int *occ_pages_p)
{
	int oslot = pcpu_chunk_slot(chunk);
	int off = 0;
	unsigned i, j;
	int to_free = 0;
	int *p;

	lockdep_assert_held(&pcpu_lock);
	pcpu_stats_area_dealloc(chunk);

	freeme |= 1;	/* we are searching for <given offset, in use> pair */

	i = 0;
	j = chunk->map_used;
	while (i != j) {
		unsigned k = (i + j) / 2;
		off = chunk->map[k];
		if (off < freeme)
			i = k + 1;
		else if (off > freeme)
			j = k;
		else
			i = j = k;
	}
	BUG_ON(off != freeme);

	if (i < chunk->first_free)
		chunk->first_free = i;

	p = chunk->map + i;
	*p = off &= ~1;
	chunk->free_size += (p[1] & ~1) - off;

	*occ_pages_p = pcpu_count_occupied_pages(chunk, i);

	/* merge with next? */
	if (!(p[1] & 1))
		to_free++;
	/* merge with previous? */
	if (i > 0 && !(p[-1] & 1)) {
		to_free++;
		i--;
		p--;
	}
	if (to_free) {
		chunk->map_used -= to_free;
		memmove(p + 1, p + 1 + to_free,
			(chunk->map_used - i) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1,
				 chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
	if (!chunk->map) {
		pcpu_mem_free(chunk);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[0] = 0;
	chunk->map[1] = pcpu_unit_size | 1;
	chunk->map_used = 1;

	INIT_LIST_HEAD(&chunk->list);
	INIT_LIST_HEAD(&chunk->map_extend_list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map);
	pcpu_mem_free(chunk);
}

/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;
	pcpu_nr_empty_pop_pages += nr;
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	pcpu_nr_empty_pop_pages -= nr;
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to the corresponding page
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
				 gfp_t gfp)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
	int occ_pages = 0;
	int slot, off, new_alloc, cpu, ret;
	unsigned long flags;
	void __percpu *ptr;

	/*
	 * We want the lowest bit of offset available for in-use/free
	 * indicator, so force >= 16bit alignment and make size even.
	 */
	if (unlikely(align < 2))
		align = 2;

	size = ALIGN(size, 2);

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
		     !is_power_of_2(align))) {
		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
		     size, align);
		return NULL;
	}

	if (!is_atomic)
		mutex_lock(&pcpu_alloc_mutex);

	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (is_atomic ||
			    pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align, is_atomic,
				      &occ_pages);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk, is_atomic);
			if (new_alloc) {
				if (is_atomic)
					continue;
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align, is_atomic,
					      &occ_pages);
			if (off >= 0)
				goto area_found;
		}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * No space left.  Create a new chunk.  We don't want multiple
	 * tasks to create chunks simultaneously.  Serialize and create iff
	 * there's still no empty chunk after grabbing the mutex.
	 */
	if (is_atomic) {
		err = "atomic alloc failed, no space left";
		goto fail;
	}

	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
		chunk = pcpu_create_chunk();
		if (!chunk) {
			err = "failed to allocate new chunk";
			goto fail;
		}

		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_chunk_relocate(chunk, -1);
	} else {
		spin_lock_irqsave(&pcpu_lock, flags);
	}

	goto restart;

area_found:
	pcpu_stats_area_alloc(chunk, size);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate if not all pages are already there */
	if (!is_atomic) {
		int page_start, page_end, rs, re;

		page_start = PFN_DOWN(off);
		page_end = PFN_UP(off + size);

		pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
			WARN_ON(chunk->immutable);

			ret = pcpu_populate_chunk(chunk, rs, re);

			spin_lock_irqsave(&pcpu_lock, flags);
			if (ret) {
				pcpu_free_area(chunk, off, &occ_pages);
				err = "failed to populate";
				goto fail_unlock;
			}
			pcpu_chunk_populated(chunk, rs, re);
			spin_unlock_irqrestore(&pcpu_lock, flags);
		}

		mutex_unlock(&pcpu_alloc_mutex);
	}

	if (chunk != pcpu_reserved_chunk) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_nr_empty_pop_pages -= occ_pages;
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}

	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
		pcpu_schedule_balance_work();

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size, gfp);

	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
			chunk->base_addr, off, ptr);

	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);

	if (!is_atomic && warn_limit) {
		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
			size, align, is_atomic, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("limit reached, disable warning\n");
	}
	if (is_atomic) {
		/* see the flag handling in pcpu_balance_workfn() */
		pcpu_atomic_alloc_failed = true;
		pcpu_schedule_balance_work();
	} else {
		mutex_unlock(&pcpu_alloc_mutex);
	}
	return NULL;
}

/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true, GFP_KERNEL);
}

/**
 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 */
static void pcpu_balance_workfn(struct work_struct *work)
{
	LIST_HEAD(to_free);
	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;
	int slot, nr_to_pop, ret;

	/*
	 * There's no reason to keep around multiple unused chunks and VM
	 * areas can be scarce.  Destroy all free chunks except for one.
	 */
	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, free_head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
			continue;

		list_del_init(&chunk->map_extend_list);
		list_move(&chunk->list, &to_free);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &to_free, list) {
		int rs, re;

		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
			pcpu_depopulate_chunk(chunk, rs, re);
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_depopulated(chunk, rs, re);
			spin_unlock_irq(&pcpu_lock);
		}
		pcpu_destroy_chunk(chunk);
	}

	/* service chunks which requested async area map extension */
	do {
		int new_alloc = 0;

		spin_lock_irq(&pcpu_lock);

		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
					struct pcpu_chunk, map_extend_list);
		if (chunk) {
			list_del_init(&chunk->map_extend_list);
			new_alloc = pcpu_need_to_extend(chunk, false);
		}

		spin_unlock_irq(&pcpu_lock);

		if (new_alloc)
			pcpu_extend_area_map(chunk, new_alloc);
	} while (chunk);

	/*
	 * Ensure there are certain number of free populated pages for
	 * atomic allocs.  Fill up from the most packed so that atomic
	 * allocs don't increase fragmentation.  If atomic allocation
	 * failed previously, always populate the maximum amount.  This
	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
	 * failing indefinitely; however, large atomic allocs are not
	 * something we support properly and can be highly unreliable and
	 * inefficient.
	 */
retry_pop:
	if (pcpu_atomic_alloc_failed) {
		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
		/* best effort anyway, don't worry about synchronization */
		pcpu_atomic_alloc_failed = false;
	} else {
		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
				  pcpu_nr_empty_pop_pages,
				  0, PCPU_EMPTY_POP_PAGES_HIGH);
	}

	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
		int nr_unpop = 0, rs, re;

		if (!nr_to_pop)
			break;

		spin_lock_irq(&pcpu_lock);
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			nr_unpop = pcpu_unit_pages - chunk->nr_populated;
			if (nr_unpop)
				break;
		}
		spin_unlock_irq(&pcpu_lock);

		if (!nr_unpop)
			continue;

		/* @chunk can't go away while pcpu_alloc_mutex is held */
		pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
			int nr = min(re - rs, nr_to_pop);

			ret = pcpu_populate_chunk(chunk, rs, rs + nr);
			if (!ret) {
				nr_to_pop -= nr;
				spin_lock_irq(&pcpu_lock);
				pcpu_chunk_populated(chunk, rs, rs + nr);
				spin_unlock_irq(&pcpu_lock);
			} else {
				nr_to_pop = 0;
			}

			if (!nr_to_pop)
				break;
		}
	}

	if (nr_to_pop) {
		/* ran out of chunks to populate, create a new one and retry */
		chunk = pcpu_create_chunk();
		if (chunk) {
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_relocate(chunk, -1);
			spin_unlock_irq(&pcpu_lock);
			goto retry_pop;
		}
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off, occ_pages;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off, &occ_pages);

	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages += occ_pages;

	/* if there are more than one fully free chunks, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				pcpu_schedule_balance_work();
				break;
			}
	}

	trace_percpu_free_percpu(chunk->base_addr, off, ptr);

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);
		void *va = (void *)addr;

		if (va >= start && va < start + static_size) {
			if (can_addr) {
				*can_addr = (unsigned long) (va - start);
				*can_addr += (unsigned long)
					per_cpu_ptr(base, get_boot_cpu_id());
			}
			return true;
		}
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
	return __is_kernel_percpu_address(addr, NULL);
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * percpu allocator has special setup for the first chunk, which currently
 * supports either embedding in linear address space or vmalloc mapping,
 * and, from the second one, the backing allocator (currently either vm or
 * km) provides translation.
 *
 * The addr can be translated simply without checking if it falls into the
 * first chunk. But the current code reflects better how percpu allocator
 * actually works, and the verification can discover both bugs in percpu
 * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current
 * code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 */
	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
				     pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}

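/*
 * Usage sketch (illustrative only, variable names made up): callers that
 * need the physical address of a specific cpu's copy, e.g. to hand it to
 * firmware or a device, typically do
 *
 *	phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(ptr, cpu));
 */
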
/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	memblock_free_early(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				pr_cont("\n");
				printk("%spcpu-alloc: ", lvl);
			}
			pr_cont("[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					pr_cont("%0*d ",
						cpu_width, gi->cpu_map[unit]);
				else
					pr_cont("%s ", empty_str);
		}
	}
	pr_cont("\n");
}

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but uses different area allocation map to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("failed to initialize, %s\n", #cond);		\
		pr_emerg("cpu_possible_mask=%*pb\n",			\
			 cpumask_pr_args(cpu_possible_mask));		\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
#endif
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
	PCPU_SETUP_BUG_ON(!ai->dyn_size);
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

	/* process group information and build config tables accordingly */
	group_offsets = memblock_virt_alloc(ai->nr_groups *
					     sizeof(group_offsets[0]), 0);
	group_sizes = memblock_virt_alloc(ai->nr_groups *
					   sizeof(group_sizes[0]), 0);
	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;

	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
		}
	}
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_DEBUG, ai);

1649 1650 1651
	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
1652
	pcpu_unit_map = unit_map;
T
Tejun Heo 已提交
1653
	pcpu_unit_offsets = unit_off;
1654 1655

	/* determine basic parameters */
1656
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1657
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1658
	pcpu_atom_size = ai->atom_size;
T
Tejun Heo 已提交
1659 1660
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1661

1662 1663
	pcpu_stats_save_ai(ai);

1664 1665 1666 1667 1668
	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1669 1670
	pcpu_slot = memblock_virt_alloc(
			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
1671 1672 1673
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

1674 1675 1676 1677 1678 1679 1680
	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
1681
	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1682
	INIT_LIST_HEAD(&schunk->list);
1683
	INIT_LIST_HEAD(&schunk->map_extend_list);
T
Tejun Heo 已提交
1684
	schunk->base_addr = base_addr;
1685
	schunk->start_offset = ai->static_size;
1686 1687
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
1688
	schunk->immutable = true;
T
Tejun Heo 已提交
1689
	bitmap_fill(schunk->populated, pcpu_unit_pages);
1690
	schunk->nr_populated = pcpu_unit_pages;
1691

1692
	schunk->free_size = ai->reserved_size ?: ai->dyn_size;
1693
	schunk->contig_hint = schunk->free_size;
1694
	schunk->map[0] = 1;
1695
	schunk->map[1] = schunk->start_offset;
1696 1697
	schunk->map[2] = (ai->static_size + schunk->free_size) | 1;
	schunk->map_used = 2;
1698

1699
	/* init dynamic chunk if necessary */
1700 1701 1702
	if (ai->reserved_size) {
		pcpu_reserved_chunk = schunk;

1703
		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1704
		INIT_LIST_HEAD(&dchunk->list);
1705
		INIT_LIST_HEAD(&dchunk->map_extend_list);
T
Tejun Heo 已提交
1706
		dchunk->base_addr = base_addr;
1707
		dchunk->start_offset = ai->static_size + ai->reserved_size;
1708 1709
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
1710
		dchunk->immutable = true;
T
Tejun Heo 已提交
1711
		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1712
		dchunk->nr_populated = pcpu_unit_pages;
1713

1714
		dchunk->contig_hint = dchunk->free_size = ai->dyn_size;
1715
		dchunk->map[0] = 1;
1716 1717
		dchunk->map[1] = dchunk->start_offset;
		dchunk->map[2] = (dchunk->start_offset + dchunk->free_size) | 1;
1718
		dchunk->map_used = 2;
1719 1720
	}

1721
	/* link the first chunk in */
1722
	pcpu_first_chunk = dchunk ?: schunk;
1723
	i = (pcpu_first_chunk->start_offset) ? 1 : 0;
1724
	pcpu_nr_empty_pop_pages +=
1725
		pcpu_count_occupied_pages(pcpu_first_chunk, i);
1726
	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1727

1728
	pcpu_stats_chunk_alloc();
1729
	trace_percpu_create_chunk(base_addr);
1730

1731
	/* we're done */
T
Tejun Heo 已提交
1732
	pcpu_base_addr = base_addr;
T
Tejun Heo 已提交
1733
	return 0;
1734
}
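
/*
 * Worked example for the first chunk setup above (the sizes are made up
 * for illustration, not taken from any particular configuration): with
 * ai->static_size = 0x10000, ai->reserved_size = 0x2000 and
 * ai->dyn_size = 0x5000, the static chunk ends up with
 *
 *	schunk->start_offset = 0x10000
 *	schunk->free_size    = 0x2000	(reserved_size ?: dyn_size)
 *	schunk->map[]        = { 1, 0x10000, 0x12000 | 1 }, map_used = 2
 *
 * and, because reserved_size is non-zero, a dynamic chunk is set up with
 *
 *	dchunk->start_offset = 0x12000
 *	dchunk->free_size    = 0x5000
 *	dchunk->map[]        = { 1, 0x12000, 0x17000 | 1 }, map_used = 2
 *
 * pcpu_first_chunk then points at dchunk while schunk is installed as
 * pcpu_reserved_chunk.
 */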

#ifdef CONFIG_SMP

const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warn("unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
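
/*
 * Example: booting with "percpu_alloc=page" on the kernel command line
 * selects the page-remapped first chunk and "percpu_alloc=embed" the
 * embedding one, provided the corresponding helper is built for the
 * arch; any other value only produces the warning above and leaves
 * pcpu_chosen_fc at its PCPU_FC_AUTO default.
 */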

/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if the arch config needs it or if the generic percpu
 * setup is going to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	/* determine the maximum # of units that can fit in an allocation */
	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
	}

	/*
	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
	 * Expand the unit_size until we use >= 75% of the units allocated.
	 * Related to atom_size, which could be much larger than the unit_size.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
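
/*
 * Illustrative sizing example for pcpu_build_alloc_info() (all numbers
 * are assumptions, not measurements): if size_sum works out to 192K and
 * atom_size is 2M, then min_unit_size = 192K, alloc_size = 2M and the
 * initial upa of 10 (2M / 192K) is lowered to 8, the largest value for
 * which 2M divides evenly and 2M / upa stays page aligned.  That yields
 * a 256K unit_size with up to 8 units per 2M allocation; the wastage
 * loop may lower upa further depending on how the possible CPUs are
 * distributed across groups.
 */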

#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size;
	unsigned long max_distance;
	int group, i, highest_group, rc;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = memblock_virt_alloc_nopanic(areas_size, 0);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address & max_distance */
	highest_group = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_free(ptr);
		areas[group] = ptr;

		base = min(ptr, base);
		if (ptr > areas[highest_group])
			highest_group = group;
	}
	max_distance = areas[highest_group] - base;
	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
				max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free_areas;
#endif
	}

	/*
	 * Copy data and free unused parts.  This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
	}

	pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		if (areas[group])
			free_fn(areas[group],
				ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		memblock_free_early(__pa(areas), areas_size);
	return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc;
	int upa;
	int nr_g0_units;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	upa = ai->alloc_size / ai->unit_size;
	nr_g0_units = roundup(num_possible_cpus(), upa);
	if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) {
		pcpu_free_alloc_info(ai);
		return -EINVAL;
	}

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = memblock_virt_alloc(pages_size, 0);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned int cpu = ai->groups[0].cpu_map[unit];
		for (i = 0; i < unit_pages; i++) {
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warn("failed to allocate %s page for cpu%u\n",
						psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_free(ptr);
			pages[j++] = virt_to_page(ptr);
		}
	}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
		unit_pages, psize_str, vm.addr, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	memblock_free_early(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */
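
/*
 * Minimal usage sketch (hypothetical arch code, not part of this file):
 * an arch that selects CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK supplies
 * PAGE_SIZE-granular callbacks and calls, typically as a fallback when
 * the embed helper is unusable or fails:
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   my_pcpu_alloc, my_pcpu_free,
 *				   my_populate_pte);
 *
 * my_pcpu_alloc()/my_pcpu_free() allocate and free single pages close
 * to the given cpu and my_populate_pte() populates the page tables for
 * the passed vmalloc address; all three names are placeholders for
 * arch-specific helpers.
 */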

#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return memblock_virt_alloc_from_nopanic(
			size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
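
/*
 * With the offsets established above, a static percpu variable for a
 * given cpu is reached by adding __per_cpu_offset[cpu] to its link-time
 * address: delta relocates from the linker-managed copy into the first
 * chunk and pcpu_unit_offsets[cpu] selects that cpu's unit within it.
 * This is what the generic per_cpu accessors in asm-generic/percpu.h
 * rely on.
 */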
#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else	/* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = memblock_virt_alloc_from_nopanic(unit_size,
					      PAGE_SIZE,
					      __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_free(fc);

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	if (pcpu_setup_first_chunk(ai, fc) < 0)
		panic("Failed to initialize percpu areas.");
}

#endif	/* CONFIG_SMP */

/*
 * First and reserved chunks are initialized with temporary allocation
 * map in initdata so that they can be used before slab is online.
 * This function is called after slab is brought up and replaces those
 * with properly allocated maps.
 */
void __init percpu_init_late(void)
{
	struct pcpu_chunk *target_chunks[] =
		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int i;

	for (i = 0; (chunk = target_chunks[i]); i++) {
		int *map;
		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);

		BUILD_BUG_ON(size > PAGE_SIZE);

		map = pcpu_mem_zalloc(size);
		BUG_ON(!map);

		spin_lock_irqsave(&pcpu_lock, flags);
		memcpy(map, chunk->map, size);
		chunk->map = map;
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}
}

/*
 * Percpu allocator is initialized early during boot when neither slab or
 * workqueue is available.  Plug async management until everything is up
 * and running.
 */
static int __init percpu_enable_async(void)
{
	pcpu_async_enabled = true;
	return 0;
}
subsys_initcall(percpu_enable_async);