/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 license.
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  Ie., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  Each entry is the byte offset at which an area
 * starts; the low bit is set while that area is allocated and clear
 * while it is free.  Allocation inside a chunk is done by scanning
 * this map sequentially and serving the first matching entry.  This
 * is mostly copied from the percpu_modalloc() allocator.
 *
 * Chunks can be determined from the address using the index field
 * in the page struct. The index field contains a pointer to the chunk.
 *
 * Chunks are organized into lists according to free_size, and the
 * allocator tries to serve requests from the fullest chunk first.
 * Each chunk maintains a maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps prevent the allocator from iterating
 * over chunks unnecessarily.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

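/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * dynamic percpu memory is obtained with alloc_percpu() and accessed
 * through per_cpu_ptr()/this_cpu_*() accessors, e.g.
 *
 *	struct foo_stats __percpu *stats;   (hypothetical example type)
 *
 *	stats = alloc_percpu(struct foo_stats);   zeroed, GFP_KERNEL
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->events);              fast local-cpu access
 *	...
 *	free_percpu(stats);                 releases all per-cpu copies
 */
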
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 bytes share the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
#define PCPU_ATOMIC_MAP_MARGIN_LOW	32
#define PCPU_ATOMIC_MAP_MARGIN_HIGH	64
#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr	+		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
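
/*
 * Sketch of how the default mapping composes (illustrative, not build
 * code): a chunk-relative address and the percpu pointer differ by the
 * constant (pcpu_base_addr - __per_cpu_start), so
 *
 *	__pcpu_ptr_to_addr(__addr_to_pcpu_ptr(addr)) == addr
 *
 * and actual per-cpu access then adds a cpu's unit offset on top of
 * the pointer via this_cpu_ptr()/per_cpu_ptr().
 */
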
static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
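
/*
 * Worked example (illustrative): for a 96 byte free area, fls(96) = 7,
 * so __pcpu_size_to_slot() returns max(7 - 5 + 2, 1) = 4; all sizes in
 * [64, 127] land in the same slot, and a fully free chunk always sits
 * in the last slot, pcpu_nr_slots - 1.
 */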

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
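
/*
 * Illustrative use of the iterators above (a sketch of the pattern used
 * later in this file, not additional build code):
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end)
 *		ret = pcpu_populate_chunk(chunk, rs, re);
 *
 * Each iteration yields one maximal unpopulated page run [rs, re).
 */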

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

/**
 * pcpu_count_occupied_pages - count the number of pages an area occupies
 * @chunk: chunk of interest
 * @i: index of the area in question
 *
 * Count the number of pages chunk's @i'th area occupies.  When the area's
 * start and/or end address isn't aligned to page boundary, the straddled
 * page is included in the count iff the rest of the page is free.
 */
static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
{
	int off = chunk->map[i] & ~1;
	int end = chunk->map[i + 1] & ~1;

	if (!PAGE_ALIGNED(off) && i > 0) {
		int prev = chunk->map[i - 1];

		if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
			off = round_down(off, PAGE_SIZE);
	}

	if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
		int next = chunk->map[i + 1];
		int nend = chunk->map[i + 2] & ~1;

		if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
			end = round_up(end, PAGE_SIZE);
	}

	return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
}
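
/*
 * Worked example of the map encoding used above (illustrative values):
 * a chunk whose map is { 0|1, 256, 768|1, ... } has an allocated area
 * at [0, 256), a free area at [256, 768) and an allocated area starting
 * at 768; chunk->map[i] & ~1 is the byte offset and bit 0 marks the
 * area as in use.
 */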

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 * @is_atomic: the allocation context
 *
 * Determine whether area map of @chunk needs to be extended.  If
 * @is_atomic, only the amount necessary for a new allocation is
 * considered; however, async extension is scheduled if the left amount is
 * low.  If !@is_atomic, it aims for more empty space.  Combined, this
 * ensures that the map is likely to have enough available space to
 * accommodate atomic allocations which can't extend maps directly.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
{
	int margin, new_alloc;

	lockdep_assert_held(&pcpu_lock);

	if (is_atomic) {
		margin = 3;

		if (chunk->map_alloc <
		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
			if (list_empty(&chunk->map_extend_list)) {
				list_add_tail(&chunk->map_extend_list,
					      &pcpu_map_extend_chunks);
				pcpu_schedule_balance_work();
			}
		}
	} else {
		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
	}

	if (chunk->map_alloc >= chunk->map_used + margin)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + margin)
		new_alloc *= 2;

	return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	lockdep_assert_held(&pcpu_alloc_mutex);

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old);
	pcpu_mem_free(new);

	return 0;
}

/**
 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
 * @chunk: chunk the candidate area belongs to
 * @off: the offset to the start of the candidate area
 * @this_size: the size of the candidate area
 * @size: the size of the target allocation
 * @align: the alignment of the target allocation
 * @pop_only: only allocate from already populated region
 *
 * We're trying to allocate @size bytes aligned at @align.  @chunk's area
 * at @off sized @this_size is a candidate.  This function determines
 * whether the target allocation fits in the candidate area and returns the
 * number of bytes to pad after @off.  If the target area doesn't fit, -1
 * is returned.
 *
 * If @pop_only is %true, this function only considers the already
 * populated part of the candidate area.
 */
static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
			    int size, int align, bool pop_only)
{
	int cand_off = off;

	while (true) {
		int head = ALIGN(cand_off, align) - off;
		int page_start, page_end, rs, re;

		if (this_size < head + size)
			return -1;

		if (!pop_only)
			return head;

		/*
		 * If the first unpopulated page is beyond the end of the
		 * allocation, the whole allocation is populated;
		 * otherwise, retry from the end of the unpopulated area.
		 */
		page_start = PFN_DOWN(head + off);
		page_end = PFN_UP(head + off + size);

		rs = page_start;
		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
		if (rs >= page_end)
			return head;
		cand_off = re * PAGE_SIZE;
	}
}

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 * @pop_only: allocate only from the populated area
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
			   bool pop_only, int *occ_pages_p)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;
	bool seen_free = false;
	int *p;

	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
		int head, tail;
		int this_size;

		off = *p;
		if (off & 1)
			continue;

		this_size = (p[1] & ~1) - off;

		head = pcpu_fit_in_area(chunk, off, this_size, size, align,
					pop_only);
		if (head < 0) {
			if (!seen_free) {
				chunk->first_free = i;
				seen_free = true;
			}
			max_contig = max(this_size, max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
			*p = off += head;
			if (p[-1] & 1)
				chunk->free_size -= head;
			else
				max_contig = max(*p - p[-1], max_contig);
			this_size -= head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = this_size - head - size;
		if (tail < sizeof(int)) {
			tail = 0;
			size = this_size - head;
		}

		/* split if warranted */
		if (head || tail) {
			int nr_extra = !!head + !!tail;

			/* insert new subblocks */
			memmove(p + nr_extra + 1, p + 1,
				sizeof(chunk->map[0]) * (chunk->map_used - i));
			chunk->map_used += nr_extra;

			if (head) {
				if (!seen_free) {
					chunk->first_free = i;
					seen_free = true;
				}
				*++p = off += head;
				++i;
				max_contig = max(head, max_contig);
			}
			if (tail) {
				p[1] = off + size;
				max_contig = max(tail, max_contig);
			}
		}

		if (!seen_free)
			chunk->first_free = i + 1;

		/* update hint and mark allocated */
		if (i + 1 == chunk->map_used)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= size;
		*p |= 1;

		*occ_pages_p = pcpu_count_occupied_pages(chunk, i);
		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
			   int *occ_pages_p)
{
	int oslot = pcpu_chunk_slot(chunk);
	int off = 0;
	unsigned i, j;
	int to_free = 0;
	int *p;

	lockdep_assert_held(&pcpu_lock);
	pcpu_stats_area_dealloc(chunk);

	freeme |= 1;	/* we are searching for <given offset, in use> pair */

	i = 0;
	j = chunk->map_used;
	while (i != j) {
		unsigned k = (i + j) / 2;
		off = chunk->map[k];
		if (off < freeme)
			i = k + 1;
		else if (off > freeme)
			j = k;
		else
			i = j = k;
	}
	BUG_ON(off != freeme);

	if (i < chunk->first_free)
		chunk->first_free = i;

	p = chunk->map + i;
	*p = off &= ~1;
	chunk->free_size += (p[1] & ~1) - off;

	*occ_pages_p = pcpu_count_occupied_pages(chunk, i);

	/* merge with next? */
	if (!(p[1] & 1))
		to_free++;
	/* merge with previous? */
	if (i > 0 && !(p[-1] & 1)) {
		to_free++;
		i--;
		p--;
	}
	if (to_free) {
		chunk->map_used -= to_free;
		memmove(p + 1, p + 1 + to_free,
			(chunk->map_used - i) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1,
				 chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
							 int map_size,
							 int *map,
							 int init_map_size)
{
	struct pcpu_chunk *chunk;
	unsigned long aligned_addr;
	int start_offset, region_size;

	/* region calculations */
	aligned_addr = tmp_addr & PAGE_MASK;

	start_offset = tmp_addr - aligned_addr;

	region_size = PFN_ALIGN(start_offset + map_size);

	/* allocate chunk */
	chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) +
				    BITS_TO_LONGS(region_size >> PAGE_SHIFT),
				    0);

	INIT_LIST_HEAD(&chunk->list);
	INIT_LIST_HEAD(&chunk->map_extend_list);

	chunk->base_addr = (void *)aligned_addr;
	chunk->start_offset = start_offset;
	chunk->end_offset = region_size - chunk->start_offset - map_size;

	chunk->nr_pages = region_size >> PAGE_SHIFT;

	chunk->map = map;
	chunk->map_alloc = init_map_size;

	/* manage populated page bitmap */
	chunk->immutable = true;
	bitmap_fill(chunk->populated, chunk->nr_pages);
	chunk->nr_populated = chunk->nr_pages;
	chunk->nr_empty_pop_pages = chunk->nr_pages;

	chunk->contig_hint = chunk->free_size = map_size;

	if (chunk->start_offset) {
		/* hide the beginning of the bitmap */
		chunk->nr_empty_pop_pages--;

		chunk->map[0] = 1;
		chunk->map[1] = chunk->start_offset;
		chunk->map_used = 1;
	}

	/* set chunk's free region */
	chunk->map[++chunk->map_used] =
		(chunk->start_offset + chunk->free_size) | 1;

	if (chunk->end_offset) {
		/* hide the end of the bitmap */
		chunk->nr_empty_pop_pages--;

		chunk->map[++chunk->map_used] = region_size | 1;
	}

	return chunk;
}

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
	if (!chunk->map) {
		pcpu_mem_free(chunk);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[0] = 0;
	chunk->map[1] = pcpu_unit_size | 1;
	chunk->map_used = 1;

	INIT_LIST_HEAD(&chunk->list);
	INIT_LIST_HEAD(&chunk->map_extend_list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	chunk->nr_pages = pcpu_unit_pages;

	return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map);
	pcpu_mem_free(chunk);
}

/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;
	chunk->nr_empty_pop_pages += nr;
	pcpu_nr_empty_pop_pages += nr;
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	chunk->nr_empty_pop_pages -= nr;
	pcpu_nr_empty_pop_pages -= nr;
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to the containing page struct
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * This is an internal function that handles all but static allocations.
 * Static percpu address values should never be passed into the allocator.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the dynamic region (first chunk)? */
	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
		return pcpu_first_chunk;

	/* is it in the reserved region? */
	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
		return pcpu_reserved_chunk;

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
				 gfp_t gfp)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
	int occ_pages = 0;
	int slot, off, new_alloc, cpu, ret;
	unsigned long flags;
	void __percpu *ptr;

	/*
	 * We want the lowest bit of offset available for in-use/free
	 * indicator, so force >= 16bit alignment and make size even.
	 */
	if (unlikely(align < 2))
		align = 2;

	size = ALIGN(size, 2);

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
		     !is_power_of_2(align))) {
		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
		     size, align);
		return NULL;
	}

	if (!is_atomic)
		mutex_lock(&pcpu_alloc_mutex);

	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (is_atomic ||
			    pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align, is_atomic,
				      &occ_pages);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk, is_atomic);
			if (new_alloc) {
				if (is_atomic)
					continue;
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align, is_atomic,
					      &occ_pages);
			if (off >= 0)
				goto area_found;
		}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * No space left.  Create a new chunk.  We don't want multiple
	 * tasks to create chunks simultaneously.  Serialize and create iff
	 * there's still no empty chunk after grabbing the mutex.
	 */
	if (is_atomic) {
		err = "atomic alloc failed, no space left";
		goto fail;
	}

	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
		chunk = pcpu_create_chunk();
		if (!chunk) {
			err = "failed to allocate new chunk";
			goto fail;
		}

		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_chunk_relocate(chunk, -1);
	} else {
		spin_lock_irqsave(&pcpu_lock, flags);
	}

	goto restart;

area_found:
	pcpu_stats_area_alloc(chunk, size);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate if not all pages are already there */
	if (!is_atomic) {
		int page_start, page_end, rs, re;

		page_start = PFN_DOWN(off);
		page_end = PFN_UP(off + size);

		pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
			WARN_ON(chunk->immutable);

			ret = pcpu_populate_chunk(chunk, rs, re);

			spin_lock_irqsave(&pcpu_lock, flags);
			if (ret) {
				pcpu_free_area(chunk, off, &occ_pages);
				err = "failed to populate";
				goto fail_unlock;
			}
			pcpu_chunk_populated(chunk, rs, re);
			spin_unlock_irqrestore(&pcpu_lock, flags);
		}

		mutex_unlock(&pcpu_alloc_mutex);
	}

	if (chunk != pcpu_reserved_chunk) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_nr_empty_pop_pages -= occ_pages;
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}

	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
		pcpu_schedule_balance_work();

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size, gfp);

	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
			chunk->base_addr, off, ptr);

	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);

	if (!is_atomic && warn_limit) {
		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
			size, align, is_atomic, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("limit reached, disable warning\n");
	}
	if (is_atomic) {
		/* see the flag handling in pcpu_balance_workfn() */
		pcpu_atomic_alloc_failed = true;
		pcpu_schedule_balance_work();
	} else {
		mutex_unlock(&pcpu_alloc_mutex);
	}
	return NULL;
}

/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

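/*
 * Illustrative sketch (not part of this file): an atomic caller passes
 * a gfp mask without GFP_KERNEL and must tolerate failure, e.g.
 *
 *	u64 __percpu *cnt = __alloc_percpu_gfp(sizeof(u64),
 *					       __alignof__(u64), GFP_NOWAIT);
 *	if (!cnt)
 *		return -ENOMEM;     (hypothetical error handling)
 *
 * Such allocations are served only from already populated pages, which
 * is why the balance work below keeps a reserve of empty populated
 * pages around.
 */
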
/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true, GFP_KERNEL);
}

/**
 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 */
static void pcpu_balance_workfn(struct work_struct *work)
{
	LIST_HEAD(to_free);
	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;
	int slot, nr_to_pop, ret;

	/*
	 * There's no reason to keep around multiple unused chunks and VM
	 * areas can be scarce.  Destroy all free chunks except for one.
	 */
	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, free_head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
			continue;

		list_del_init(&chunk->map_extend_list);
		list_move(&chunk->list, &to_free);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &to_free, list) {
		int rs, re;

		pcpu_for_each_pop_region(chunk, rs, re, 0, chunk->nr_pages) {
			pcpu_depopulate_chunk(chunk, rs, re);
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_depopulated(chunk, rs, re);
			spin_unlock_irq(&pcpu_lock);
		}
		pcpu_destroy_chunk(chunk);
	}

	/* service chunks which requested async area map extension */
	do {
		int new_alloc = 0;

		spin_lock_irq(&pcpu_lock);

		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
					struct pcpu_chunk, map_extend_list);
		if (chunk) {
			list_del_init(&chunk->map_extend_list);
			new_alloc = pcpu_need_to_extend(chunk, false);
		}

		spin_unlock_irq(&pcpu_lock);

		if (new_alloc)
			pcpu_extend_area_map(chunk, new_alloc);
	} while (chunk);

	/*
	 * Ensure there are certain number of free populated pages for
	 * atomic allocs.  Fill up from the most packed so that atomic
	 * allocs don't increase fragmentation.  If atomic allocation
	 * failed previously, always populate the maximum amount.  This
	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
	 * failing indefinitely; however, large atomic allocs are not
	 * something we support properly and can be highly unreliable and
	 * inefficient.
	 */
retry_pop:
	if (pcpu_atomic_alloc_failed) {
		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
		/* best effort anyway, don't worry about synchronization */
		pcpu_atomic_alloc_failed = false;
	} else {
		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
				  pcpu_nr_empty_pop_pages,
				  0, PCPU_EMPTY_POP_PAGES_HIGH);
	}

	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
		int nr_unpop = 0, rs, re;

		if (!nr_to_pop)
			break;

		spin_lock_irq(&pcpu_lock);
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			nr_unpop = chunk->nr_pages - chunk->nr_populated;
			if (nr_unpop)
				break;
		}
		spin_unlock_irq(&pcpu_lock);

		if (!nr_unpop)
			continue;

		/* @chunk can't go away while pcpu_alloc_mutex is held */
		pcpu_for_each_unpop_region(chunk, rs, re, 0, chunk->nr_pages) {
			int nr = min(re - rs, nr_to_pop);

			ret = pcpu_populate_chunk(chunk, rs, rs + nr);
			if (!ret) {
				nr_to_pop -= nr;
				spin_lock_irq(&pcpu_lock);
				pcpu_chunk_populated(chunk, rs, rs + nr);
				spin_unlock_irq(&pcpu_lock);
			} else {
				nr_to_pop = 0;
			}

			if (!nr_to_pop)
				break;
		}
	}

	if (nr_to_pop) {
		/* ran out of chunks to populate, create a new one and retry */
		chunk = pcpu_create_chunk();
		if (chunk) {
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_relocate(chunk, -1);
			spin_unlock_irq(&pcpu_lock);
			goto retry_pop;
		}
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off, occ_pages;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off, &occ_pages);

	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages += occ_pages;

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				pcpu_schedule_balance_work();
				break;
			}
	}

	trace_percpu_free_percpu(chunk->base_addr, off, ptr);

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);
		void *va = (void *)addr;

		if (va >= start && va < start + static_size) {
			if (can_addr) {
				*can_addr = (unsigned long) (va - start);
				*can_addr += (unsigned long)
					per_cpu_ptr(base, get_boot_cpu_id());
			}
			return true;
		}
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
	return __is_kernel_percpu_address(addr, NULL);
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * percpu allocator has special setup for the first chunk, which currently
 * supports either embedding in linear address space or vmalloc mapping,
 * and, from the second one, the backing allocator (currently either vm or
 * km) provides translation.
 *
 * The addr can be translated simply without checking if it falls into the
 * first chunk. But the current code reflects better how percpu allocator
 * actually works, and the verification can discover both bugs in percpu
 * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current
 * code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 *
	 * The address check is against full chunk sizes.  pcpu_base_addr
	 * points to the beginning of the first chunk including the
	 * static region.  Assumes good intent as the first chunk may
	 * not be full (ie. < pcpu_unit_pages in size).
	 */
	first_low = (unsigned long)pcpu_base_addr +
		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
	first_high = (unsigned long)pcpu_base_addr +
		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	memblock_free_early(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				pr_cont("\n");
				printk("%spcpu-alloc: ", lvl);
			}
			pr_cont("[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					pr_cont("%0*d ",
						cpu_width, gi->cpu_map[unit]);
				else
					pr_cont("%s ", empty_str);
		}
	}
	pr_cont("\n");
}

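/*
 * For reference, the dump above produces lines of the following shape
 * (values are illustrative only):
 *
 *	pcpu-alloc: s8192 r8192 d28672 u65536 alloc=16*4096
 *	pcpu-alloc: [0] 0 [0] 1 [0] 2 [0] 3
 *
 * where s/r/d/u are the static, reserved, dynamic and unit sizes in
 * bytes, followed by the [group] cpu assignment of each unit.
 */
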
/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * The first chunk will always contain a static and a dynamic region.
 * However, the static region is not managed by any chunk.  If the first
 * chunk also contains a reserved region, it is served by two chunks -
 * one for the reserved region and one for the dynamic region.  They
 * share the same vm, but use offset regions in the area allocation map.
 * The chunk serving the dynamic region is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
T
Tejun Heo 已提交
1650 1651
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
1652
{
1653 1654
	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1655
	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1656
	struct pcpu_chunk *chunk;
1657 1658
	unsigned long *group_offsets;
	size_t *group_sizes;
T
Tejun Heo 已提交
1659
	unsigned long *unit_off;
1660
	unsigned int cpu;
1661 1662
	int *unit_map;
	int group, unit, i;
1663 1664
	int map_size;
	unsigned long tmp_addr;
1665

1666 1667
#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
1668 1669
		pr_emerg("failed to initialize, %s\n", #cond);		\
		pr_emerg("cpu_possible_mask=%*pb\n",			\
1670
			 cpumask_pr_args(cpu_possible_mask));		\
1671 1672 1673 1674 1675
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

1676
	/* sanity checks */
1677
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1678
#ifdef CONFIG_SMP
1679
	PCPU_SETUP_BUG_ON(!ai->static_size);
1680
	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
1681
#endif
1682
	PCPU_SETUP_BUG_ON(!base_addr);
1683
	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
1684
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1685
	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
1686
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1687
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1688
	PCPU_SETUP_BUG_ON(!ai->dyn_size);
1689
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1690

	/* process group information and build config tables accordingly */
	group_offsets = memblock_virt_alloc(ai->nr_groups *
					     sizeof(group_offsets[0]), 0);
	group_sizes = memblock_virt_alloc(ai->nr_groups *
					   sizeof(group_sizes[0]), 0);
	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;

	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
		}
	}
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_DEBUG, ai);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	pcpu_stats_save_ai(ai);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = memblock_virt_alloc(
			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize first chunk.
	 * If the reserved_size is non-zero, this initializes the reserved
	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
	 * and the dynamic region is initialized here.  The first chunk,
	 * pcpu_first_chunk, will always point to the chunk that serves
	 * the dynamic region.
	 */
	tmp_addr = (unsigned long)base_addr + ai->static_size;
	map_size = ai->reserved_size ?: ai->dyn_size;
	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size, smap,
				       ARRAY_SIZE(smap));

	/* init dynamic chunk if necessary */
	if (ai->reserved_size) {
		pcpu_reserved_chunk = chunk;

		tmp_addr = (unsigned long)base_addr + ai->static_size +
			   ai->reserved_size;
		map_size = ai->dyn_size;
		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size, dmap,
					       ARRAY_SIZE(dmap));
	}

	/* link the first chunk in */
	pcpu_first_chunk = chunk;
	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(base_addr);

	/* we're done */
	pcpu_base_addr = base_addr;
	return 0;
}
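
/*
 * Worked example, for illustration only (the numbers are hypothetical,
 * not taken from any particular arch): with ai->static_size = 64k,
 * ai->reserved_size = 8k and ai->dyn_size = 24k, each unit at
 * @base_addr is laid out by the function above as
 *
 *	base_addr +  0k: static area, copied from the kernel image and
 *	                 not managed by any chunk
 *	base_addr + 64k: reserved area, served by pcpu_reserved_chunk
 *	base_addr + 72k: dynamic area, served by pcpu_first_chunk
 *
 * and the sanity checks require ai->unit_size to be page aligned and
 * at least size_sum = 96k (and no smaller than PCPU_MIN_UNIT_SIZE).
 */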

#ifdef CONFIG_SMP

const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warn("unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
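
/*
 * Example (informational): the first chunk allocator can be chosen on
 * the kernel command line, e.g.
 *
 *	percpu_alloc=embed
 *	percpu_alloc=page
 *
 * subject to the CONFIG_NEED_PER_CPU_*_FIRST_CHUNK options handled
 * above; any other value falls through to the "unknown allocator"
 * warning.
 */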

/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if the arch config needs it or if the generic setup is
 * going to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	/* determine the maximum # of units that can fit in an allocation */
	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
	}

	/*
	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
	 * Expand the unit_size until we use >= 75% of the units allocated.
	 * Related to atom_size, which could be much larger than the unit_size.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
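
/*
 * Worked example of the upa fitting above (illustrative numbers only):
 * assume size_sum = 320k and atom_size = 2M.  Then min_unit_size =
 * 320k, alloc_size = roundup(320k, 2M) = 2M and the starting upa is
 * 2M / 320k = 6.  Neither 6 nor 5 divides 2M evenly, so the loop
 * settles on max_upa = 4, i.e. four page-aligned 512k units per 2M
 * allocation, which CPUs within LOCAL_DISTANCE of each other may then
 * share as one group.
 */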
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */

#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size;
	unsigned long max_distance;
	int group, i, highest_group, rc;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = memblock_virt_alloc_nopanic(areas_size, 0);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address & max_distance */
	highest_group = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_free(ptr);
		areas[group] = ptr;

		base = min(ptr, base);
		if (ptr > areas[highest_group])
			highest_group = group;
	}
	max_distance = areas[highest_group] - base;
	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
				max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free_areas;
#endif
	}

	/*
	 * Copy data and free unused parts.  This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
	}

	pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		if (areas[group])
			free_fn(areas[group],
				ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		memblock_free_early(__pa(areas), areas_size);
	return rc;
}
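
/*
 * For illustration, a minimal sketch of how an arch without its own
 * setup_per_cpu_areas() gets wired to the embed helper (the helper
 * names are hypothetical; the generic version near the bottom of this
 * file does essentially this):
 *
 *	static void * __init my_fc_alloc(unsigned int cpu, size_t size,
 *					 size_t align)
 *	{
 *		return memblock_virt_alloc_from_nopanic(size, align,
 *					__pa(MAX_DMA_ADDRESS));
 *	}
 *
 *	static void __init my_fc_free(void *ptr, size_t size)
 *	{
 *		memblock_free_early(__pa(ptr), size);
 *	}
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *				    NULL, my_fc_alloc, my_fc_free);
 */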
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc;
	int upa;
	int nr_g0_units;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	upa = ai->alloc_size / ai->unit_size;
	nr_g0_units = roundup(num_possible_cpus(), upa);
	if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) {
		pcpu_free_alloc_info(ai);
		return -EINVAL;
	}

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = memblock_virt_alloc(pages_size, 0);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned int cpu = ai->groups[0].cpu_map[unit];
		for (i = 0; i < unit_pages; i++) {
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warn("failed to allocate %s page for cpu%u\n",
						psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_free(ptr);
			pages[j++] = virt_to_page(ptr);
		}
	}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
		unit_pages, psize_str, vm.addr, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	memblock_free_early(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
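
/*
 * Illustrative call shape (hypothetical helper names, not defined in
 * this file):
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   my_fc_alloc, my_fc_free,
 *				   my_populate_pte);
 *
 * where my_fc_alloc()/my_fc_free() hand out and release one PAGE_SIZE
 * page at a time and my_populate_pte() makes sure a pte exists in the
 * kernel page tables for the given address before the page is mapped.
 */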
#endif /* BUILD_PAGE_FIRST_CHUNK */

#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return  memblock_virt_alloc_from_nopanic(
			size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
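
/*
 * Informational note: with the offsets computed above, accessing a
 * static percpu variable for a given cpu conceptually reduces to
 *
 *	ptr = &var + __per_cpu_offset[cpu];
 *
 * which is what the generic per_cpu()/per_cpu_ptr() accessors expand
 * to on archs that use these definitions.
 */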
#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else	/* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = memblock_virt_alloc_from_nopanic(unit_size,
					      PAGE_SIZE,
					      __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_free(fc);

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	if (pcpu_setup_first_chunk(ai, fc) < 0)
		panic("Failed to initialize percpu areas.");
}

#endif	/* CONFIG_SMP */

/*
 * First and reserved chunks are initialized with temporary allocation
 * map in initdata so that they can be used before slab is online.
 * This function is called after slab is brought up and replaces those
 * with properly allocated maps.
 */
void __init percpu_init_late(void)
{
	struct pcpu_chunk *target_chunks[] =
		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int i;

	for (i = 0; (chunk = target_chunks[i]); i++) {
		int *map;
		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);

		BUILD_BUG_ON(size > PAGE_SIZE);

		map = pcpu_mem_zalloc(size);
		BUG_ON(!map);

		spin_lock_irqsave(&pcpu_lock, flags);
		memcpy(map, chunk->map, size);
		chunk->map = map;
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}
}

/*
 * Percpu allocator is initialized early during boot when neither slab or
 * workqueue is available.  Plug async management until everything is up
 * and running.
 */
static int __init percpu_enable_async(void)
{
	pcpu_async_enabled = true;
	return 0;
}
subsys_initcall(percpu_enable_async);