/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and dynamic
 * areas.  Percpu areas are allocated in chunks.  Each chunk consists
 * of a boot-time determined number of units and the first chunk is
 * used for static percpu variables in the kernel image (special boot
 * time alloc/init handling is necessary as these areas need to be
 * brought up before allocation services are running).  Units grow as
 * necessary and all units grow or shrink in unison.  When a chunk is
 * filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of a single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   a regular address to a percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
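
/*
 * Illustrative sketch, not part of the original file: a typical dynamic
 * percpu user only ever sees the public API defined below.  The
 * variable names here are invented for illustration.
 *
 *	int __percpu *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *	int cpu, sum = 0;
 *
 *	if (cnt) {
 *		for_each_possible_cpu(cpu)
 *			sum += *per_cpu_ptr(cnt, cpu);
 *		free_percpu(cnt);
 *	}
 */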

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr	+		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	void			*data;		/* chunk data */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __read_mostly;
static unsigned int pcpu_high_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 * general, percpu memory can't be allocated with irq off but
 * irqsave/restore are still used in alloc path so that it can be used
 * from early init path - sched_init() specifically.
 *
 * Free path accesses and alters only the index data structures, so it
 * can be safely called from atomic context.  When memory needs to be
 * returned to the system, free path schedules reclaim_work which
 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 * reclaimed, releases both locks and frees the chunks.  Note that it's
 * necessary to grab both locks to remove a chunk from circulation as
 * allocation path might be referencing the chunk with only
 * pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
		addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}
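
/*
 * Worked example (illustrative only, numbers invented): with
 * PCPU_SLOT_BASE_SHIFT == 5, a chunk with 1024 free bytes has
 * fls(1024) == 11 and goes to slot max(11 - 5 + 2, 1) == 8, any free
 * size from 1 to 31 bytes collapses into slot 1, and chunks too small
 * to hold an int stay in slot 0.
 */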

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	return new_alloc;
}
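
/*
 * Worked example (illustrative only): a chunk with map_used == 37 and
 * map_alloc == 32 cannot take two more entries, so new_alloc doubles
 * from PCPU_DFL_MAP_ALLOC (16) to 64, the first power-of-two size that
 * holds map_used + 2 == 39 entries.
 */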

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}
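
/*
 * Illustrative sketch, not from the original source: splitting entry 1
 * of a map { -64, 256, ... } with head == 16 and tail == 48 shifts the
 * tail of the map and leaves { -64, 16, 192, 48, ... }; the caller then
 * marks the 192 byte middle block allocated by negating it.
 */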

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
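
/*
 * Illustrative sketch, not from the original source: freeing the middle
 * area of a map { 64, -128, 256 } flips -128 back to 128 and then
 * merges both neighbours, leaving the single free entry { 448 } with
 * map_used reduced accordingly.
 */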

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
	if (!chunk->map) {
		kfree(chunk);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	pcpu_mem_free(chunk, pcpu_chunk_struct_size);
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to physical address
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off, new_alloc;
	unsigned long flags;
	void __percpu *ptr;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail_unlock_mutex;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail_unlock_mutex;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irqrestore(&pcpu_lock, flags);

	chunk = pcpu_create_chunk();
	if (!chunk) {
		err = "failed to allocate new chunk";
		goto fail_unlock_mutex;
	}

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_free_area(chunk, off);
		err = "failed to populate";
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size);
	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	if (warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
			   "%s\n", size, align, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.
 * Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		pcpu_destroy_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there are more than one fully free chunks, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		if ((void *)addr >= start && (void *)addr < start + static_size)
			return true;
        }
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * percpu allocator has special setup for the first chunk, which currently
 * supports either embedding in linear address space or vmalloc mapping,
 * and, from the second one, the backing allocator (currently either vm or
 * km) provides translation.
 *
 * The addr can be translated simply without checking if it falls into the
 * first chunk.  But the current code reflects better how percpu allocator
 * actually works, and the verification can discover both bugs in percpu
 * allocator itself and per_cpu_ptr_to_phys() callers.  So we keep current
 * code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 */
	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
				     pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	memblock_free_early(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				printk(KERN_CONT "\n");
				printk("%spcpu-alloc: ", lvl);
			}
			printk(KERN_CONT "[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					printk(KERN_CONT "%0*d ", cpu_width,
					       gi->cpu_map[unit]);
				else
					printk(KERN_CONT "%s ", empty_str);
		}
	}
	printk(KERN_CONT "\n");
}

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
	static char cpus_buf[4096] __initdata;
	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	size_t dyn_size = ai->dyn_size;
	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;

	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
#endif
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

	/* process group information and build config tables accordingly */
	group_offsets = memblock_virt_alloc(ai->nr_groups *
					     sizeof(group_offsets[0]), 0);
	group_sizes = memblock_virt_alloc(ai->nr_groups *
					   sizeof(group_sizes[0]), 0);
	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;

	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
		}
	}
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_DEBUG, ai);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = memblock_virt_alloc(
			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
	INIT_LIST_HEAD(&schunk->list);
	schunk->base_addr = base_addr;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->immutable = true;
	bitmap_fill(schunk->populated, pcpu_unit_pages);

	if (ai->reserved_size) {
		schunk->free_size = ai->reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -ai->static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->base_addr = base_addr;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->immutable = true;
		bitmap_fill(dchunk->populated, pcpu_unit_pages);

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* we're done */
	pcpu_base_addr = base_addr;
	return 0;
}
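
/*
 * Illustrative sketch (numbers invented): with a 64k static area, an 8k
 * reserved area, 20k of dynamic space and a 128k unit, schunk covers
 * the static + reserved region (pcpu_reserved_chunk_limit == 72k),
 * dchunk starts its map with a -72k entry followed by the 20k free
 * area, and the remaining 36k of each unit stays unused.
 */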

#ifdef CONFIG_SMP

const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warning("PERCPU: unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
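
/*
 * For example, booting with "percpu_alloc=page" on a configuration that
 * provides CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK selects the page-mapped
 * first chunk instead of the embed/auto default.
 */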

/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if needed by the arch config or the generic setup is going
 * to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
	}

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
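
/*
 * Worked example (illustrative only, numbers invented): for a size_sum
 * of about 44k and a 2M atom, min_unit_size is 44k, alloc_size is 2M
 * and the initial upa of 2M / 44k == 46 is walked down until the unit
 * divides evenly and stays page aligned, ending at upa == 32 and a 64k
 * unit_size.
 */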
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */

#if defined(BUILD_EMBED_FIRST_CHUNK)
1580 1581 1582
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
1583
 * @dyn_size: minimum free size for dynamic allocation in bytes
1584 1585 1586
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
L
Lucas De Marchi 已提交
1587
 * @free_fn: function to free percpu page
1588 1589 1590 1591 1592
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
1593 1594 1595 1596 1597 1598 1599 1600 1601 1602
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
1603
 *
1604
 * @dyn_size specifies the minimum dynamic area size.
1605 1606
 *
 * If the needed size is smaller than the minimum or specified unit
1607
 * size, the leftover is returned using @free_fn.
1608 1609
 *
 * RETURNS:
T
Tejun Heo 已提交
1610
 * 0 on success, -errno on failure.
1611
 */
1612
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1613 1614 1615 1616
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
1617
{
1618 1619
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
1620
	struct pcpu_alloc_info *ai;
1621
	size_t size_sum, areas_size, max_distance;
1622
	int group, i, rc;
1623

1624 1625
	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
1626 1627
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = memblock_virt_alloc_nopanic(areas_size, 0);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_free(ptr);
		areas[group] = ptr;

		base = min(ptr, base);
	}

	/*
	 * Copy data and free unused parts.  This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	max_distance = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
		max_distance = max_t(size_t, max_distance,
				     ai->groups[group].base_offset);
	}
	max_distance += ai->unit_size;
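	/*
	 * Dynamic percpu chunks are later vmalloc-mapped using the same
	 * cpu -> unit offsets, so the whole span covered by the
	 * embedded groups must stay comfortably addressable there.
	 */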

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
			   "space 0x%lx\n", max_distance,
			   VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free;
#endif
	}

	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		if (areas[group])
			free_fn(areas[group],
				ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		memblock_free_early(__pa(areas), areas_size);
	return rc;
}
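
/*
 * Illustrative sketch (hypothetical, not taken from any arch): a NUMA
 * arch would normally pass a distance callback so that CPUs sharing a
 * node land in the same allocation group, roughly:
 *
 *	static int __init arch_cpu_distance(unsigned int from, unsigned int to)
 *	{
 *		return cpu_to_node(from) == cpu_to_node(to) ?
 *			LOCAL_DISTANCE : REMOTE_DISTANCE;
 *	}
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, 2UL << 20,
 *				    arch_cpu_distance, arch_fc_alloc,
 *				    arch_fc_free);
 *
 * arch_fc_alloc()/arch_fc_free() stand in for the arch's own bootmem
 * wrappers and the 2MB atom assumes a large-page linear mapping.
 */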
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
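	/*
	 * With a PAGE_SIZE atom and no distance callback,
	 * pcpu_build_alloc_info() collapses everything into a single
	 * group holding one unit per possible CPU; the BUG_ONs below
	 * assert that assumption.
	 */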
	BUG_ON(ai->nr_groups != 1);
	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = memblock_virt_alloc(pages_size, 0);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++)
		for (i = 0; i < unit_pages; i++) {
			unsigned int cpu = ai->groups[0].cpu_map[unit];
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warning("PERCPU: failed to allocate %s page "
					   "for cpu%u\n", psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_free(ptr);
			pages[j++] = virt_to_page(ptr);
		}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
		unit_pages, psize_str, vm.addr, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	memblock_free_early(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
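
/*
 * Illustrative sketch (hypothetical): an arch which cannot embed the
 * first chunk in its linear mapping would call this helper from its
 * setup_per_cpu_areas() along these lines, supplying its own bootmem
 * wrappers and pte populator:
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   arch_fc_alloc, arch_fc_free,
 *				   arch_populate_pte);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 */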
#endif /* BUILD_PAGE_FIRST_CHUNK */

#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return  memblock_virt_alloc_from_nopanic(
			size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
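	/*
	 * delta relocates the address of a static percpu variable in
	 * the kernel image into the first chunk; adding each cpu's
	 * unit offset then yields that cpu's private copy.
	 */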
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else	/* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = memblock_virt_alloc_from_nopanic(unit_size,
					      PAGE_SIZE,
					      __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_free(fc);

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	if (pcpu_setup_first_chunk(ai, fc) < 0)
		panic("Failed to initialize percpu areas.");
}

#endif	/* CONFIG_SMP */

/*
 * First and reserved chunks are initialized with temporary allocation
 * map in initdata so that they can be used before slab is online.
 * This function is called after slab is brought up and replaces those
 * with properly allocated maps.
 */
void __init percpu_init_late(void)
{
	struct pcpu_chunk *target_chunks[] =
		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int i;

	for (i = 0; (chunk = target_chunks[i]); i++) {
		int *map;
		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);

		BUILD_BUG_ON(size > PAGE_SIZE);

		map = pcpu_mem_zalloc(size);
		BUG_ON(!map);

		spin_lock_irqsave(&pcpu_lock, flags);
		memcpy(map, chunk->map, size);
		chunk->map = map;
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}
}