/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks.  Each chunk
 * consists of a boot-time determined number of units and the first
 * chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).
 * Each unit grows as necessary and all units grow or shrink in
 * unison.  When a chunk is filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  Each entry holds the byte offset of an area; the
 * low bit is set while the area is in use and clear while it is
 * free.  Allocation inside a chunk is done by scanning this map
 * sequentially and serving the first matching entry.  This is mostly
 * copied from the percpu_modalloc() allocator.
 *
 * Chunks can be determined from the address using the index field
 * in the page struct.  The index field contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
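
/*
 * Worked example of the map encoding above (illustrative only; the
 * offsets are made up).  A chunk with
 *
 *	map[] = { 0 | 1, 512, 1024 | 1, 4096 | 1 }, map_used = 3
 *
 * describes an allocated area at [0, 512), a free area at [512, 1024)
 * and an allocated area at [1024, 4096).  map[map_used] is the sentry
 * carrying the chunk end offset, and (map[i + 1] & ~1) - (map[i] & ~1)
 * is the size of area i.
 */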

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr	+		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used before the sentry */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	void			*data;		/* chunk data */
	int			first_free;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __read_mostly;
static unsigned int pcpu_high_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;
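
/*
 * Illustrative first chunk layout (the sizes below are made up, not
 * from any real configuration): with a static area of 64k, a reserved
 * area of 8k and a dynamic area of 20k inside a 128k unit, each unit
 * looks like
 *
 *	| static 64k | reserved 8k | dynamic 20k | unused 36k |
 *
 * and pcpu_reserved_chunk_limit is 72k (static + reserved), so
 * reserved allocations are served from the [64k, 72k) window.
 */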

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 * general, percpu memory can't be allocated with irq off but
 * irqsave/restore are still used in the alloc path so that it can be
 * used from the early init path - sched_init() specifically.
 *
 * The free path accesses and alters only the index data structures,
 * so it can be safely called from atomic context.  When memory needs
 * to be returned to the system, the free path schedules
 * pcpu_reclaim_work which grabs both pcpu_alloc_mutex and pcpu_lock,
 * unlinks chunks to be reclaimed, releases both locks and frees the
 * chunks.  Note that it's necessary to grab both locks to remove a
 * chunk from circulation as the allocation path might be referencing
 * the chunk with only pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
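
/*
 * Sketch of the locking order described above (illustrative only; the
 * real sequence lives in pcpu_alloc() below):
 *
 *	mutex_lock(&pcpu_alloc_mutex);
 *	spin_lock_irqsave(&pcpu_lock, flags);
 *	... scan chunk slots; drop pcpu_lock (but keep the mutex)
 *	    around GFP_KERNEL allocations such as extending an area
 *	    map or creating a new chunk, then retake it ...
 *	spin_unlock_irqrestore(&pcpu_lock, flags);
 *	mutex_unlock(&pcpu_alloc_mutex);
 */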

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
		addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
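
/*
 * Worked example of the slot math above (illustrative): a chunk with
 * 1024 bytes free has fls(1024) = 11 and lands in slot 11 - 5 + 2 = 8,
 * while a 12 byte free size gives fls(12) = 4 and clamps to slot 1.  A
 * completely free chunk (free_size == pcpu_unit_size) always goes to
 * the last slot, pcpu_nr_slots - 1.
 */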

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
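
/*
 * Usage sketch for the iterators above (illustrative; the chunk
 * [de]population code pulled in below uses this pattern):
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
 *		... pages [rs, re) are unpopulated and may be
 *		    allocated and mapped here ...
 *	}
 */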

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 3)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 3)
		new_alloc *= 2;

	return new_alloc;
}
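
/*
 * Illustrative growth example (numbers made up): a chunk with
 * map_used = 30 and map_alloc = 32 fails the map_alloc >= map_used + 3
 * test, so pcpu_need_to_extend() doubles from PCPU_DFL_MAP_ALLOC (16)
 * up to 64, the first power of two with room for 33 entries.
 */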

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;
	bool seen_free = false;
	int *p;

	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
		int head, tail;
		int this_size;

		off = *p;
		if (off & 1)
			continue;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;

		this_size = (p[1] & ~1) - off;
		if (this_size < head + size) {
			if (!seen_free) {
				chunk->first_free = i;
				seen_free = true;
			}
			max_contig = max(this_size, max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
			*p = off += head;
			if (p[-1] & 1)
				chunk->free_size -= head;
			else
				max_contig = max(*p - p[-1], max_contig);
			this_size -= head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = this_size - head - size;
		if (tail < sizeof(int)) {
			tail = 0;
			size = this_size - head;
		}

		/* split if warranted */
		if (head || tail) {
			int nr_extra = !!head + !!tail;

			/* insert new subblocks */
			memmove(p + nr_extra + 1, p + 1,
				sizeof(chunk->map[0]) * (chunk->map_used - i));
			chunk->map_used += nr_extra;

			if (head) {
				if (!seen_free) {
					chunk->first_free = i;
					seen_free = true;
				}
				*++p = off += head;
				++i;
				max_contig = max(head, max_contig);
			}
			if (tail) {
				p[1] = off + size;
				max_contig = max(tail, max_contig);
			}
		}

		if (!seen_free)
			chunk->first_free = i + 1;

		/* update hint and mark allocated */
		if (i + 1 == chunk->map_used)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= size;
		*p |= 1;

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free the area starting at @freeme in @chunk.  Note that this
 * function only modifies the allocation map.  It doesn't depopulate
 * or unmap the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int off = 0;
	unsigned i, j;
	int to_free = 0;
	int *p;

	freeme |= 1;	/* we are searching for <given offset, in use> pair */

	i = 0;
	j = chunk->map_used;
	while (i != j) {
		unsigned k = (i + j) / 2;
		off = chunk->map[k];
		if (off < freeme)
			i = k + 1;
		else if (off > freeme)
			j = k;
		else
			i = j = k;
	}
	BUG_ON(off != freeme);

	if (i < chunk->first_free)
		chunk->first_free = i;

	p = chunk->map + i;
	*p = off &= ~1;
	chunk->free_size += (p[1] & ~1) - off;

	/* merge with next? */
	if (!(p[1] & 1))
		to_free++;
	/* merge with previous? */
	if (i > 0 && !(p[-1] & 1)) {
		to_free++;
		i--;
		p--;
	}
	if (to_free) {
		chunk->map_used -= to_free;
		memmove(p + 1, p + 1 + to_free,
			(chunk->map_used - i) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
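
/*
 * Illustrative free (offsets made up): with
 *
 *	map[] = { 0 | 1, 512 | 1, 1024, 4096 | 1 }, map_used = 3
 *
 * pcpu_free_area(chunk, 512) binary-searches for 512 | 1, clears the
 * in-use bit and, since the next area at 1024 is also free, merges the
 * two into a single free area [512, 4096), dropping one map entry.
 */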

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
	if (!chunk->map) {
		pcpu_mem_free(chunk, pcpu_chunk_struct_size);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[0] = 0;
	chunk->map[1] = pcpu_unit_size | 1;
	chunk->map_used = 1;

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	pcpu_mem_free(chunk, pcpu_chunk_struct_size);
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to the corresponding page struct
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));

693
/**
694
 * pcpu_alloc - the percpu allocator
T
Tejun Heo 已提交
695
 * @size: size of area to allocate in bytes
696
 * @align: alignment of area (max PAGE_SIZE)
697
 * @reserved: allocate from the reserved chunk if available
698
 *
699 700 701 702
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
703 704 705 706
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
707
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
708
{
709
	static int warn_limit = 10;
710
	struct pcpu_chunk *chunk;
711
	const char *err;
712
	int slot, off, new_alloc;
713
	unsigned long flags;
714
	void __percpu *ptr;
715

716 717
	/*
	 * We want the lowest bit of offset available for in-use/free
V
Viro 已提交
718
	 * indicator, so force >= 16bit alignment and make size even.
719 720 721 722
	 */
	if (unlikely(align < 2))
		align = 2;

723
	size = ALIGN(size, 2);
V
Viro 已提交
724

725
	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
726 727 728 729 730
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

731
	mutex_lock(&pcpu_alloc_mutex);
732
	spin_lock_irqsave(&pcpu_lock, flags);
733

734 735 736
	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
737 738 739

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
740
			goto fail_unlock;
741
		}
742 743 744 745 746 747 748 749 750 751

		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail_unlock_mutex;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

752 753 754
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
755

756
		err = "alloc from reserved chunk failed";
757
		goto fail_unlock;
758 759
	}

760
restart:
761
	/* search through normal chunks */
762 763 764 765
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;
766

767 768 769 770 771 772 773 774 775 776 777 778 779 780
			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail_unlock_mutex;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
781 782
			}

783 784 785 786 787 788 789
			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
790
	spin_unlock_irqrestore(&pcpu_lock, flags);
791

792
	chunk = pcpu_create_chunk();
793 794
	if (!chunk) {
		err = "failed to allocate new chunk";
795
		goto fail_unlock_mutex;
796
	}
797

798
	spin_lock_irqsave(&pcpu_lock, flags);
799
	pcpu_chunk_relocate(chunk, -1);
800
	goto restart;
801 802

area_found:
803
	spin_unlock_irqrestore(&pcpu_lock, flags);
804

805 806
	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
807
		spin_lock_irqsave(&pcpu_lock, flags);
808
		pcpu_free_area(chunk, off);
809
		err = "failed to populate";
810
		goto fail_unlock;
811 812
	}

813 814
	mutex_unlock(&pcpu_alloc_mutex);

T
Tejun Heo 已提交
815
	/* return address relative to base address */
816 817 818
	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size);
	return ptr;
819 820

fail_unlock:
821
	spin_unlock_irqrestore(&pcpu_lock, flags);
822 823
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
824 825 826 827 828 829 830
	if (warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
			   "%s\n", size, align, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
831
	return NULL;
832
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.
 * Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
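
/*
 * Usage sketch (illustrative only; my_stats and its hits field are
 * hypothetical):
 *
 *	struct my_stats { unsigned long hits; };
 *	struct my_stats __percpu *stats;
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	stats = __alloc_percpu(sizeof(*stats), __alignof__(*stats));
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->hits);
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu_ptr(stats, cpu)->hits;
 *	free_percpu(stats);
 */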

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		pcpu_destroy_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		if ((void *)addr >= start && (void *)addr < start + static_size)
			return true;
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * percpu allocator has special setup for the first chunk, which currently
 * supports either embedding in linear address space or vmalloc mapping,
 * and, from the second one, the backing allocator (currently either vm or
 * km) provides translation.
 *
 * The addr can be translated simply without checking if it falls into the
 * first chunk.  But the current code reflects better how percpu allocator
 * actually works, and the verification can discover both bugs in percpu
 * allocator itself and per_cpu_ptr_to_phys() callers.  So we keep current
 * code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 */
	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
				     pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}
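
/*
 * Illustrative call (hypothetical pointer): for a percpu pointer p
 * returned by __alloc_percpu(), per_cpu_ptr_to_phys(per_cpu_ptr(p, 3))
 * yields the physical address backing cpu 3's copy, whether that copy
 * lives in the linear mapping (embedded first chunk) or in a
 * vmalloc'ed chunk.
 */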

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	memblock_free_early(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				printk(KERN_CONT "\n");
				printk("%spcpu-alloc: ", lvl);
			}
			printk(KERN_CONT "[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					printk(KERN_CONT "%0*d ", cpu_width,
					       gi->cpu_map[unit]);
				else
					printk(KERN_CONT "%s ", empty_str);
		}
	}
	printk(KERN_CONT "\n");
}

1160
/**
1161
 * pcpu_setup_first_chunk - initialize the first percpu chunk
1162
 * @ai: pcpu_alloc_info describing how to percpu area is shaped
1163
 * @base_addr: mapped address
1164 1165 1166
 *
 * Initialize the first percpu chunk which contains the kernel static
 * perpcu area.  This function is to be called from arch percpu area
1167
 * setup path.
1168
 *
1169 1170 1171 1172 1173 1174
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1175 1176 1177 1178 1179 1180 1181
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
1182 1183 1184
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1185
 *
1186 1187 1188
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
1189
 *
1190 1191
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
1192
 *
1193 1194 1195 1196 1197 1198 1199 1200 1201
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
1202
 *
1203 1204
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
1205
 *
1206 1207 1208 1209 1210 1211 1212
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but uses different area allocation map to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunks.
 *
1213
 * RETURNS:
T
Tejun Heo 已提交
1214
 * 0 on success, -errno on failure.
1215
 */
T
Tejun Heo 已提交
1216 1217
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
1218
{
1219
	static char cpus_buf[4096] __initdata;
1220 1221
	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1222 1223
	size_t dyn_size = ai->dyn_size;
	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1224
	struct pcpu_chunk *schunk, *dchunk = NULL;
1225 1226
	unsigned long *group_offsets;
	size_t *group_sizes;
T
Tejun Heo 已提交
1227
	unsigned long *unit_off;
1228
	unsigned int cpu;
1229 1230
	int *unit_map;
	int group, unit, i;
1231

1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242
	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

1243
	/* sanity checks */
1244
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1245
#ifdef CONFIG_SMP
1246
	PCPU_SETUP_BUG_ON(!ai->static_size);
1247
	PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
1248
#endif
1249
	PCPU_SETUP_BUG_ON(!base_addr);
1250
	PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
1251 1252 1253
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1254
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1255
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1256

1257
	/* process group information and build config tables accordingly */
1258 1259 1260 1261 1262 1263
	group_offsets = memblock_virt_alloc(ai->nr_groups *
					     sizeof(group_offsets[0]), 0);
	group_sizes = memblock_virt_alloc(ai->nr_groups *
					   sizeof(group_sizes[0]), 0);
	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
1264

1265
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1266
		unit_map[cpu] = UINT_MAX;
T
Tejun Heo 已提交
1267 1268 1269

	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;
1270

1271 1272
	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];
1273

1274 1275 1276
		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

1277 1278 1279 1280
		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;
1281

1282 1283 1284
			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1285

1286
			unit_map[cpu] = unit + i;
T
Tejun Heo 已提交
1287 1288
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

T
Tejun Heo 已提交
1289 1290 1291 1292 1293 1294 1295
			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
1296
		}
1297
	}
1298 1299 1300
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
1301 1302 1303 1304
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
1305
	pcpu_dump_alloc_info(KERN_DEBUG, ai);
1306

1307 1308 1309
	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
1310
	pcpu_unit_map = unit_map;
T
Tejun Heo 已提交
1311
	pcpu_unit_offsets = unit_off;
1312 1313

	/* determine basic parameters */
1314
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1315
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1316
	pcpu_atom_size = ai->atom_size;
T
Tejun Heo 已提交
1317 1318
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1319

1320 1321 1322 1323 1324
	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1325 1326
	pcpu_slot = memblock_virt_alloc(
			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
1327 1328 1329
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

1330 1331 1332 1333 1334 1335 1336
	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
1337
	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1338
	INIT_LIST_HEAD(&schunk->list);
T
Tejun Heo 已提交
1339
	schunk->base_addr = base_addr;
1340 1341
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
1342
	schunk->immutable = true;
T
Tejun Heo 已提交
1343
	bitmap_fill(schunk->populated, pcpu_unit_pages);
1344

1345 1346
	if (ai->reserved_size) {
		schunk->free_size = ai->reserved_size;
1347
		pcpu_reserved_chunk = schunk;
1348
		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1349 1350 1351 1352
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
1353
	schunk->contig_hint = schunk->free_size;
1354

1355 1356 1357
	schunk->map[0] = 1;
	schunk->map[1] = ai->static_size;
	schunk->map_used = 1;
1358
	if (schunk->free_size)
1359 1360 1361
		schunk->map[++schunk->map_used] = 1 | (ai->static_size + schunk->free_size);
	else
		schunk->map[1] |= 1;
1362

1363 1364
	/* init dynamic chunk if necessary */
	if (dyn_size) {
1365
		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1366
		INIT_LIST_HEAD(&dchunk->list);
T
Tejun Heo 已提交
1367
		dchunk->base_addr = base_addr;
1368 1369
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
1370
		dchunk->immutable = true;
T
Tejun Heo 已提交
1371
		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1372 1373

		dchunk->contig_hint = dchunk->free_size = dyn_size;
1374 1375 1376 1377
		dchunk->map[0] = 1;
		dchunk->map[1] = pcpu_reserved_chunk_limit;
		dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
		dchunk->map_used = 2;
1378 1379
	}

1380
	/* link the first chunk in */
1381 1382
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1383 1384

	/* we're done */
T
Tejun Heo 已提交
1385
	pcpu_base_addr = base_addr;
T
Tejun Heo 已提交
1386
	return 0;
1387
}

#ifdef CONFIG_SMP

const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warning("PERCPU: unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);

/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if needed by the arch config or the generic setup is going
 * to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
	}

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
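
/*
 * Illustrative outcome (hypothetical machine): on a two-node box with
 * cpus 0-1 on node 0, cpus 2-3 on node 1 and a cpu_distance_fn
 * reporting > LOCAL_DISTANCE across the nodes, pcpu_build_alloc_info()
 * produces two groups - group 0 with cpus { 0, 1 } and group 1 with
 * cpus { 2, 3 } - each allocated as a whole multiple of @atom_size.
 */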
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */

#if defined(BUILD_EMBED_FIRST_CHUNK)
1595 1596 1597
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
1598
 * @dyn_size: minimum free size for dynamic allocation in bytes
1599 1600 1601
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
L
Lucas De Marchi 已提交
1602
 * @free_fn: function to free percpu page
1603 1604 1605 1606 1607
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
1608 1609 1610 1611 1612 1613 1614 1615 1616 1617
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
1618
 *
1619
 * @dyn_size specifies the minimum dynamic area size.
1620 1621
 *
 * If the needed size is smaller than the minimum or specified unit
1622
 * size, the leftover is returned using @free_fn.
1623 1624
 *
 * RETURNS:
T
Tejun Heo 已提交
1625
 * 0 on success, -errno on failure.
1626
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size, max_distance;
	int group, i, rc;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = memblock_virt_alloc_nopanic(areas_size, 0);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_free(ptr);
		areas[group] = ptr;

		base = min(ptr, base);
	}

	/*
	 * Copy data and free unused parts.  This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}
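
	/*
	 * Illustration with made-up sizes: if ai->unit_size is 64k and
	 * size_sum (static + reserved + dyn) is 44k, each used unit
	 * keeps its first 44k and the trailing 20k is handed back via
	 * @free_fn above; units with no CPU are returned whole.
	 */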

	/* base address is now known, determine group base offsets */
	max_distance = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
		max_distance = max_t(size_t, max_distance,
				     ai->groups[group].base_offset);
	}
	max_distance += ai->unit_size;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
			   "space 0x%lx\n", max_distance,
			   VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free;
#endif
	}
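
	/*
	 * Worked example with made-up numbers: with a VMALLOC_TOTAL of
	 * 128MB, the warning above fires once the span from the lowest
	 * group base to the end of the highest unit exceeds 96MB; when
	 * the page-mapped fallback is built in, the arch is expected to
	 * retry with pcpu_page_first_chunk() after the -EINVAL return.
	 */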

	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		if (areas[group])
			free_fn(areas[group],
				ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		memblock_free_early(__pa(areas), areas_size);
	return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */
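
/*
 * Usage sketch (hypothetical arch code, not part of this file): a
 * NUMA-aware arch would typically wire the embed helper up from its
 * setup_per_cpu_areas() along these lines, with pcpu_cpu_distance(),
 * pcpu_fc_alloc() and pcpu_fc_free() supplied by the arch:
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *				    pcpu_cpu_distance, pcpu_fc_alloc,
 *				    pcpu_fc_free);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 *
 * @cpu_distance_fn may be NULL on UMA, which collapses all CPUs into
 * a single group (see the generic setup_per_cpu_areas() below).
 */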

#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up a page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = memblock_virt_alloc(pages_size, 0);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++)
		for (i = 0; i < unit_pages; i++) {
			unsigned int cpu = ai->groups[0].cpu_map[unit];
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warning("PERCPU: failed to allocate %s page "
					   "for cpu%u\n", psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_free(ptr);
			pages[j++] = virt_to_page(ptr);
		}
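
	/*
	 * pages[] is now filled unit-major: pages[unit * unit_pages + i]
	 * backs the i'th PAGE_SIZE slot of that unit, which is the
	 * layout __pcpu_map_pages() expects below.
	 */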

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
		unit_pages, psize_str, vm.addr, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	memblock_free_early(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */
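
/*
 * Usage sketch (hypothetical, for illustration): an arch whose vmalloc
 * space is too small for the sparse offsets the embed helper can
 * produce would fall back to the page helper:
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   pcpu_fc_alloc, pcpu_fc_free,
 *				   pcpu_populate_pte);
 *
 * where the assumed pcpu_populate_pte() pre-allocates page tables for
 * one PAGE_SIZE mapping so that __pcpu_map_pages() above cannot fail.
 */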

#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because the percpu area can piggyback
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

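/*
 * Default first-chunk callbacks used by the generic
 * setup_per_cpu_areas() below: bootmem allocations are served from
 * above MAX_DMA_ADDRESS so the first chunk does not consume the DMA
 * zone.
 */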
static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return memblock_virt_alloc_from_nopanic(
			size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
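
/*
 * A minimal sketch of how the offsets computed above are consumed
 * (illustrative; the real accessors live in include/asm-generic/percpu.h):
 *
 *	#define per_cpu(var, cpu) \
 *		(*SHIFT_PERCPU_PTR(&(var), __per_cpu_offset[cpu]))
 *
 * i.e. a static percpu variable's link address plus a cpu's offset
 * yields that variable's address within the cpu's unit of the first
 * chunk.
 */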
#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else	/* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = memblock_virt_alloc_from_nopanic(unit_size,
					      PAGE_SIZE,
					      __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_free(fc);

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	if (pcpu_setup_first_chunk(ai, fc) < 0)
		panic("Failed to initialize percpu areas.");

	pcpu_free_alloc_info(ai);
}

#endif	/* CONFIG_SMP */

/*
 * First and reserved chunks are initialized with temporary allocation
 * map in initdata so that they can be used before slab is online.
 * This function is called after slab is brought up and replaces those
 * with properly allocated maps.
 */
void __init percpu_init_late(void)
{
	struct pcpu_chunk *target_chunks[] =
		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int i;

	for (i = 0; (chunk = target_chunks[i]); i++) {
		int *map;
		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);

		BUILD_BUG_ON(size > PAGE_SIZE);

		map = pcpu_mem_zalloc(size);
		BUG_ON(!map);

		spin_lock_irqsave(&pcpu_lock, flags);
		memcpy(map, chunk->map, size);
		chunk->map = map;
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}
}