/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
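/*
 * With SPARSEMEM_EXTREME the root array below only holds pointers;
 * each root's block of SECTIONS_PER_ROOT mem_sections is allocated
 * on demand by sparse_index_alloc(). Otherwise the whole two-level
 * table is a single static array.
 */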
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kmalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kmalloc(array_size, GFP_KERNEL);
	} else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two concurrent callers from
	 * allocating the same root index.
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
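
/*
 * Usage sketch (hypothetical caller): architecture setup code walks
 * its early memory map and registers every node's ranges,
 *
 *	memory_present(nid, start_pfn, end_pfn);
 *
 * before sparse_init() runs.
 */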

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
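
/*
 * Worked example (hypothetical numbers): if section 3's mem_map
 * lives at address M, the encoded value is M - section_nr_to_pfn(3).
 * Treating the encoded value as a struct page pointer, the identity
 * "page - coded_mem_map" then yields the real pfn directly, which
 * is exactly what the encoding above is designed to preserve.
 */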

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

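/*
 * Bytes needed for one section's pageblock bitmap:
 * SECTION_BLOCKFLAGS_BITS rounded up to whole unsigned longs.
 */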
unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long count)
{
	unsigned long section_nr;

	/*
	 * A page may contain usemaps for other sections, preventing the
	 * page from being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section from being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	return alloc_bootmem_section(usemap_size() * count, section_nr);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		printk(KERN_INFO
		       "node %d must be removed before remove section %ld\n",
		       nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section numbers here.
	 */
	printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
	       pgdat_snr, nid);
	printk(KERN_CONT
	       " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long count)
{
	return NULL;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

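/*
 * Allocate the usemaps for all present sections in [pnum_begin,
 * pnum_end) that live on one node, preferring memory from the
 * section that holds the node's pgdat (see check_usemap_section_nr).
 */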
static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
								 usemap_count);
	if (usemap) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			usemap_map[pnum] = usemap;
			usemap += size;
		}
		return;
	}

	usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
	if (usemap) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			usemap_map[pnum] = usemap;
			usemap += size;
			check_usemap_section_nr(nodeid, usemap_map[pnum]);
		}
		return;
	}

	printk(KERN_WARNING "%s: allocation failed\n", __func__);
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
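/*
 * Allocate the mem_map for a single section: try the architecture's
 * remap area first (alloc_remap), then fall back to bootmem.
 */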
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					 map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;
	unsigned long usemap_count;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	unsigned long map_count;
	int size2;
	struct page **map_map;
#endif

	/*
	 * The mem_map is allocated with big pages (2M on 64-bit x86),
	 * while a usemap is much smaller than a page (around 24 bytes).
	 * Allocating 2M (2M-aligned) and 24 bytes in turn makes each
	 * following 2M allocation slip by another 2M, so a big system
	 * ends up with a lot of holes; instead, try to allocate the 2M
	 * pages contiguously.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
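	/*
	 * Worked example with hypothetical addresses: a 2M-aligned map
	 * at 0x200000 followed by a 24-byte usemap at 0x400000 pushes
	 * the next 2M-aligned map out to 0x600000, leaving almost 2M
	 * unused for every section allocated this way.
	 */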
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

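	/*
	 * Group the present sections into consecutive runs belonging to
	 * the same node, so each node's usemaps can be allocated with a
	 * single call.
	 */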
	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	usemap_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			usemap_count++;
			continue;
		}
		/* ok, we need to take care of pnum_begin to pnum - 1 */
		sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
						 usemap_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		usemap_count = 1;
	}
	/* ok, last chunk */
	sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
					 usemap_count, nodeid_begin);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = alloc_bootmem(size2);
	if (!map_map)
		panic("can not allocate map_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of pnum_begin to pnum - 1 */
		sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
						 map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
					 map_count, nodeid_begin);
#endif

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	free_bootmem(__pa(map_map), size2);
#endif
	free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						 unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}

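/*
 * Free a bootmem-allocated memmap: for each backing page,
 * page->private records the section number being removed and
 * _mapcount holds a bootmem type magic (never NODE_INFO here).
 */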
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	int magic;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = atomic_read(&page->_mapcount);

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the removing section is
		 * in a logically offlined state: all of its pages have
		 * been isolated from the page allocator. If the removing
		 * section's memmap is placed on that same section, it must
		 * not be freed; otherwise the page allocator could hand it
		 * out again while the memory is about to be removed
		 * physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem. It is packed with other usemaps
	 * on the section which has the pgdat at boot time, so just keep
	 * it as is for now.
	 */

	if (memmap) {
		struct page *memmap_page;
		memmap_page = virt_to_page(memmap);

		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap_page, nr_pages);
	}
}

/*
 * Returns the number of sections whose mem_maps were properly
 * set.  If this is <= 0, the passed-in map was not consumed and
 * must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed for this: sparse_index_init() does its
	 * own locking, and it may kmalloc.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}

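/*
 * Tear down one section: clear its mem_map encoding and pageblock
 * flags, then release the memmap and usemap according to how they
 * were allocated.
 */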
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif