/*
 * Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation
 * August 2002: added remote node KVA remap - Martin J. Bligh 
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.          
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/pfn.h>
#include <linux/swap.h>
#include <linux/acpi.h>

#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/mmzone.h>
#include <asm/bios_ebda.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
43
EXPORT_SYMBOL(node_data);
44
static bootmem_data_t node0_bdata;
L
Linus Torvalds 已提交
45 46

/*
A
Adrian Bunk 已提交
47
 * numa interface - we expect the numa architecture specific code to have
L
Linus Torvalds 已提交
48 49 50
 *                  populated the following initialisation.
 *
 * 1) node_online_map  - the map of all nodes configured (online) in the system
51
 * 2) node_start_pfn   - the starting page frame number for a node
L
Linus Torvalds 已提交
52 53
 * 3) node_end_pfn     - the ending page fram number for a node
 */
54 55
unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly;
unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly;
56

L
Linus Torvalds 已提交
57

58
#ifdef CONFIG_DISCONTIGMEM
/*
 * 4) physnode_map     - the mapping between a pfn and owning node
 *
 * physnode_map keeps track of the physical memory layout of a generic
 * numa node on a 256Mb break (each element of the array will
 * represent 256Mb of memory and will be marked by the node id.  so,
 * if the first gig is on node 0, and the second gig is on node 1
 * physnode_map will contain:
 *
 *     physnode_map[0-3] = 0;
 *     physnode_map[4-7] = 1;
 *     physnode_map[8- ] = -1;
 */
s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
EXPORT_SYMBOL(physnode_map);
L
Linus Torvalds 已提交
73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98

void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	printk(KERN_INFO "Node: %d, start_pfn: %ld, end_pfn: %ld\n",
			nid, start, end);
	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
	printk(KERN_DEBUG "  ");
	for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
		physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
		printk("%ld ", pfn);
	}
	printk("\n");
}

/*
 * Bytes of node-local mem_map needed to cover [start_pfn, end_pfn) on
 * node @nid; returns 0 for an empty range.  One extra struct page is
 * added (+1) -- NOTE(review): presumably headroom for partial-page
 * rounding by the caller; confirm before relying on the exact size.
 */
unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	unsigned long nr_pages = end_pfn - start_pfn;

	if (!nr_pages)
		return 0;

	return (nr_pages + 1) * sizeof(struct page);
}
#endif /* CONFIG_DISCONTIGMEM */
L
Linus Torvalds 已提交
100 101

extern unsigned long find_max_low_pfn(void);
102
extern void add_one_highpage_init(struct page *, int, int);
L
Linus Torvalds 已提交
103 104 105 106 107
extern unsigned long highend_pfn, highstart_pfn;

#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)

unsigned long node_remap_size[MAX_NUMNODES];
A
Adrian Bunk 已提交
108
static void *node_remap_start_vaddr[MAX_NUMNODES];
L
Linus Torvalds 已提交
109 110
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);

111 112
static unsigned long kva_start_pfn;
static unsigned long kva_pages;
L
Linus Torvalds 已提交
113 114 115 116 117 118 119 120 121 122
/*
 * FLAT - support for basic PC memory model with discontig enabled, essentially
 *        a single node with all available processors in it with a flat
 *        memory map.
 */
int __init get_memcfg_numa_flat(void)
{
	printk("NUMA - single node, flat memory mode\n");

	/* Run the memory configuration and find the top of memory. */
123
	propagate_e820_map();
L
Linus Torvalds 已提交
124 125 126 127 128 129 130 131 132 133 134 135 136
	node_start_pfn[0] = 0;
	node_end_pfn[0] = max_pfn;
	memory_present(0, 0, max_pfn);

        /* Indicate there is one node available. */
	nodes_clear(node_online_map);
	node_set_online(0);
	return 1;
}

/*
 * Find the highest page frame number we have available for the node
 */
137
static void __init propagate_e820_map_node(int nid)
L
Linus Torvalds 已提交
138 139 140 141 142 143 144 145 146
{
	if (node_end_pfn[nid] > max_pfn)
		node_end_pfn[nid] = max_pfn;
	/*
	 * if a user has given mem=XXXX, then we need to make sure 
	 * that the node _starts_ before that, too, not just ends
	 */
	if (node_start_pfn[nid] > max_pfn)
		node_start_pfn[nid] = max_pfn;
E
Eric Sesterhenn 已提交
147
	BUG_ON(node_start_pfn[nid] > node_end_pfn[nid]);
L
Linus Torvalds 已提交
148 149 150 151 152 153 154 155 156 157 158 159 160 161
}

/*
 * Allocate memory for the pg_data_t for this node via a crude pre-bootmem
 * method.  For node zero take this from the bottom of memory, for
 * subsequent nodes place them at node_remap_start_vaddr which contains
 * node local data in physically node local memory.  See setup_memory()
 * for details.
 */
static void __init allocate_pgdat(int nid)
{
	if (nid && node_has_online_mem(nid))
		NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
	else {
		/* Carve the pgdat out of low memory and bump min_low_pfn past it. */
		NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(min_low_pfn));
		min_low_pfn += PFN_UP(sizeof(pg_data_t));
	}
}

#ifdef CONFIG_DISCONTIGMEM
/*
 * In the discontig memory model, a portion of the kernel virtual area (KVA)
 * is reserved and portions of nodes are mapped using it. This is to allow
 * node-local memory to be allocated for structures that would normally require
 * ZONE_NORMAL. The memory is allocated with alloc_remap() and callers
 * should be prepared to allocate from the bootmem allocator instead. This KVA
 * mechanism is incompatible with SPARSEMEM as it makes assumptions about the
 * layout of memory that are broken if alloc_remap() succeeds for some of the
 * map and fails for others
 */
static unsigned long node_remap_start_pfn[MAX_NUMNODES];
static void *node_remap_end_vaddr[MAX_NUMNODES];
static void *node_remap_alloc_vaddr[MAX_NUMNODES];
static unsigned long node_remap_offset[MAX_NUMNODES];

183 184 185 186 187 188 189 190 191 192 193 194 195 196 197
void *alloc_remap(int nid, unsigned long size)
{
	void *allocation = node_remap_alloc_vaddr[nid];

	size = ALIGN(size, L1_CACHE_BYTES);

	if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
		return 0;

	node_remap_alloc_vaddr[nid] += size;
	memset(allocation, 0, size);

	return allocation;
}

L
Linus Torvalds 已提交
198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217
void __init remap_numa_kva(void)
{
	void *vaddr;
	unsigned long pfn;
	int node;

	for_each_online_node(node) {
		for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
			vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT);
			set_pmd_pfn((ulong) vaddr, 
				node_remap_start_pfn[node] + pfn, 
				PAGE_KERNEL_LARGE);
		}
	}
}

static unsigned long calculate_numa_remap_pages(void)
{
	int nid;
	unsigned long size, reserve_pages = 0;
218
	unsigned long pfn;
L
Linus Torvalds 已提交
219 220

	for_each_online_node(nid) {
221 222
		unsigned old_end_pfn = node_end_pfn[nid];

L
Linus Torvalds 已提交
223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238
		/*
		 * The acpi/srat node info can show hot-add memroy zones
		 * where memory could be added but not currently present.
		 */
		if (node_start_pfn[nid] > max_pfn)
			continue;
		if (node_end_pfn[nid] > max_pfn)
			node_end_pfn[nid] = max_pfn;

		/* ensure the remap includes space for the pgdat. */
		size = node_remap_size[nid] + sizeof(pg_data_t);

		/* convert size to large (pmd size) pages, rounding up */
		size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
		/* now the roundup is correct, convert to PAGE_SIZE pages */
		size = size * PTRS_PER_PTE;
239 240 241 242 243 244 245 246 247 248 249 250 251

		/*
		 * Validate the region we are allocating only contains valid
		 * pages.
		 */
		for (pfn = node_end_pfn[nid] - size;
		     pfn < node_end_pfn[nid]; pfn++)
			if (!page_is_ram(pfn))
				break;

		if (pfn != node_end_pfn[nid])
			size = 0;

L
Linus Torvalds 已提交
252 253 254 255
		printk("Reserving %ld pages of KVA for lmem_map of node %d\n",
				size, nid);
		node_remap_size[nid] = size;
		node_remap_offset[nid] = reserve_pages;
256
		reserve_pages += size;
L
Linus Torvalds 已提交
257 258
		printk("Shrinking node %d from %ld pages to %ld pages\n",
			nid, node_end_pfn[nid], node_end_pfn[nid] - size);
259 260 261 262 263 264 265 266 267 268 269

		if (node_end_pfn[nid] & (PTRS_PER_PTE-1)) {
			/*
			 * Align node_end_pfn[] and node_remap_start_pfn[] to
			 * pmd boundary. remap_numa_kva will barf otherwise.
			 */
			printk("Shrinking node %d further by %ld pages for proper alignment\n",
				nid, node_end_pfn[nid] & (PTRS_PER_PTE-1));
			size +=  node_end_pfn[nid] & (PTRS_PER_PTE-1);
		}

L
Linus Torvalds 已提交
270 271
		node_end_pfn[nid] -= size;
		node_remap_start_pfn[nid] = node_end_pfn[nid];
272
		shrink_active_range(nid, old_end_pfn, node_end_pfn[nid]);
L
Linus Torvalds 已提交
273 274 275 276 277 278
	}
	printk("Reserving total of %ld pages for numa KVA remap\n",
			reserve_pages);
	return reserve_pages;
}

M
Mel Gorman 已提交
279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312
static void init_remap_allocator(int nid)
{
	node_remap_start_vaddr[nid] = pfn_to_kaddr(
			kva_start_pfn + node_remap_offset[nid]);
	node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
		(node_remap_size[nid] * PAGE_SIZE);
	node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
		ALIGN(sizeof(pg_data_t), PAGE_SIZE);

	printk ("node %d will remap to vaddr %08lx - %08lx\n", nid,
		(ulong) node_remap_start_vaddr[nid],
		(ulong) pfn_to_kaddr(highstart_pfn
		   + node_remap_offset[nid] + node_remap_size[nid]));
}
#else
/* !CONFIG_DISCONTIGMEM: no KVA remap window; callers always fall back
 * to the bootmem allocator. */
void *alloc_remap(int nid, unsigned long size)
{
	return NULL;
}

/* No remap area is needed, so no pages to reserve. */
static unsigned long calculate_numa_remap_pages(void)
{
	return 0;
}

/* Nothing to set up without a remap window. */
static void init_remap_allocator(int nid)
{
}

/* No remap pmds to establish. */
void __init remap_numa_kva(void)
{
}
#endif /* CONFIG_DISCONTIGMEM */


extern void setup_bootmem_allocator(void);

/*
 * Top-level boot-time NUMA memory setup: read the node layout
 * (get_memcfg_numa), carve the KVA remap area out of the top of low
 * memory, establish the lowmem/highmem boundary and allocate each
 * node's pgdat.  Returns max_low_pfn for the bootmem setup that
 * follows.
 */
unsigned long __init setup_memory(void)
{
	int nid;
	unsigned long system_start_pfn, system_max_low_pfn;
	unsigned long wasted_pages;

	/*
	 * When mapping a NUMA machine we allocate the node_mem_map arrays
	 * from node local memory.  They are then mapped directly into KVA
	 * between zone normal and vmalloc space.  Calculate the size of
	 * this space and use it to adjust the boundary between ZONE_NORMAL
	 * and ZONE_HIGHMEM.
	 */
	get_memcfg_numa();

	kva_pages = calculate_numa_remap_pages();

	/* partially used pages are not usable - thus round upwards */
	system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);

	kva_start_pfn = find_max_low_pfn() - kva_pages;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Numa kva area is below the initrd */
	if (initrd_start)
		kva_start_pfn = PFN_DOWN(initrd_start - PAGE_OFFSET)
			- kva_pages;
#endif

	/*
	 * We waste pages past the end of the KVA for no good reason other
	 * than how it is located. This is bad.
	 */
	wasted_pages = kva_start_pfn & (PTRS_PER_PTE-1);
	kva_start_pfn -= wasted_pages;
	kva_pages += wasted_pages;

	system_max_low_pfn = max_low_pfn = find_max_low_pfn();
	printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n",
		kva_start_pfn, max_low_pfn);
	printk("max_pfn = %ld\n", max_pfn);
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > system_max_low_pfn)
		highstart_pfn = system_max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
	       pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	num_physpages = system_max_low_pfn;
	high_memory = (void *) __va(system_max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(system_max_low_pfn));
	printk("min_low_pfn = %ld, max_low_pfn = %ld, highstart_pfn = %ld\n",
			min_low_pfn, max_low_pfn, highstart_pfn);

	printk("Low memory ends at vaddr %08lx\n",
			(ulong) pfn_to_kaddr(max_low_pfn));
	for_each_online_node(nid) {
		init_remap_allocator(nid);

		allocate_pgdat(nid);
	}
	printk("High memory starts at vaddr %08lx\n",
			(ulong) pfn_to_kaddr(highstart_pfn));
	for_each_online_node(nid)
		propagate_e820_map_node(nid);

	memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
	NODE_DATA(0)->bdata = &node0_bdata;
	setup_bootmem_allocator();
	return max_low_pfn;
}

390 391
void __init numa_kva_reserve(void)
{
M
Mel Gorman 已提交
392
	if (kva_pages)
393 394
		reserve_bootmem(PFN_PHYS(kva_start_pfn), PFN_PHYS(kva_pages),
				BOOTMEM_DEFAULT);
395 396
}

L
Linus Torvalds 已提交
397 398 399
void __init zone_sizes_init(void)
{
	int nid;
400 401 402 403 404
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] =
		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
405
#ifdef CONFIG_HIGHMEM
406
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
407
#endif
408 409 410 411 412 413 414

	/* If SRAT has not registered memory, register it now */
	if (find_max_pfn_with_active_regions() == 0) {
		for_each_online_node(nid) {
			if (node_has_online_mem(nid))
				add_active_range(nid, node_start_pfn[nid],
							node_end_pfn[nid]);
L
Linus Torvalds 已提交
415 416
		}
	}
417 418

	free_area_init_nodes(max_zone_pfns);
L
Linus Torvalds 已提交
419 420 421 422 423 424 425
	return;
}

/*
 * Walk every highmem zone and release each valid page to the system
 * via add_one_highpage_init(), then fold the highmem total into
 * totalram_pages.  Compiles to a no-op without CONFIG_HIGHMEM.
 */
void __init set_highmem_pages_init(int bad_ppro)
{
#ifdef CONFIG_HIGHMEM
	struct zone *zone;
	struct page *page;

	for_each_zone(zone) {
		unsigned long node_pfn, zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		printk("Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, zone_to_nid(zone),
				zone_start_pfn, zone_end_pfn);

		for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {
			/* Zones may span holes; skip pfns with no struct page. */
			if (!pfn_valid(node_pfn))
				continue;
			page = pfn_to_page(node_pfn);
			add_one_highpage_init(page, node_pfn, bad_ppro);
		}
	}
	totalram_pages += totalhigh_pages;
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Linear search for the node whose [node_start_pfn, node_end_pfn)
 * span contains physical address @addr; returns -1 if no node covers
 * it.
 */
static int paddr_to_nid(u64 addr)
{
	int nid;
	unsigned long pfn = PFN_DOWN(addr);

	for_each_node(nid)
		if (node_start_pfn[nid] <= pfn &&
		    pfn < node_end_pfn[nid])
			return nid;

	return -1;
}

/*
 * This function is used to ask node id BEFORE memmap and mem_section's
 * initialization (pfn_to_nid() can't be used yet).
 * If _PXM is not defined on ACPI's DSDT, node id must be found by this.
 */
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Addresses covered by no node fall back to node 0. */
	int nid = paddr_to_nid(addr);
	return (nid >= 0) ? nid : 0;
}

EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif /* CONFIG_MEMORY_HOTPLUG */