/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
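
/*
 * With CONFIG_SPARSEMEM_EXTREME the roots below are allocated at run time,
 * one page-sized array of mem_sections per root; otherwise mem_section is
 * a static two-dimensional array (SECTIONS_PER_ROOT is then 1, so
 * NR_SECTION_ROOTS == NR_MEM_SECTIONS).
 */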
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
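/* One table entry per section, sized to the node id range. */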
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

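/*
 * Install the root array covering @section_nr on node @nid.  Returns
 * -EEXIST if some other caller already installed it.
 */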
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	/*
	 * This lock keeps two different sections from
	 * reallocating for the same index
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
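
/*
 * Note the round trip: a section set up with sparse_encode_early_nid(nid)
 * hands nid back through sparse_early_nid(), since the node is stored
 * above the SECTION_* flag bits.
 */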

/*
 * Record a memory area against a node: mark each section in the range
 * present and remember which node it belongs to.
 */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the section's starting pfn into the mem_map
 * pointer, such that "page - section_mem_map" yields that page's
 * actual physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * We need this if we ever free the mem_maps.  While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute__((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
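
/*
 * Round trip, spelled out: with coded = sparse_encode_mem_map(map, pnum),
 * the struct page for any pfn in the section is (struct page *)coded + pfn,
 * and sparse_decode_mem_map(coded, pnum) recovers the original map.
 */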

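/*
 * Hook a section up to its mem_map and pageblock bitmap.  The section
 * must already be marked present; returns 1 on success.
 */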
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

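/*
 * Each present section also carries a "usemap" holding its
 * pageblock_flags: a few bits per pageblock (notably the migratetype
 * used by anti-fragmentation), rounded up to whole unsigned longs.
 */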
static unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static unsigned long *sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
	if (usemap)
		return usemap;

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
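/*
 * Non-vmemmap fallback: try an architecture-provided remap area first,
 * then bootmem on the section's node.
 */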
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		usemap = sparse_early_usemap_alloc(pnum);
		if (!usemap)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						 unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

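	/* Prefer physically contiguous pages; fall back to vmalloc below. */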
	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

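/* Tell which allocator produced the memmap so it can be freed correctly. */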
static int vaddr_in_vmalloc_area(void *addr)
{
	if (addr >= (void *)VMALLOC_START &&
	    addr < (void *)VMALLOC_END)
		return 1;
	return 0;
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (vaddr_in_vmalloc_area(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * Returns the number of sections whose mem_maps were properly set
 * up (at most one here).  If this is <= 0, the memmap allocated for
 * the section was not consumed and is freed before returning.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed for this: sparse_index_init() does its own
	 * locking, and it may allocate with kmalloc (so it can sleep).
	 */
	sparse_index_init(section_nr, pgdat->node_id);
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	usemap = __kmalloc_section_usemap();

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	if (!memmap || !usemap) {
		ret = -ENOMEM;
		goto out;
	}
	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		/* the allocations were not consumed: free them */
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}
#endif