/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
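/*
 * Illustrative sketch (not part of the original file): with a virtually
 * mapped memmap the generic memory model reduces the pfn/page conversions
 * to pointer arithmetic against a fixed base, roughly as asm-generic does
 * for CONFIG_SPARSEMEM_VMEMMAP:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * where "vmemmap" is an architecture-defined constant base (for example
 * ((struct page *)VMEMMAP_START) on x86-64); neither direction needs a
 * memory access.
 */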
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_virt_alloc_try_nid(size, align, goal,
					    BOOTMEM_ALLOC_ACCESSIBLE, node);
}

static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		struct page *page;

		if (node_state(node, N_HIGH_MEMORY))
			page = alloc_pages_node(
				node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		else
			page = alloc_pages(
				GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/*
 * Early (pre-slab) callers must all request the same size so that the
 * pre-allocated vmemmap_buf can be carved up by simple alignment.
 */
static void * __meminit alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* take the allocation from the preallocated buffer */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

/**
 * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
 * @altmap: reserved page pool for the allocation
 * @nr_pfns: size (in pages) of the allocation
 *
 * Allocations are aligned to the largest power-of-two factor of the
 * requested size.
 */
static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
	unsigned long pfn = vmem_altmap_next_pfn(altmap);
	unsigned long nr_align;

	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;

	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return ULONG_MAX;
	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	return pfn + nr_align;
}
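
/*
 * Worked example (illustrative, not from the original file): suppose
 * vmem_altmap_next_pfn() currently returns 0x10100 and a caller requests
 * nr_pfns = 0x200. The lowest set bit of 0x200 gives an alignment granule
 * of 0x200 pfns, so the allocation starts at ALIGN(0x10100, 0x200) =
 * 0x10200 and the 0x100 padding pfns are charged to altmap->align. A
 * power-of-two request is therefore aligned to its own size.
 */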

static void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns;
	void *ptr;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	nr_pfns = size >> PAGE_SHIFT;
	pfn = vmem_altmap_alloc(altmap, nr_pfns);
	if (pfn < ULONG_MAX)
		ptr = __va(__pfn_to_phys(pfn));
	else
		ptr = NULL;
	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);

	return ptr;
}

/*
 * Early (pre-slab) callers must all request the same size so that the
 * pre-allocated vmemmap_buf can be carved up by simple alignment.
 */
void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
		struct vmem_altmap *altmap)
{
	if (altmap)
		return altmap_alloc_block_buf(size, altmap);
	return alloc_block_buf(size, node);
}
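
/*
 * Illustrative sketch (not part of this file): an architecture that backs
 * the memmap with huge pages might consume __vmemmap_alloc_block_buf()
 * roughly like this, one PMD_SIZE allocation per PMD worth of memmap
 * (PAGE_KERNEL_LARGE is the x86-64 name for the large-page protection;
 * error handling is elided):
 *
 *	pmd = pmd_offset(pud, addr);
 *	if (pmd_none(*pmd)) {
 *		void *p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
 *
 *		if (p) {
 *			pte_t entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
 *					      PAGE_KERNEL_LARGE);
 *
 *			set_pmd(pmd, __pmd(pte_val(entry)));
 *		}
 *	}
 */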

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
206 207 208 209
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
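
/*
 * Illustrative sketch (not part of this file): an architecture without
 * special huge-page handling can satisfy the vmemmap_populate() contract
 * mentioned at the top of this file by deferring to the base-page walker
 * above:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 */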

struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid))
		return NULL;

	return map;
}

void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
			 PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* free the unused tail of the buffer */
		memblock_free_early(__pa(vmemmap_buf),
				    vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}
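
/*
 * Illustrative sketch (not part of this file): the early SPARSEMEM setup
 * code batches all present sections of a node into a single call to the
 * helper above, so one PMD-aligned buffer can back every section's memmap
 * for that node, roughly:
 *
 *	unsigned long pnum, map_count = 0;
 *
 *	for (pnum = pnum_begin; pnum < pnum_end; pnum++)
 *		if (present_section_nr(pnum))
 *			map_count++;
 *	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
 *				      map_count, nodeid);
 */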