/*
 *    vmem.c
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

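/*
 * Allocate pages from the slab allocator once it is available;
 * fall back to the bootmem allocator during early boot.
 */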
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

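/*
 * On 64 bit, region third (pud) and segment (pmd) tables occupy four
 * pages each (2048 entries of 8 bytes), hence the order-2 allocations
 * below. On 31 bit these levels are folded and nothing is allocated.
 */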
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

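/*
 * Page tables come from the slab once it is up; during early boot
 * they come from bootmem. All entries start out invalid.
 */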
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm, address);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
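		/*
		 * With EDAT2 a properly aligned 2GB range can be mapped
		 * by a single large region third table entry instead of
		 * the segment and page tables below it.
		 */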
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_PROTECT : 0);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
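		/*
		 * Likewise, with EDAT1 a properly aligned 1MB range can
		 * be mapped by a single large segment table entry
		 * instead of a page table.
		 */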
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) |
			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, end);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_INVALID;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
#ifdef CONFIG_64BIT
			/* Use 1MB frames for vmemmap if available. We
			 * always use large frames even if they are only
			 * partially used.
			 * Otherwise we would also end up allocating page
			 * tables, since vmemmap_populate gets called for
			 * each section separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
					_SEGMENT_ENTRY_CO;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
#endif
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				new_page | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
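	/* Zero the newly backed vmemmap range (the struct page array). */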
	memset((void *)start, 0, end - start);
	ret = 0;
out:
	flush_tlb_kernel_range(start, end);
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

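/*
 * Remove a memory segment from the segment list and invalidate its
 * 1:1 mapping.
 */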
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

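/*
 * Add a memory segment to the segment list and create a writable
 * 1:1 mapping for it.
 */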
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
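	/*
	 * The kernel image, from _stext up to _eshared, is mapped
	 * read-only; memory chunks overlapping that range are split
	 * into read-only and writable pieces.
	 */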
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);