vmem.c
/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

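/*
 * Allocate pages from the page allocator once the slab allocator is up,
 * from bootmem during early boot.
 */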
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

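/* Allocate and clear a pud table (four pages, 64 bit only). */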
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

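/* Allocate and clear a pmd table (four pages, 64 bit only). */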
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

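/* Allocate a page table and initialize all entries to empty. */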
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm, address);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	while (address < end) {
		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
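		/*
		 * With EDAT2, map an aligned PUD-sized block with a single
		 * large region entry.
		 */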
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pte_val(pte) |= _REGION3_ENTRY_LARGE;
			pte_val(pte) |= _REGION_ENTRY_TYPE_R3;
			pud_val(*pu_dir) = pte_val(pte);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
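		/*
		 * With EDAT1, map an aligned PMD-sized block with a single
		 * large segment entry.
		 */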
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, end);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

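/* Unlink a segment from the list and remove its pages from the 1:1 mapping. */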
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

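/*
 * Remove a previously added memory segment and its 1:1 mapping.
 */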
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

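/*
 * Add a memory segment and create a writable 1:1 mapping for it.
 */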
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);