/*
 *  arch/s390/mm/vmem.c
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

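/* Serializes changes to the 1:1 mapping and the memory segment list. */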
static DEFINE_MUTEX(vmem_mutex);

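/* One contiguous range currently present in the 1:1 mapping. */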
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

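/*
 * Early callers run before the buddy and slab allocators are up, so
 * fall back to bootmem in that case; __ref silences the section
 * mismatch warning for the reference to __init bootmem code.
 */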
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
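	/*
	 * A region-third table has 2048 entries of 8 bytes and thus
	 * occupies four pages, hence the order-2 allocation and the
	 * PAGE_SIZE * 4 clear below.
	 */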
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

static pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	if (MACHINE_HAS_HPAGE)
		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
			    PTRS_PER_PTE * sizeof(pte_t));
	else
		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
			    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

#ifdef __s390x__
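		/*
		 * Map a whole 1 MB segment with a single large pte in
		 * the pmd when the machine supports it, the address is
		 * segment aligned and a full segment remains to be
		 * mapped.  The first segment is excluded, presumably
		 * to keep page granularity for the lowest megabyte.
		 */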
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
					_SEGMENT_ENTRY_CO;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

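		/*
		 * A large page pmd maps a whole segment; clear it and
		 * skip ahead to the next segment boundary.
		 */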
		if (pmd_huge(*pm_dir)) {
			pmd_clear_kernel(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

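		/*
		 * Allocate real memory for the struct pages backing
		 * this piece of the virtual mem_map the first time the
		 * address is populated.
		 */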
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
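
/*
 * A sketch of how callers are expected to pair these (simplified from
 * what an arch_add_memory() style memory hotplug path would do; the
 * surrounding names are illustrative, not taken from this file):
 *
 *	rc = vmem_add_mapping(start, size);
 *	if (rc)
 *		return rc;
 *	rc = __add_pages(...);
 *	if (rc)
 *		vmem_remove_mapping(start, size);
 */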

/*
 * Map whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged later.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	spin_lock_init(&init_mm.context.list_lock);
	INIT_LIST_HEAD(&init_mm.context.crst_list);
	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
	init_mm.context.noexec = 0;
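	/*
	 * Map the kernel text and read-only data (_stext to _eshared)
	 * read-only and everything else read/write.  A memory chunk
	 * may lie entirely outside, entirely inside, or straddle one
	 * or both ends of that range, hence the five cases below.
	 */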
	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);