/*
 *  arch/s390/mm/vmem.c
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

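/*
 * Allocate pages from the buddy allocator once the slab allocator is
 * up and running, and from the bootmem allocator before that.
 */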
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

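/*
 * Allocate a four page (16KB) region third table for the pud level and
 * clear it to empty entries. 31 bit kernels fold the pud into the pgd,
 * so no table is allocated there.
 */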
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

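/*
 * Allocate a four page (16KB) segment table for the pmd level and clear
 * it to empty entries. Folded away on 31 bit as well.
 */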
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

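/*
 * Allocate a page table, from the page table allocator once the slab
 * allocator is available and from bootmem before that. On machines with
 * large page support the change-bit override (_PAGE_CO) is set in the
 * empty entries as well.
 */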
static pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	if (MACHINE_HAS_HPAGE)
		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
			    PTRS_PER_PTE * sizeof(pte_t));
	else
		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
			    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

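	/*
	 * Walk the kernel address space page by page, allocating any
	 * missing pud/pmd/pte tables on the way.
	 */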
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

#ifdef __s390x__
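		/*
		 * Map a large (1 MB segment) page if the machine supports
		 * it, the address is segment aligned, a whole segment of
		 * the range remains and we are past the first segment.
		 */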
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
					_SEGMENT_ENTRY_CO;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

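		/*
		 * A huge pmd maps a whole segment; clear it and skip
		 * ahead to the next segment boundary.
		 */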
		if (pmd_huge(*pm_dir)) {
			pmd_clear_kernel(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
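	/* Zero the struct pages backing the newly populated mem_map range. */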
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
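
/*
 * Minimal usage sketch (hypothetical caller; seg_start/seg_size are
 * made-up names): map an additional memory segment, access it through
 * the 1:1 mapping, then tear it down again.
 *
 *	rc = vmem_add_mapping(seg_start, seg_size);
 *	if (rc)
 *		return rc;
 *	... access the segment through the identity mapping ...
 *	vmem_remove_mapping(seg_start, seg_size);
 */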

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	spin_lock_init(&init_mm.context.list_lock);
	INIT_LIST_HEAD(&init_mm.context.crst_list);
	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
	init_mm.context.noexec = 0;
	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
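	/*
	 * Map each memory chunk 1:1, splitting chunks that overlap the
	 * shared kernel section (_stext.._eshared) so that this range
	 * gets mapped read-only.
	 */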
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);