/*
 *  arch/s390/mm/vmem.c
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

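/* A contiguous range of memory that is part of the 1:1 mapping. */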
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

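/*
 * Allocate 2^order pages, from the buddy allocator once the slab
 * allocator is up and from the bootmem allocator during early boot.
 */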
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

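/*
 * Allocate a four-page pud table on 64 bit and initialize all entries
 * as empty. On 31 bit the upper table levels are folded, so no table
 * is needed and NULL is returned.
 */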
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

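/*
 * Allocate a four-page pmd table on 64 bit and initialize all segment
 * entries as empty; as above, nothing is needed on 31 bit.
 */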
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

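/*
 * Allocate a page table with all entries set to empty, using the
 * per-mm page table allocator once the slab allocator is available.
 */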
static pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

#ifdef __s390x__
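		/*
		 * Use a large (1 MB) segment mapping when the machine
		 * supports it, the address is segment aligned and the
		 * remaining range covers a full segment; the lowest
		 * segment is always mapped with 4K pages.
		 */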
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

		if (pmd_huge(*pm_dir)) {
			pmd_clear_kernel(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
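		/* Allocate a page to back this part of the virtual mem_map. */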
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size >= VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

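/*
 * Unmap a memory segment that was previously added with
 * vmem_add_mapping() and remove it from the segment list.
 */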
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

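/*
 * Register a new memory segment and add it writable to the 1:1 mapping.
 */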
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	INIT_LIST_HEAD(&init_mm.context.crst_list);
	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
	init_mm.context.noexec = 0;
	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
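	/*
	 * Map each memory chunk; the range from _stext to _eshared
	 * (kernel text and shared read-only sections) is mapped
	 * read-only, chunks straddling a boundary are split up.
	 */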
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);