/*
 *  arch/s390/mm/vmem.c
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

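/*
 * Allocate pages from the page allocator once it is up and running;
 * fall back to bootmem during early boot. The __ref annotation marks
 * the reference to the __init bootmem allocator as intentional.
 */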
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
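	/*
	 * A region-third table has 2048 eight byte entries and thus
	 * occupies four pages (order 2).
	 */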
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
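	/* A segment table is likewise four pages with 2048 entries. */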
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

static pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

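	/* Use the page table allocator once slab is up, bootmem before. */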
	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

#ifdef __s390x__
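		/*
		 * Map a full 1 MB segment with a large page if the machine
		 * supports it, the address is segment aligned, the whole
		 * segment lies within the range and we are past segment 0.
		 */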
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

		if (pmd_huge(*pm_dir)) {
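			/*
			 * A large page is mapped at the segment level;
			 * clearing the pmd entry removes the whole segment.
			 */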
			pmd_clear_kernel(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
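			/* Allocate a page to back this part of the virtual mem_map. */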
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
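	/* Clear the newly backed virtual mem_map range. */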
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

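	/* Reject segments beyond VMEM_MAX_PHYS or with a wrapping end. */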
	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
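		/* No conflict if tmp lies entirely below or above seg. */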
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

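/*
 * Remove a previously added memory segment and invalidate its 1:1 mapping.
 */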
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

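/*
 * Register a memory segment and add it writable to the 1:1 mapping.
 */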
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
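	/*
	 * The kernel image between _stext and _eshared is mapped
	 * read-only; chunks that straddle this range are split.
	 */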
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);