/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);
	while (llnode) {
		void *p = llnode;
		llnode = llist_next(llnode);
		__vunmap(p, 1);
	}
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
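
/*
 * Illustrative sketch (not part of this file; the buffer below is made up):
 * a caller that needs the struct page or pfn behind one page of a
 * vmalloc()ed buffer can combine the two helpers above like this:
 *
 *	void *buf = vmalloc(2 * PAGE_SIZE);
 *	if (buf) {
 *		struct page *pg = vmalloc_to_page(buf + PAGE_SIZE);
 *		unsigned long pfn = vmalloc_to_pfn(buf + PAGE_SIZE);
 *		...
 *		vfree(buf);
 *	}
 */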


/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;
	struct vmap_area *first;

	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
	spin_lock(&vmap_area_lock);
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
		addr = ALIGN(first->va_end, align);
		if (addr < vstart)
			goto nocache;
		if (addr + size < addr)
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
		if (addr + size < addr)
			goto overflow;

		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, walk areas until a suitable hole is found */
	while (addr + size > first->va_start && addr + size <= vend) {
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
		addr = ALIGN(first->va_end, align);
		if (addr + size < addr)
			goto overflow;

		if (list_is_last(&first->list, &vmap_area_list))
			goto found;

		first = list_next_entry(first, list);
	}

found:
	if (addr + size > vend)
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;
	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	if (gfpflags_allow_blocking(gfp_mask)) {
		unsigned long freed = 0;
		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
		if (freed > 0) {
			purged = 0;
			goto retry;
		}
	}

	if (printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: "
			"use vmalloc=<size> to increase size.\n", size);
	kfree(va);
	return ERR_PTR(-EBUSY);
}

int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
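
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * subsystem that caches vmap space can register a purge notifier and
 * report how much it released; "my_drop_cache" is a made-up helper
 * returning the amount freed.
 *
 *	static int my_vmap_purge(struct notifier_block *nb,
 *				 unsigned long unused, void *data)
 *	{
 *		unsigned long *freed = data;
 *
 *		*freed += my_drop_cache();
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_vmap_nb = {
 *		.notifier_call = my_vmap_purge,
 *	};
 *
 *	register_vmap_purge_notifier(&my_vmap_nb);
 *	...
 *	unregister_vmap_purge_notifier(&my_vmap_nb);
 */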

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
				/*
				 * We don't try to update cached_hole_size or
				 * cached_align, but it won't go very wrong.
				 */
			}
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}

/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *                  block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
 * @order:    how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
	offset >>= PAGE_SHIFT;

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
			if (vb->dirty) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end   = max(e, end);

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(!IS_ALIGNED(addr, PAGE_SIZE));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good.  But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine).  You could see failures in
 * the end.  Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
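
/*
 * Illustrative sketch (not part of this file): typical pairing of
 * vm_map_ram()/vm_unmap_ram() for a small, short-lived mapping of an
 * existing page array "pages" with "nr" entries (both assumed to exist
 * in the caller).
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	... access the pages linearly through va ...
 *	vm_unmap_ram(va, nr);
 */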

static struct vm_struct *vmlist __initdata;
/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range);

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + get_vm_area_size(area);
	int err;

	err = vmap_page_range(addr, end, prot, pages);

	return err > 0 ? 0 : err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	va->flags |= VM_VM_AREA;
	spin_unlock(&vmap_area_lock);
}

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, fls_long(size),
				       PAGE_SHIFT, IOREMAP_MAX_ORDER);

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 *	find_vm_area  -  find a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and return it.
 *	It is up to the caller to do all required locking to keep the returned
 *	pointer valid.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}

/**
 *	remove_vm_area  -  find and remove a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
		spin_unlock(&vmap_area_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		kasan_free_shadow(vm);
		free_unmap_vmap_area(va);

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, get_vm_area_size(area));
	debug_check_no_obj_freed(addr, get_vm_area_size(area));

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_kmem_pages(page, 0);
		}

		kvfree(area->pages);
	}

	kfree(area);
	return;
}
 
/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually continuous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in NMI context (strictly speaking, only if we don't
 *	have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 *	conventions for vfree() arch-dependent would be a really bad idea)
 *
 *	NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	if (unlikely(in_interrupt())) {
		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
		if (llist_add((struct llist_node *)addr, &p->list))
			schedule_work(&p->wq);
	} else
		__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
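
/*
 * Illustrative sketch (not part of this file): a longer-lived contiguous
 * view over a previously allocated page array "pages" with "nr_pages"
 * entries (both assumed to exist in the caller); the mapping stays valid
 * until vunmap().
 *
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *	if (va) {
 *		... use va ...
 *		vunmap(va);
 *	}
 */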

static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node)
{
	const int order = 0;
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;

	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, area->caller);
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node == NUMA_NO_NODE)
			page = alloc_kmem_pages(alloc_mask, order);
		else
			page = alloc_kmem_pages_node(node, alloc_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
		if (gfpflags_allow_blocking(gfp_mask))
			cond_resched();
	}

	if (map_vm_area(area, prot, pages))
		goto fail;
	return area->addr;

fail:
	warn_alloc_failed(gfp_mask, order,
			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
			  (area->nr_pages*PAGE_SIZE), area->size);
	vfree(area->addr);
	return NULL;
}

/**
 *	__vmalloc_node_range  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@start:		vm area range start
 *	@end:		vm area range end
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@vm_flags:	additional vm area flags (e.g. %VM_NO_GUARD)
 *	@node:		node to use for allocation or NUMA_NO_NODE
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		goto fail;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
				vm_flags, start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_uninitialized_flag(area);

	/*
	 * A ref_count = 2 is needed because vm_struct allocated in
	 * __get_vm_area_node() contains a reference to the virtual address of
	 * the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 2, gfp_mask);

	return addr;

fail:
	warn_alloc_failed(gfp_mask, 0,
			  "vmalloc: allocation failure: %lu bytes\n",
			  real_size);
	return NULL;
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
D
David Rientjes 已提交
1726
 *	@node:		node to use for allocation or NUMA_NO_NODE
1727 1728 1729 1730 1731 1732 1733 1734
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
1735
			    int node, const void *caller)
1736 1737
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
1738
				gfp_mask, prot, 0, node, caller);
1739 1740
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				    GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);
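
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * caller that needs a large, virtually contiguous but not physically
 * contiguous buffer.  example_nents and example_table are hypothetical
 * names.
 *
 *	struct example_entry *example_table;
 *
 *	example_table = vmalloc(example_nents * sizeof(*example_table));
 *	if (!example_table)
 *		return -ENOMEM;
 *	...
 *	vfree(example_table);
 */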

/**
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *	@size:	allocation size
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, NUMA_NO_NODE,
			     __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);
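
/*
 * Usage sketch (illustrative only): memory from vmalloc_user() is
 * flagged VM_USERMAP, so it can later be handed to userspace with
 * remap_vmalloc_range(); see the sketch after that function below.
 * ring and ring_size are hypothetical names.
 *
 *	void *ring = vmalloc_user(ring_size);
 *	if (!ring)
 *		return -ENOMEM;
 */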

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
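
/*
 * Usage sketch (illustrative only): allocate on the node of the calling
 * CPU.  example_sz is a hypothetical size.
 *
 *	void *buf = vmalloc_node(example_sz, numa_node_id());
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */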

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      NUMA_NO_NODE, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     NUMA_NO_NODE, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * Small helper routine to copy contents from addr to buf.
 * If the page is not present, fill with zeroes.
 */

static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a lock.
		 * But adding a lock here means that we need to add the
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, which is rarely used. Instead of that, we'll
		 * use kmap() and accept a small overhead in this access
		 * function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a lock.
		 * But adding a lock here means that we need to add the
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, which is rarely used. Instead of that, we'll
		 * use kmap() and accept a small overhead in this access
		 * function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 *	vread() -  read vmalloc area in a safe way.
 *	@buf:		buffer for reading data
 *	@addr:		vm address.
 *	@count:		number of bytes to be read.
 *
 *	Returns the number of bytes by which addr and buf should be advanced
 *	(the same number as @count). Returns 0 if [addr...addr+count) does not
 *	intersect any live vmalloc area.
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	copies data from that area to the given buffer. If the given memory
 *	range of [addr...addr+count) includes some valid address, data is
 *	copied to the proper area of @buf. If there are memory holes, they'll
 *	be zero-filled. IOREMAP areas are treated as memory holes and no copy
 *	is done.
 *
 *	If [addr...addr+count) does not intersect any live vm_struct area,
 *	returns 0. @buf should be a kernel buffer.
 *
 *	Note: In usual ops, vread() is never necessary because the caller
 *	should know the vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access the vmalloc area without
 *	any information, such as /dev/kmem.
 *
 */

long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}
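
/*
 * Usage sketch (illustrative only): how a /dev/kmem-style reader might
 * pull bytes out of the vmalloc range without worrying about holes.
 * kbuf, req_count and src_addr are hypothetical names.
 *
 *	char *kbuf = kmalloc(req_count, GFP_KERNEL);
 *	long n;
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *	n = vread(kbuf, (char *)src_addr, req_count);
 *	if (!n)
 *		goto out_free;
 */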

/**
 *	vwrite() -  write vmalloc area in a safe way.
 *	@buf:		buffer for source data
 *	@addr:		vm address.
 *	@count:		number of bytes to be written.
 *
 *	Returns the number of bytes by which addr and buf should be advanced
 *	(the same number as @count).
 *	If [addr...addr+count) does not intersect any valid vmalloc area,
 *	returns 0.
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	copies data from a buffer to the given addr. If the specified range
 *	of [addr...addr+count) includes some valid address, data is copied
 *	from the proper area of @buf. If there are memory holes, nothing is
 *	copied to the holes. IOREMAP areas are treated as memory holes and
 *	no copy is done.
 *
 *	If [addr...addr+count) does not intersect any live vm_struct area,
 *	returns 0. @buf should be a kernel buffer.
 *
 *	Note: In usual ops, vwrite() is never necessary because the caller
 *	should know the vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access the vmalloc area without
 *	any information, such as /dev/kmem.
 */

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);
	if (!copied)
		return 0;
	return buflen;
}

/**
 *	remap_vmalloc_range_partial  -  map vmalloc pages to userspace
 *	@vma:		vma to cover
 *	@uaddr:		target user address to start at
 *	@kaddr:		virtual address of vmalloc kernel memory
 *	@size:		size of map area
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that @kaddr is a valid vmalloc'ed area,
 *	and that it is big enough to cover the range starting at
 *	@uaddr in @vma. Will return failure if that criterion isn't
 *	met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long size)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (kaddr + size > area->addr + area->size)
		return -EINVAL;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range_partial);

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	that criterion isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr + (pgoff << PAGE_SHIFT),
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
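
/*
 * Usage sketch (illustrative only, names are hypothetical): a driver
 * that allocated a buffer with vmalloc_user() can expose it through its
 * ->mmap() handler like this.
 *
 *	static void *example_buf;	(allocated with vmalloc_user())
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
 *	}
 */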

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	pte_t ***p = data;

	if (p) {
		*(*p) = pte;
		(*p)++;
	}
	return 0;
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *	@ptes:		returns the PTEs for the address space
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.
 *
 *	If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 *	allocated for the VM area are returned.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, f, ptes ? &ptes : NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
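
/*
 * Usage sketch (illustrative only): reserve one page of kernel address
 * space and get back its pte so a mapping can be installed into it
 * later, in the way Xen-style grant mappings use this interface.
 * example_pte and example_area are hypothetical names.
 *
 *	pte_t *example_pte;
 *	struct vm_struct *example_area;
 *
 *	example_area = alloc_vm_area(PAGE_SIZE, &example_pte);
 *	if (!example_area)
 *		return -ENOMEM;
 *	...	install a mapping through example_pte, use example_area->addr ...
 *	free_vm_area(example_area);
 */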

#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
}

/**
 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
 * @end: target address
 * @pnext: out arg for the next vmap_area
 * @pprev: out arg for the previous vmap_area
 *
 * Returns: %true if either or both of next and prev are found,
 *	    %false if no vmap_area exists
 *
 * Find the vmap_areas whose end addresses enclose @end, i.e. if not
 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
 */
static bool pvm_find_next_prev(unsigned long end,
			       struct vmap_area **pnext,
			       struct vmap_area **pprev)
{
	struct rb_node *n = vmap_area_root.rb_node;
	struct vmap_area *va = NULL;

	while (n) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if (end < va->va_end)
			n = n->rb_left;
		else if (end > va->va_end)
			n = n->rb_right;
		else
			break;
	}

	if (!va)
		return false;

	if (va->va_end > end) {
		*pnext = va;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	} else {
		*pprev = va;
		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
	}
	return true;
}

/**
 * pvm_determine_end - find the highest aligned address between two vmap_areas
 * @pnext: in/out arg for the next vmap_area
 * @pprev: in/out arg for the previous vmap_area
 * @align: alignment
 *
 * Returns: determined end address
 *
 * Find the highest aligned address between *@pnext and *@pprev below
 * VMALLOC_END.  *@pnext and *@pprev are adjusted so that the aligned
 * down address is between the end addresses of the two vmap_areas.
 *
 * Please note that the address returned by this function may fall
 * inside *@pnext vmap_area.  The caller is responsible for checking
 * that.
 */
static unsigned long pvm_determine_end(struct vmap_area **pnext,
				       struct vmap_area **pprev,
				       unsigned long align)
{
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (*pnext)
		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
	else
		addr = vmalloc_end;

	while (*pprev && (*pprev)->va_end > addr) {
		*pnext = *pprev;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	}

	return addr;
}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes.  To avoid interacting with regular vmallocs, these
 * areas are allocated from top.
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans areas from the end looking for a
 * matching slot.  While scanning, if any of the areas overlaps with an
 * existing vmap_area, the base address is pulled down to fit the
 * area.  Scanning is repeated till all the areas fit and then all
 * necessary data structures are inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *prev, *next;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, end, last_end;
	bool purged = false;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = 0; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			if (area2 == area)
				continue;

			BUG_ON(start2 >= start && start2 < end);
			BUG_ON(end2 <= end && end2 > start);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
		base = vmalloc_end - last_end;
		goto found;
	}
	base = pvm_determine_end(&next, &prev, align) - end;

	while (true) {
		BUG_ON(next && next->va_end <= base + end);
		BUG_ON(prev && prev->va_end > base + end);

		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end) {
			spin_unlock(&vmap_area_lock);
			if (!purged) {
				purge_vmap_area_lazy();
				purged = true;
				goto retry;
			}
			goto err_free;
		}

		/*
		 * If next overlaps, move base downwards so that it's
		 * right below next and then recheck.
		 */
		if (next && next->va_start < base + end) {
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If prev overlaps, shift down next and prev and move
		 * base so that it's right below new next and then
		 * recheck.
		 */
		if (prev && prev->va_end > base + start)  {
			next = prev;
			prev = node_to_va(rb_prev(&next->rb_node));
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;
		start = offsets[area];
		end = start + sizes[area];
		pvm_find_next_prev(base + end, &next, &prev);
	}
found:
	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_area *va = vas[area];

		va->va_start = base + offsets[area];
		va->va_end = va->va_start + sizes[area];
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = base + offsets[last_area];

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);

	kfree(vas);
	return vms;

err_free:
	for (area = 0; area < nr_vms; area++) {
		kfree(vas[area]);
		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;
}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_area_lock)
{
	loff_t n = *pos;
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = list_first_entry(&vmap_area_list, typeof(*va), list);
	while (n > 0 && &va->list != &vmap_area_list) {
		n--;
		va = list_next_entry(va, list);
	}
	if (!n && &va->list != &vmap_area_list)
		return va;

	return NULL;

}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vmap_area *va = p, *next;

	++*pos;
	next = list_next_entry(va, list);
	if (&next->list != &vmap_area_list)
		return next;

	return NULL;
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
{
	spin_unlock(&vmap_area_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va = p;
	struct vm_struct *v;

	/*
	 * s_show can encounter a race with remove_vm_area(): !VM_VM_AREA
	 * means the vmap area is being torn down or belongs to a
	 * vm_map_ram() allocation.
	 */
	if (!(va->flags & VM_VM_AREA))
		return 0;

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (is_vmalloc_addr(v->pages))
		seq_puts(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
	if (IS_ENABLED(CONFIG_NUMA))
		return seq_open_private(file, &vmalloc_op,
					nr_node_ids * sizeof(unsigned int));
	else
		return seq_open(file, &vmalloc_op);
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);

#endif