vmalloc.c 70.2 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
C
Christoph Lameter 已提交
8
 *  Numa awareness, Christoph Lameter, SGI, June 2005
L
Linus Torvalds 已提交
9 10
 */

N
Nick Piggin 已提交
11
#include <linux/vmalloc.h>
L
Linus Torvalds 已提交
12 13 14
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
15
#include <linux/sched/signal.h>
L
Linus Torvalds 已提交
16 17 18
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
19
#include <linux/proc_fs.h>
20
#include <linux/seq_file.h>
21
#include <linux/debugobjects.h>
22
#include <linux/kallsyms.h>
N
Nick Piggin 已提交
23
#include <linux/list.h>
24
#include <linux/notifier.h>
N
Nick Piggin 已提交
25 26 27
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
28
#include <linux/pfn.h>
29
#include <linux/kmemleak.h>
A
Arun Sharma 已提交
30
#include <linux/atomic.h>
31
#include <linux/compiler.h>
32
#include <linux/llist.h>
33
#include <linux/bitops.h>
34

35
#include <linux/uaccess.h>
L
Linus Torvalds 已提交
36
#include <asm/tlbflush.h>
37
#include <asm/shmparam.h>
L
Linus Torvalds 已提交
38

39 40
#include "internal.h"

41 42 43 44 45 46 47 48 49 50 51
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
52 53 54 55
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
56 57
}

N
Nick Piggin 已提交
58
/*** Page table manipulation functions ***/
A
Adrian Bunk 已提交
59

L
Linus Torvalds 已提交
60 61 62 63 64 65 66 67 68 69 70
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

N
Nick Piggin 已提交
71
static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
L
Linus Torvalds 已提交
72 73 74 75 76 77 78
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
79 80
		if (pmd_clear_huge(pmd))
			continue;
L
Linus Torvalds 已提交
81 82 83 84 85 86
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

87
static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
L
Linus Torvalds 已提交
88 89 90 91
{
	pud_t *pud;
	unsigned long next;

92
	pud = pud_offset(p4d, addr);
L
Linus Torvalds 已提交
93 94
	do {
		next = pud_addr_end(addr, end);
95 96
		if (pud_clear_huge(pud))
			continue;
L
Linus Torvalds 已提交
97 98 99 100 101 102
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118
static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_clear_huge(p4d))
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

N
Nick Piggin 已提交
119
static void vunmap_page_range(unsigned long addr, unsigned long end)
L
Linus Torvalds 已提交
120 121 122 123 124 125 126 127 128 129
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
130
		vunmap_p4d_range(pgd, addr, next);
L
Linus Torvalds 已提交
131 132 133 134
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
N
Nick Piggin 已提交
135
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
L
Linus Torvalds 已提交
136 137 138
{
	pte_t *pte;

N
Nick Piggin 已提交
139 140 141 142 143
	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

H
Hugh Dickins 已提交
144
	pte = pte_alloc_kernel(pmd, addr);
L
Linus Torvalds 已提交
145 146 147
	if (!pte)
		return -ENOMEM;
	do {
N
Nick Piggin 已提交
148 149 150 151 152
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
L
Linus Torvalds 已提交
153 154
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
N
Nick Piggin 已提交
155
		(*nr)++;
L
Linus Torvalds 已提交
156 157 158 159
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

N
Nick Piggin 已提交
160 161
static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
L
Linus Torvalds 已提交
162 163 164 165 166 167 168 169 170
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
N
Nick Piggin 已提交
171
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
L
Linus Torvalds 已提交
172 173 174 175 176
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

177
static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
N
Nick Piggin 已提交
178
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
L
Linus Torvalds 已提交
179 180 181 182
{
	pud_t *pud;
	unsigned long next;

183
	pud = pud_alloc(&init_mm, p4d, addr);
L
Linus Torvalds 已提交
184 185 186 187
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
N
Nick Piggin 已提交
188
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
L
Linus Torvalds 已提交
189 190 191 192 193
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210
static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

N
Nick Piggin 已提交
211 212 213 214 215 216
/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
217 218
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
L
Linus Torvalds 已提交
219 220 221
{
	pgd_t *pgd;
	unsigned long next;
222
	unsigned long addr = start;
N
Nick Piggin 已提交
223 224
	int err = 0;
	int nr = 0;
L
Linus Torvalds 已提交
225 226 227 228 229

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
230
		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
L
Linus Torvalds 已提交
231
		if (err)
232
			return err;
L
Linus Torvalds 已提交
233
	} while (pgd++, addr = next, addr != end);
N
Nick Piggin 已提交
234 235

	return nr;
L
Linus Torvalds 已提交
236 237
}

238 239 240 241 242 243 244 245 246 247
static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

248
int is_vmalloc_or_module_addr(const void *x)
249 250
{
	/*
251
	 * ARM, x86-64 and sparc64 put modules in a special place,
252 253 254 255 256 257 258 259 260 261 262
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

263
/*
264
 * Walk a vmap address to the struct page it maps.
265
 */
266
struct page *vmalloc_to_page(const void *vmalloc_addr)
267 268
{
	unsigned long addr = (unsigned long) vmalloc_addr;
269
	struct page *page = NULL;
270
	pgd_t *pgd = pgd_offset_k(addr);
271 272 273 274
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
275

276 277 278 279
	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
280
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
J
Jiri Slaby 已提交
281

282 283 284 285 286 287
	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
288 289 290 291 292 293 294 295 296 297 298

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
	 * not [unambiguously] associated with a struct page, so there is
	 * no correct value to return for them.
	 */
	WARN_ON_ONCE(pud_bad(*pud));
	if (pud_none(*pud) || pud_bad(*pud))
299 300
		return NULL;
	pmd = pmd_offset(pud, addr);
301 302
	WARN_ON_ONCE(pmd_bad(*pmd));
	if (pmd_none(*pmd) || pmd_bad(*pmd))
303 304 305 306 307 308 309
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);
310
	return page;
311
}
312
EXPORT_SYMBOL(vmalloc_to_page);
313 314

/*
315
 * Map a vmalloc()-space virtual address to the physical page frame number.
316
 */
317
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
318
{
319
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
320
}
321
EXPORT_SYMBOL(vmalloc_to_pfn);
322

N
Nick Piggin 已提交
323 324 325

/*** Global kva allocator ***/

326
#define VM_LAZY_FREE	0x02
N
Nick Piggin 已提交
327 328 329
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
330 331
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
332
static LLIST_HEAD(vmap_purge_list);
N
Nick Piggin 已提交
333 334 335 336 337 338 339 340
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

341
static unsigned long vmap_area_pcpu_hole;
N
Nick Piggin 已提交
342 343

static struct vmap_area *__find_vmap_area(unsigned long addr)
L
Linus Torvalds 已提交
344
{
N
Nick Piggin 已提交
345 346 347 348 349 350 351 352
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
353
		else if (addr >= va->va_end)
N
Nick Piggin 已提交
354 355 356 357 358 359 360 361 362 363 364 365 366 367 368
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
369
		struct vmap_area *tmp_va;
N
Nick Piggin 已提交
370 371

		parent = *p;
372 373
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
N
Nick Piggin 已提交
374
			p = &(*p)->rb_left;
375
		else if (va->va_end > tmp_va->va_start)
N
Nick Piggin 已提交
376 377 378 379 380 381 382 383
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

384
	/* address-sort this list */
N
Nick Piggin 已提交
385 386 387 388 389 390 391 392 393 394 395
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

396 397
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);

N
Nick Piggin 已提交
398 399 400 401 402 403 404 405 406 407 408
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
L
Linus Torvalds 已提交
409
	unsigned long addr;
N
Nick Piggin 已提交
410
	int purged = 0;
N
Nick Piggin 已提交
411
	struct vmap_area *first;
N
Nick Piggin 已提交
412

N
Nick Piggin 已提交
413
	BUG_ON(!size);
414
	BUG_ON(offset_in_page(size));
N
Nick Piggin 已提交
415
	BUG_ON(!is_power_of_2(align));
N
Nick Piggin 已提交
416

417
	might_sleep();
418

N
Nick Piggin 已提交
419 420 421 422 423
	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

424 425 426 427 428 429
	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

N
Nick Piggin 已提交
430 431
retry:
	spin_lock(&vmap_area_lock);
N
Nick Piggin 已提交
432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
456
		addr = ALIGN(first->va_end, align);
N
Nick Piggin 已提交
457 458
		if (addr < vstart)
			goto nocache;
459
		if (addr + size < addr)
N
Nick Piggin 已提交
460 461 462 463
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
464
		if (addr + size < addr)
N
Nick Piggin 已提交
465 466 467 468 469 470
			goto overflow;

		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
N
Nick Piggin 已提交
471 472 473 474
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
N
Nick Piggin 已提交
475 476 477 478
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
N
Nick Piggin 已提交
479
				n = n->rb_right;
N
Nick Piggin 已提交
480
		}
N
Nick Piggin 已提交
481 482 483 484

		if (!first)
			goto found;
	}
N
Nick Piggin 已提交
485 486

	/* from the starting point, walk areas until a suitable hole is found */
487
	while (addr + size > first->va_start && addr + size <= vend) {
N
Nick Piggin 已提交
488 489
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
490
		addr = ALIGN(first->va_end, align);
491
		if (addr + size < addr)
N
Nick Piggin 已提交
492 493
			goto overflow;

494
		if (list_is_last(&first->list, &vmap_area_list))
N
Nick Piggin 已提交
495
			goto found;
496

497
		first = list_next_entry(first, list);
N
Nick Piggin 已提交
498 499
	}

N
Nick Piggin 已提交
500 501 502
found:
	if (addr + size > vend)
		goto overflow;
N
Nick Piggin 已提交
503 504 505 506 507

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
N
Nick Piggin 已提交
508
	free_vmap_cache = &va->rb_node;
N
Nick Piggin 已提交
509 510
	spin_unlock(&vmap_area_lock);

511
	BUG_ON(!IS_ALIGNED(va->va_start, align));
N
Nick Piggin 已提交
512 513 514
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

N
Nick Piggin 已提交
515
	return va;
N
Nick Piggin 已提交
516 517 518 519 520 521 522 523

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}
524 525 526 527 528 529 530 531 532 533

	if (gfpflags_allow_blocking(gfp_mask)) {
		unsigned long freed = 0;
		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
		if (freed > 0) {
			purged = 0;
			goto retry;
		}
	}

534
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
J
Joe Perches 已提交
535 536
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);
N
Nick Piggin 已提交
537 538
	kfree(va);
	return ERR_PTR(-EBUSY);
N
Nick Piggin 已提交
539 540
}

541 542 543 544 545 546 547 548 549 550 551 552
int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);

N
Nick Piggin 已提交
553 554 555
static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
N
Nick Piggin 已提交
556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
				/*
				 * We don't try to update cached_hole_size or
				 * cached_align, but it won't go very wrong.
				 */
			}
		}
	}
N
Nick Piggin 已提交
572 573 574 575
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

576 577 578 579 580 581 582 583 584
	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

585
	kfree_rcu(va, rcu_head);
N
Nick Piggin 已提交
586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

606 607 608
static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
609 610 611 612
	 * Unmap page tables and force a TLB flush immediately if pagealloc
	 * debugging is enabled.  This catches use after free bugs similarly to
	 * those in linear kernel virtual address space after a page has been
	 * freed.
613
	 *
614 615
	 * All the lazy freeing logic is still retained, in order to minimise
	 * intrusiveness of this debugging feature.
616
	 *
617 618
	 * This is going to be *slow* (linear kernel virtual address debugging
	 * doesn't do a broadcast TLB flush so it is a lot faster).
619
	 */
620 621 622 623
	if (debug_pagealloc_enabled()) {
		vunmap_page_range(start, end);
		flush_tlb_kernel_range(start, end);
	}
624 625
}

N
Nick Piggin 已提交
626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652
/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

653 654 655 656 657
/*
 * Serialize vmap purging.  There is no actual criticial section protected
 * by this look, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
658
static DEFINE_MUTEX(vmap_purge_lock);
659

660 661 662
/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

663 664 665 666 667 668 669 670 671
/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

N
Nick Piggin 已提交
672 673 674
/*
 * Purges all lazily-freed vmap areas.
 */
675
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
N
Nick Piggin 已提交
676
{
677
	struct llist_node *valist;
N
Nick Piggin 已提交
678
	struct vmap_area *va;
679
	struct vmap_area *n_va;
680
	bool do_free = false;
N
Nick Piggin 已提交
681

682
	lockdep_assert_held(&vmap_purge_lock);
683

684 685
	valist = llist_del_all(&vmap_purge_list);
	llist_for_each_entry(va, valist, purge_list) {
686 687 688 689
		if (va->va_start < start)
			start = va->va_start;
		if (va->va_end > end)
			end = va->va_end;
690
		do_free = true;
N
Nick Piggin 已提交
691 692
	}

693
	if (!do_free)
694
		return false;
N
Nick Piggin 已提交
695

696
	flush_tlb_kernel_range(start, end);
N
Nick Piggin 已提交
697

698
	spin_lock(&vmap_area_lock);
699 700 701
	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
		int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;

702
		__free_vmap_area(va);
703 704 705
		atomic_sub(nr, &vmap_lazy_nr);
		cond_resched_lock(&vmap_area_lock);
	}
706 707
	spin_unlock(&vmap_area_lock);
	return true;
N
Nick Piggin 已提交
708 709
}

N
Nick Piggin 已提交
710 711 712 713 714 715
/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
716
	if (mutex_trylock(&vmap_purge_lock)) {
717
		__purge_vmap_area_lazy(ULONG_MAX, 0);
718
		mutex_unlock(&vmap_purge_lock);
719
	}
N
Nick Piggin 已提交
720 721
}

N
Nick Piggin 已提交
722 723 724 725 726
/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
727
	mutex_lock(&vmap_purge_lock);
728 729
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
730
	mutex_unlock(&vmap_purge_lock);
N
Nick Piggin 已提交
731 732 733
}

/*
734 735 736
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
N
Nick Piggin 已提交
737
 */
738
static void free_vmap_area_noflush(struct vmap_area *va)
N
Nick Piggin 已提交
739
{
740 741 742 743 744 745 746 747 748
	int nr_lazy;

	nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT,
				    &vmap_lazy_nr);

	/* After this point, we may free va at any time */
	llist_add(&va->purge_list, &vmap_purge_list);

	if (unlikely(nr_lazy > lazy_max_pages()))
N
Nick Piggin 已提交
749
		try_purge_vmap_area_lazy();
N
Nick Piggin 已提交
750 751
}

752 753 754 755 756 757
/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
758 759
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
760 761
}

N
Nick Piggin 已提交
762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795
static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
796 797 798 799
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
N
Nick Piggin 已提交
800 801 802

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

803 804
static bool vmap_initialized __read_mostly = false;

N
Nick Piggin 已提交
805 806 807 808 809 810 811 812 813
struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
814
	unsigned long dirty_min, dirty_max; /*< dirty range */
815 816
	struct list_head free_list;
	struct rcu_head rcu_head;
817
	struct list_head purge;
N
Nick Piggin 已提交
818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862
static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}

/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
 * @order:    how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
N
Nick Piggin 已提交
863 864 865 866 867 868
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
869
	void *vaddr;
N
Nick Piggin 已提交
870 871 872 873 874 875 876 877 878 879 880

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
881
	if (IS_ERR(va)) {
N
Nick Piggin 已提交
882
		kfree(vb);
J
Julia Lawall 已提交
883
		return ERR_CAST(va);
N
Nick Piggin 已提交
884 885 886 887 888 889 890 891 892
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

893
	vaddr = vmap_block_vaddr(va->va_start, 0);
N
Nick Piggin 已提交
894 895
	spin_lock_init(&vb->lock);
	vb->va = va;
896 897 898
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
N
Nick Piggin 已提交
899
	vb->dirty = 0;
900 901
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
N
Nick Piggin 已提交
902 903 904 905 906 907 908 909 910 911 912
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
913
	list_add_tail_rcu(&vb->free_list, &vbq->free);
N
Nick Piggin 已提交
914
	spin_unlock(&vbq->lock);
915
	put_cpu_var(vmap_block_queue);
N
Nick Piggin 已提交
916

917
	return vaddr;
N
Nick Piggin 已提交
918 919 920 921 922 923 924 925 926 927 928 929 930
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

931
	free_vmap_area_noflush(vb->va);
932
	kfree_rcu(vb, rcu_head);
N
Nick Piggin 已提交
933 934
}

935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951
static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
952 953
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

N
Nick Piggin 已提交
978 979 980 981
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
982
	void *vaddr = NULL;
N
Nick Piggin 已提交
983 984
	unsigned int order;

985
	BUG_ON(offset_in_page(size));
N
Nick Piggin 已提交
986
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
J
Jan Kara 已提交
987 988 989 990 991 992 993 994
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
N
Nick Piggin 已提交
995 996 997 998 999
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1000
		unsigned long pages_off;
N
Nick Piggin 已提交
1001 1002

		spin_lock(&vb->lock);
1003 1004 1005 1006
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}
1007

1008 1009
		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1010 1011 1012 1013 1014 1015
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}
1016

1017 1018
		spin_unlock(&vb->lock);
		break;
N
Nick Piggin 已提交
1019
	}
1020

1021
	put_cpu_var(vmap_block_queue);
N
Nick Piggin 已提交
1022 1023
	rcu_read_unlock();

1024 1025 1026
	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);
N
Nick Piggin 已提交
1027

1028
	return vaddr;
N
Nick Piggin 已提交
1029 1030 1031 1032 1033 1034 1035 1036 1037
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

1038
	BUG_ON(offset_in_page(size));
N
Nick Piggin 已提交
1039
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1040 1041 1042

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

N
Nick Piggin 已提交
1043 1044 1045
	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
1046
	offset >>= PAGE_SHIFT;
N
Nick Piggin 已提交
1047 1048 1049 1050 1051 1052 1053

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

1054 1055
	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

N
Nick Piggin 已提交
1056
	spin_lock(&vb->lock);
1057 1058 1059 1060

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
1061

N
Nick Piggin 已提交
1062 1063
	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
1064
		BUG_ON(vb->free);
N
Nick Piggin 已提交
1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

1090 1091 1092
	if (unlikely(!vmap_initialized))
		return;

1093 1094
	might_sleep();

N
Nick Piggin 已提交
1095 1096 1097 1098 1099 1100 1101
	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
1102 1103
			if (vb->dirty) {
				unsigned long va_start = vb->va->va_start;
N
Nick Piggin 已提交
1104
				unsigned long s, e;
1105

1106 1107
				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);
N
Nick Piggin 已提交
1108

1109 1110
				start = min(s, start);
				end   = max(e, end);
N
Nick Piggin 已提交
1111

1112
				flush = 1;
N
Nick Piggin 已提交
1113 1114 1115 1116 1117 1118
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

1119
	mutex_lock(&vmap_purge_lock);
1120 1121 1122
	purge_fragmented_blocks_allcpus();
	if (!__purge_vmap_area_lazy(start, end) && flush)
		flush_tlb_kernel_range(start, end);
1123
	mutex_unlock(&vmap_purge_lock);
N
Nick Piggin 已提交
1124 1125 1126 1127 1128 1129 1130 1131 1132 1133
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
1134
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
N
Nick Piggin 已提交
1135
	unsigned long addr = (unsigned long)mem;
1136
	struct vmap_area *va;
N
Nick Piggin 已提交
1137

1138
	might_sleep();
N
Nick Piggin 已提交
1139 1140 1141
	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
1142
	BUG_ON(!PAGE_ALIGNED(addr));
N
Nick Piggin 已提交
1143 1144

	debug_check_no_locks_freed(mem, size);
1145
	vmap_debug_free_range(addr, addr+size);
N
Nick Piggin 已提交
1146

1147
	if (likely(count <= VMAP_MAX_ALLOC)) {
N
Nick Piggin 已提交
1148
		vb_free(mem, size);
1149 1150 1151 1152 1153 1154
		return;
	}

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
N
Nick Piggin 已提交
1155 1156 1157 1158 1159 1160 1161 1162 1163
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
1164
 *
1165 1166 1167 1168 1169 1170
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good.  But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine).  You could see failures in
 * the end.  Please use this function for short-lived objects.
 *
1171
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
N
Nick Piggin 已提交
1172 1173 1174
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
1175
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
N
Nick Piggin 已提交
1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);

1202
static struct vm_struct *vmlist __initdata;
N
Nicolas Pitre 已提交
1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228
/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

1229 1230 1231
/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
1232
 * @align: requested alignment
1233 1234 1235 1236 1237 1238 1239 1240
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
1241
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1242 1243
{
	static size_t vm_init_off __initdata;
1244 1245 1246 1247
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1248

1249
	vm->addr = (void *)addr;
1250

N
Nicolas Pitre 已提交
1251
	vm_area_add_early(vm);
1252 1253
}

N
Nick Piggin 已提交
1254 1255
void __init vmalloc_init(void)
{
I
Ivan Kokshaysky 已提交
1256 1257
	struct vmap_area *va;
	struct vm_struct *tmp;
N
Nick Piggin 已提交
1258 1259 1260 1261
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
1262
		struct vfree_deferred *p;
N
Nick Piggin 已提交
1263 1264 1265 1266

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
1267 1268 1269
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
N
Nick Piggin 已提交
1270
	}
1271

I
Ivan Kokshaysky 已提交
1272 1273
	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
1274
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
1275
		va->flags = VM_VM_AREA;
I
Ivan Kokshaysky 已提交
1276 1277
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
1278
		va->vm = tmp;
I
Ivan Kokshaysky 已提交
1279 1280
		__insert_vmap_area(va);
	}
1281 1282 1283

	vmap_area_pcpu_hole = VMALLOC_END;

1284
	vmap_initialized = true;
N
Nick Piggin 已提交
1285 1286
}

1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329
/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-mapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
1330
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
1331 1332 1333 1334 1335 1336 1337 1338 1339

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
N
Nick Piggin 已提交
1340 1341 1342
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;
1343 1344

	flush_cache_vunmap(addr, end);
N
Nick Piggin 已提交
1345 1346 1347
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}
1348
EXPORT_SYMBOL_GPL(unmap_kernel_range);
N
Nick Piggin 已提交
1349

1350
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
N
Nick Piggin 已提交
1351 1352
{
	unsigned long addr = (unsigned long)area->addr;
1353
	unsigned long end = addr + get_vm_area_size(area);
N
Nick Piggin 已提交
1354 1355
	int err;

1356
	err = vmap_page_range(addr, end, prot, pages);
N
Nick Piggin 已提交
1357

1358
	return err > 0 ? 0 : err;
N
Nick Piggin 已提交
1359 1360 1361
}
EXPORT_SYMBOL_GPL(map_vm_area);

1362
static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
1363
			      unsigned long flags, const void *caller)
1364
{
1365
	spin_lock(&vmap_area_lock);
1366 1367 1368 1369
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
1370
	va->vm = vm;
1371
	va->flags |= VM_VM_AREA;
1372
	spin_unlock(&vmap_area_lock);
1373
}
1374

1375
static void clear_vm_uninitialized_flag(struct vm_struct *vm)
1376
{
1377
	/*
1378
	 * Before removing VM_UNINITIALIZED,
1379 1380 1381 1382
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
1383
	vm->flags &= ~VM_UNINITIALIZED;
1384 1385
}

N
Nick Piggin 已提交
1386
static struct vm_struct *__get_vm_area_node(unsigned long size,
1387
		unsigned long align, unsigned long flags, unsigned long start,
1388
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
N
Nick Piggin 已提交
1389
{
1390
	struct vmap_area *va;
N
Nick Piggin 已提交
1391
	struct vm_struct *area;
L
Linus Torvalds 已提交
1392

1393
	BUG_ON(in_interrupt());
L
Linus Torvalds 已提交
1394
	size = PAGE_ALIGN(size);
1395 1396
	if (unlikely(!size))
		return NULL;
L
Linus Torvalds 已提交
1397

1398 1399 1400 1401
	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, get_count_order_long(size),
				       PAGE_SHIFT, IOREMAP_MAX_ORDER);

1402
	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
L
Linus Torvalds 已提交
1403 1404 1405
	if (unlikely(!area))
		return NULL;

1406 1407
	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;
L
Linus Torvalds 已提交
1408

N
Nick Piggin 已提交
1409 1410 1411 1412
	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
L
Linus Torvalds 已提交
1413 1414
	}

1415
	setup_vmalloc_vm(area, va, flags, caller);
1416

L
Linus Torvalds 已提交
1417 1418 1419
	return area;
}

C
Christoph Lameter 已提交
1420 1421 1422
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
D
David Rientjes 已提交
1423 1424
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
C
Christoph Lameter 已提交
1425
}
1426
EXPORT_SYMBOL_GPL(__get_vm_area);
C
Christoph Lameter 已提交
1427

1428 1429
struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
1430
				       const void *caller)
1431
{
D
David Rientjes 已提交
1432 1433
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
1434 1435
}

L
Linus Torvalds 已提交
1436
/**
S
Simon Arlott 已提交
1437
 *	get_vm_area  -  reserve a contiguous kernel virtual area
L
Linus Torvalds 已提交
1438 1439 1440 1441 1442 1443 1444 1445 1446
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserved it for out purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
1447
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
D
David Rientjes 已提交
1448 1449
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
1450 1451 1452
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
1453
				const void *caller)
1454
{
1455
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
D
David Rientjes 已提交
1456
				  NUMA_NO_NODE, GFP_KERNEL, caller);
L
Linus Torvalds 已提交
1457 1458
}

1459 1460 1461 1462 1463 1464 1465 1466 1467
/**
 *	find_vm_area  -  find a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and return it.
 *	It is up to the caller to do all required locking to keep the returned
 *	pointer valid.
 */
struct vm_struct *find_vm_area(const void *addr)
1468
{
N
Nick Piggin 已提交
1469
	struct vmap_area *va;
1470

N
Nick Piggin 已提交
1471 1472
	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
1473
		return va->vm;
L
Linus Torvalds 已提交
1474 1475 1476 1477

	return NULL;
}

1478
/**
S
Simon Arlott 已提交
1479
 *	remove_vm_area  -  find and remove a continuous kernel virtual area
1480 1481 1482 1483 1484 1485
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
1486
struct vm_struct *remove_vm_area(const void *addr)
1487
{
N
Nick Piggin 已提交
1488 1489
	struct vmap_area *va;

1490 1491
	might_sleep();

N
Nick Piggin 已提交
1492 1493
	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
1494
		struct vm_struct *vm = va->vm;
1495

1496 1497 1498
		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
1499
		va->flags |= VM_LAZY_FREE;
1500 1501
		spin_unlock(&vmap_area_lock);

1502
		vmap_debug_free_range(va->va_start, va->va_end);
1503
		kasan_free_shadow(vm);
1504 1505
		free_unmap_vmap_area(va);

N
Nick Piggin 已提交
1506 1507 1508
		return vm;
	}
	return NULL;
1509 1510
}

1511
static void __vunmap(const void *addr, int deallocate_pages)
L
Linus Torvalds 已提交
1512 1513 1514 1515 1516 1517
{
	struct vm_struct *area;

	if (!addr)
		return;

1518
	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
D
Dan Carpenter 已提交
1519
			addr))
L
Linus Torvalds 已提交
1520 1521 1522 1523
		return;

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
A
Arjan van de Ven 已提交
1524
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
L
Linus Torvalds 已提交
1525 1526 1527 1528
				addr);
		return;
	}

1529 1530
	debug_check_no_locks_freed(addr, get_vm_area_size(area));
	debug_check_no_obj_freed(addr, get_vm_area_size(area));
1531

L
Linus Torvalds 已提交
1532 1533 1534 1535
	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
1536 1537 1538
			struct page *page = area->pages[i];

			BUG_ON(!page);
1539
			__free_pages(page, 0);
L
Linus Torvalds 已提交
1540 1541
		}

D
David Rientjes 已提交
1542
		kvfree(area->pages);
L
Linus Torvalds 已提交
1543 1544 1545 1546 1547
	}

	kfree(area);
	return;
}
A
Andrey Ryabinin 已提交
1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580

static inline void __vfree_deferred(const void *addr)
{
	/*
	 * Use raw_cpu_ptr() because this can be called from preemptible
	 * context. Preemption is absolutely fine here, because the llist_add()
	 * implementation is lockless, so it works even if we are adding to
	 * nother cpu's list.  schedule_work() should be fine with this too.
	 */
	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);

	if (llist_add((struct llist_node *)addr, &p->list))
		schedule_work(&p->wq);
}

/**
 *	vfree_atomic  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	This one is just like vfree() but can be called in any atomic context
 *	except NMIs.
 */
void vfree_atomic(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	__vfree_deferred(addr);
}

L
Linus Torvalds 已提交
1581 1582 1583 1584
/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
S
Simon Arlott 已提交
1585
 *	Free the virtually continuous memory area starting at @addr, as
1586 1587
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
L
Linus Torvalds 已提交
1588
 *
1589 1590 1591
 *	Must not be called in NMI context (strictly speaking, only if we don't
 *	have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 *	conventions for vfree() arch-depenedent would be a really bad idea)
A
Andrew Morton 已提交
1592
 *
1593
 *	NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
L
Linus Torvalds 已提交
1594
 */
1595
void vfree(const void *addr)
L
Linus Torvalds 已提交
1596
{
1597
	BUG_ON(in_nmi());
1598 1599 1600

	kmemleak_free(addr);

1601 1602
	if (!addr)
		return;
A
Andrey Ryabinin 已提交
1603 1604 1605
	if (unlikely(in_interrupt()))
		__vfree_deferred(addr);
	else
1606
		__vunmap(addr, 1);
L
Linus Torvalds 已提交
1607 1608 1609 1610 1611 1612 1613 1614 1615 1616
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
1617
 *	Must not be called in interrupt context.
L
Linus Torvalds 已提交
1618
 */
1619
void vunmap(const void *addr)
L
Linus Torvalds 已提交
1620 1621
{
	BUG_ON(in_interrupt());
1622
	might_sleep();
1623 1624
	if (addr)
		__vunmap(addr, 0);
L
Linus Torvalds 已提交
1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;
1642
	unsigned long size;		/* In bytes */
L
Linus Torvalds 已提交
1643

1644 1645
	might_sleep();

1646
	if (count > totalram_pages)
L
Linus Torvalds 已提交
1647 1648
		return NULL;

1649 1650
	size = (unsigned long)count << PAGE_SHIFT;
	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
L
Linus Torvalds 已提交
1651 1652
	if (!area)
		return NULL;
1653

1654
	if (map_vm_area(area, prot, pages)) {
L
Linus Torvalds 已提交
1655 1656 1657 1658 1659 1660 1661 1662
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

1663 1664 1665
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);
A
Adrian Bunk 已提交
1666
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1667
				 pgprot_t prot, int node)
L
Linus Torvalds 已提交
1668 1669 1670
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;
1671
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1672 1673 1674 1675
	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
					0 :
					__GFP_HIGHMEM;
L
Linus Torvalds 已提交
1676

1677
	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
L
Linus Torvalds 已提交
1678 1679 1680 1681
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
1682
	if (array_size > PAGE_SIZE) {
1683
		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
1684
				PAGE_KERNEL, node, area->caller);
1685
	} else {
1686
		pages = kmalloc_node(array_size, nested_gfp, node);
1687
	}
L
Linus Torvalds 已提交
1688 1689 1690 1691 1692 1693 1694 1695
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
1696 1697
		struct page *page;

J
Jianguo Wu 已提交
1698
		if (node == NUMA_NO_NODE)
1699
			page = alloc_page(alloc_mask|highmem_mask);
C
Christoph Lameter 已提交
1700
		else
1701
			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
1702 1703

		if (unlikely(!page)) {
L
Linus Torvalds 已提交
1704 1705 1706 1707
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
1708
		area->pages[i] = page;
1709
		if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
1710
			cond_resched();
L
Linus Torvalds 已提交
1711 1712
	}

1713
	if (map_vm_area(area, prot, pages))
L
Linus Torvalds 已提交
1714 1715 1716 1717
		goto fail;
	return area->addr;

fail:
1718
	warn_alloc(gfp_mask, NULL,
1719
			  "vmalloc: allocation failure, allocated %ld of %ld bytes",
1720
			  (area->nr_pages*PAGE_SIZE), area->size);
L
Linus Torvalds 已提交
1721 1722 1723 1724 1725
	vfree(area->addr);
	return NULL;
}

/**
1726
 *	__vmalloc_node_range  -  allocate virtually contiguous memory
L
Linus Torvalds 已提交
1727
 *	@size:		allocation size
1728
 *	@align:		desired alignment
1729 1730
 *	@start:		vm area range start
 *	@end:		vm area range end
L
Linus Torvalds 已提交
1731 1732
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
1733
 *	@vm_flags:	additional vm area flags (e.g. %VM_NO_GUARD)
D
David Rientjes 已提交
1734
 *	@node:		node to use for allocation or NUMA_NO_NODE
1735
 *	@caller:	caller's return address
L
Linus Torvalds 已提交
1736 1737 1738 1739 1740
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
1741 1742
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
1743 1744
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
L
Linus Torvalds 已提交
1745 1746
{
	struct vm_struct *area;
1747 1748
	void *addr;
	unsigned long real_size = size;
L
Linus Torvalds 已提交
1749 1750

	size = PAGE_ALIGN(size);
1751
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
1752
		goto fail;
L
Linus Torvalds 已提交
1753

1754 1755
	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
				vm_flags, start, end, node, gfp_mask, caller);
L
Linus Torvalds 已提交
1756
	if (!area)
1757
		goto fail;
L
Linus Torvalds 已提交
1758

1759
	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
1760
	if (!addr)
1761
		return NULL;
1762

1763
	/*
1764 1765
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
1766
	 * Now, it is fully initialized, so remove this flag here.
1767
	 */
1768
	clear_vm_uninitialized_flag(area);
1769

1770
	kmemleak_vmalloc(area, size, gfp_mask);
1771 1772

	return addr;
1773 1774

fail:
1775
	warn_alloc(gfp_mask, NULL,
1776
			  "vmalloc: allocation failure: %lu bytes", real_size);
1777
	return NULL;
L
Linus Torvalds 已提交
1778 1779
}

1780 1781 1782 1783 1784 1785
/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
D
David Rientjes 已提交
1786
 *	@node:		node to use for allocation or NUMA_NO_NODE
1787 1788 1789 1790 1791
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
M
Michal Hocko 已提交
1792
 *
1793
 *	Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
M
Michal Hocko 已提交
1794 1795 1796 1797 1798
 *	and __GFP_NOFAIL are not supported
 *
 *	Any use of gfp flags outside of GFP_KERNEL should be consulted
 *	with mm people.
 *
1799
 */
1800
static void *__vmalloc_node(unsigned long size, unsigned long align,
1801
			    gfp_t gfp_mask, pgprot_t prot,
1802
			    int node, const void *caller)
1803 1804
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
1805
				gfp_mask, prot, 0, node, caller);
1806 1807
}

C
Christoph Lameter 已提交
1808 1809
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
D
David Rientjes 已提交
1810
	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
1811
				__builtin_return_address(0));
C
Christoph Lameter 已提交
1812
}
L
Linus Torvalds 已提交
1813 1814
EXPORT_SYMBOL(__vmalloc);

1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828
static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}


void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
				  void *caller)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller);
}

L
Linus Torvalds 已提交
1829 1830 1831 1832 1833 1834
/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
1835
 *	For tight control over page level allocator and protection flags
L
Linus Torvalds 已提交
1836 1837 1838 1839
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
D
David Rientjes 已提交
1840
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
1841
				    GFP_KERNEL);
L
Linus Torvalds 已提交
1842 1843 1844
}
EXPORT_SYMBOL(vmalloc);

1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856
/**
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *	@size:	allocation size
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
D
David Rientjes 已提交
1857
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
1858
				GFP_KERNEL | __GFP_ZERO);
1859 1860 1861
}
EXPORT_SYMBOL(vzalloc);

1862
/**
1863 1864
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
1865
 *
1866 1867
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
1868 1869 1870 1871 1872 1873
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

1874
	ret = __vmalloc_node(size, SHMLBA,
1875
			     GFP_KERNEL | __GFP_ZERO,
D
David Rientjes 已提交
1876 1877
			     PAGE_KERNEL, NUMA_NO_NODE,
			     __builtin_return_address(0));
1878
	if (ret) {
N
Nick Piggin 已提交
1879
		area = find_vm_area(ret);
1880 1881
		area->flags |= VM_USERMAP;
	}
1882 1883 1884 1885
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

C
Christoph Lameter 已提交
1886 1887 1888
/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
1889
 *	@node:		numa node
C
Christoph Lameter 已提交
1890 1891 1892 1893
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
1894
 *	For tight control over page level allocator and protection flags
C
Christoph Lameter 已提交
1895 1896 1897 1898
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
1899
	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
1900
					node, __builtin_return_address(0));
C
Christoph Lameter 已提交
1901 1902 1903
}
EXPORT_SYMBOL(vmalloc_node);

1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918
/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
1919
			 GFP_KERNEL | __GFP_ZERO);
1920 1921 1922
}
EXPORT_SYMBOL(vzalloc_node);

1923 1924 1925 1926
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

L
Linus Torvalds 已提交
1927 1928 1929 1930 1931 1932 1933 1934
/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
1935
 *	For tight control over page level allocator and protection flags
L
Linus Torvalds 已提交
1936 1937 1938 1939 1940
 *	use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
1941
	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC,
D
David Rientjes 已提交
1942
			      NUMA_NO_NODE, __builtin_return_address(0));
L
Linus Torvalds 已提交
1943 1944
}

1945
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
1946
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
1947
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
1948
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
1949 1950 1951 1952
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

L
Linus Torvalds 已提交
1953 1954 1955 1956 1957 1958 1959 1960 1961
/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
1962
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
D
David Rientjes 已提交
1963
			      NUMA_NO_NODE, __builtin_return_address(0));
L
Linus Torvalds 已提交
1964 1965 1966
}
EXPORT_SYMBOL(vmalloc_32);

1967
/**
1968
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
1969
 *	@size:		allocation size
1970 1971 1972
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
1973 1974 1975 1976 1977 1978
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

1979
	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
D
David Rientjes 已提交
1980
			     NUMA_NO_NODE, __builtin_return_address(0));
1981
	if (ret) {
N
Nick Piggin 已提交
1982
		area = find_vm_area(ret);
1983 1984
		area->flags |= VM_USERMAP;
	}
1985 1986 1987 1988
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001
/*
 * small helper routine , copy contents to buf from addr.
 * If the page is not present, fill zero.
 */

static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

2002
		offset = offset_in_page(addr);
2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calles for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
2019
			void *map = kmap_atomic(p);
2020
			memcpy(buf, map + offset, length);
2021
			kunmap_atomic(map);
2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

2041
		offset = offset_in_page(addr);
2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calles for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
2058
			void *map = kmap_atomic(p);
2059
			memcpy(map + offset, buf, length);
2060
			kunmap_atomic(map);
2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 *	vread() -  read vmalloc area in a safe way.
 *	@buf:		buffer for reading data
 *	@addr:		vm address.
 *	@count:		number of bytes to be read.
 *
 *	Returns # of bytes which addr and buf should be increased.
 *	(same number to @count). Returns 0 if [addr...addr+count) doesn't
 *	includes any intersect with alive vmalloc area.
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	copy data from that area to a given buffer. If the given memory range
 *	of [addr...addr+count) includes some valid address, data is copied to
 *	proper area of @buf. If there are memory holes, they'll be zero-filled.
 *	IOREMAP area is treated as memory hole and no copy is done.
 *
 *	If [addr...addr+count) doesn't includes any intersects with alive
2087
 *	vm_struct area, returns 0. @buf should be kernel's buffer.
2088 2089 2090 2091 2092 2093 2094 2095
 *
 *	Note: In usual ops, vread() is never necessary because the caller
 *	should know vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access vmalloc area without
 *	any informaion, as /dev/kmem.
 *
 */

L
Linus Torvalds 已提交
2096 2097
long vread(char *buf, char *addr, unsigned long count)
{
2098 2099
	struct vmap_area *va;
	struct vm_struct *vm;
L
Linus Torvalds 已提交
2100
	char *vaddr, *buf_start = buf;
2101
	unsigned long buflen = count;
L
Linus Torvalds 已提交
2102 2103 2104 2105 2106 2107
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

2108 2109 2110 2111 2112 2113 2114 2115 2116 2117
	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
2118
		if (addr >= vaddr + get_vm_area_size(vm))
L
Linus Torvalds 已提交
2119 2120 2121 2122 2123 2124 2125 2126 2127
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
2128
		n = vaddr + get_vm_area_size(vm) - addr;
2129 2130
		if (n > count)
			n = count;
2131
		if (!(vm->flags & VM_IOREMAP))
2132 2133 2134 2135 2136 2137
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
L
Linus Torvalds 已提交
2138 2139
	}
finished:
2140
	spin_unlock(&vmap_area_lock);
2141 2142 2143 2144 2145 2146 2147 2148

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
L
Linus Torvalds 已提交
2149 2150
}

2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168
/**
 *	vwrite() -  write vmalloc area in a safe way.
 *	@buf:		buffer for source data
 *	@addr:		vm address.
 *	@count:		number of bytes to be read.
 *
 *	Returns # of bytes which addr and buf should be incresed.
 *	(same number to @count).
 *	If [addr...addr+count) doesn't includes any intersect with valid
 *	vmalloc area, returns 0.
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	copy data from a buffer to the given addr. If specified range of
 *	[addr...addr+count) includes some valid address, data is copied from
 *	proper area of @buf. If there are memory holes, no copy to hole.
 *	IOREMAP area is treated as memory hole and no copy is done.
 *
 *	If [addr...addr+count) doesn't includes any intersects with alive
2169
 *	vm_struct area, returns 0. @buf should be kernel's buffer.
2170 2171 2172 2173 2174 2175 2176
 *
 *	Note: In usual ops, vwrite() is never necessary because the caller
 *	should know vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access vmalloc area without
 *	any informaion, as /dev/kmem.
 */

L
Linus Torvalds 已提交
2177 2178
long vwrite(char *buf, char *addr, unsigned long count)
{
2179 2180
	struct vmap_area *va;
	struct vm_struct *vm;
2181 2182 2183
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;
L
Linus Torvalds 已提交
2184 2185 2186 2187

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
2188
	buflen = count;
L
Linus Torvalds 已提交
2189

2190 2191 2192 2193 2194 2195 2196 2197 2198 2199
	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
2200
		if (addr >= vaddr + get_vm_area_size(vm))
L
Linus Torvalds 已提交
2201 2202 2203 2204 2205 2206 2207 2208
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
2209
		n = vaddr + get_vm_area_size(vm) - addr;
2210 2211
		if (n > count)
			n = count;
2212
		if (!(vm->flags & VM_IOREMAP)) {
2213 2214 2215 2216 2217 2218
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
L
Linus Torvalds 已提交
2219 2220
	}
finished:
2221
	spin_unlock(&vmap_area_lock);
2222 2223 2224
	if (!copied)
		return 0;
	return buflen;
L
Linus Torvalds 已提交
2225
}
2226 2227

/**
2228 2229 2230 2231 2232
 *	remap_vmalloc_range_partial  -  map vmalloc pages to userspace
 *	@vma:		vma to cover
 *	@uaddr:		target user address to start at
 *	@kaddr:		virtual address of vmalloc kernel memory
 *	@size:		size of map area
2233 2234
 *
 *	Returns:	0 for success, -Exxx on failure
2235
 *
2236 2237 2238 2239
 *	This function checks that @kaddr is a valid vmalloc'ed area,
 *	and that it is big enough to cover the range starting at
 *	@uaddr in @vma. Will return failure if that criteria isn't
 *	met.
2240
 *
2241
 *	Similar to remap_pfn_range() (see mm/memory.c)
2242
 */
2243 2244
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long size)
2245 2246 2247
{
	struct vm_struct *area;

2248 2249 2250
	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2251 2252
		return -EINVAL;

2253
	area = find_vm_area(kaddr);
2254
	if (!area)
N
Nick Piggin 已提交
2255
		return -EINVAL;
2256 2257

	if (!(area->flags & VM_USERMAP))
N
Nick Piggin 已提交
2258
		return -EINVAL;
2259

2260
	if (kaddr + size > area->addr + area->size)
N
Nick Piggin 已提交
2261
		return -EINVAL;
2262 2263

	do {
2264
		struct page *page = vmalloc_to_page(kaddr);
N
Nick Piggin 已提交
2265 2266
		int ret;

2267 2268 2269 2270 2271
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
2272 2273 2274
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);
2275

2276
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
2277

N
Nick Piggin 已提交
2278
	return 0;
2279
}
2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302
EXPORT_SYMBOL(remap_vmalloc_range_partial);

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	that criteria isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr + (pgoff << PAGE_SHIFT),
					   vma->vm_end - vma->vm_start);
}
2303 2304
EXPORT_SYMBOL(remap_vmalloc_range);

2305 2306 2307 2308
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
2309
void __weak vmalloc_sync_all(void)
2310 2311
{
}
2312 2313


2314
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
2315
{
2316 2317 2318 2319 2320 2321
	pte_t ***p = data;

	if (p) {
		*(*p) = pte;
		(*p)++;
	}
2322 2323 2324 2325 2326 2327
	return 0;
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
2328
 *	@ptes:		returns the PTEs for the address space
2329 2330
 *
 *	Returns:	NULL on failure, vm_struct on success
2331 2332 2333
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
2334 2335 2336 2337
 *	are created.
 *
 *	If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 *	allocated for the VM area are returned.
2338
 */
2339
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
2340 2341 2342
{
	struct vm_struct *area;

2343 2344
	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
2345 2346 2347 2348 2349 2350 2351 2352
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2353
				size, f, ptes ? &ptes : NULL)) {
2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369
		free_vm_area(area);
		return NULL;
	}

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
2370

2371
#ifdef CONFIG_SMP
2372 2373
static struct vmap_area *node_to_va(struct rb_node *n)
{
G
Geliang Tang 已提交
2374
	return rb_entry_safe(n, struct vmap_area, rb_node);
2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466
}

/**
 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
 * @end: target address
 * @pnext: out arg for the next vmap_area
 * @pprev: out arg for the previous vmap_area
 *
 * Returns: %true if either or both of next and prev are found,
 *	    %false if no vmap_area exists
 *
 * Find vmap_areas end addresses of which enclose @end.  ie. if not
 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
 */
static bool pvm_find_next_prev(unsigned long end,
			       struct vmap_area **pnext,
			       struct vmap_area **pprev)
{
	struct rb_node *n = vmap_area_root.rb_node;
	struct vmap_area *va = NULL;

	while (n) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if (end < va->va_end)
			n = n->rb_left;
		else if (end > va->va_end)
			n = n->rb_right;
		else
			break;
	}

	if (!va)
		return false;

	if (va->va_end > end) {
		*pnext = va;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	} else {
		*pprev = va;
		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
	}
	return true;
}

/**
 * pvm_determine_end - find the highest aligned address between two vmap_areas
 * @pnext: in/out arg for the next vmap_area
 * @pprev: in/out arg for the previous vmap_area
 * @align: alignment
 *
 * Returns: determined end address
 *
 * Find the highest aligned address between *@pnext and *@pprev below
 * VMALLOC_END.  *@pnext and *@pprev are adjusted so that the aligned
 * down address is between the end addresses of the two vmap_areas.
 *
 * Please note that the address returned by this function may fall
 * inside *@pnext vmap_area.  The caller is responsible for checking
 * that.
 */
static unsigned long pvm_determine_end(struct vmap_area **pnext,
				       struct vmap_area **pprev,
				       unsigned long align)
{
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (*pnext)
		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
	else
		addr = vmalloc_end;

	while (*pprev && (*pprev)->va_end > addr) {
		*pnext = *pprev;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	}

	return addr;
}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
2467 2468 2469 2470
 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes.  To avoid interacting with regular vmallocs, these
 * areas are allocated from top.
2471 2472 2473 2474 2475 2476
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans areas from the end looking for
 * matching slot.  While scanning, if any of the areas overlaps with
 * existing vmap_area, the base address is pulled down to fit the
 * area.  Scanning is repeated till all the areas fit and then all
2477
 * necessary data structures are inserted and the result is returned.
2478 2479 2480
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
2481
				     size_t align)
2482 2483 2484 2485 2486 2487 2488 2489 2490 2491
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *prev, *next;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, end, last_end;
	bool purged = false;

	/* verify parameters and allocate data structures */
2492
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

2505
		for (area2 = area + 1; area2 < nr_vms; area2++) {
2506 2507 2508
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

2509
			BUG_ON(start2 < end && start < end2);
2510 2511 2512 2513 2514 2515 2516 2517 2518
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

2519 2520
	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
2521
	if (!vas || !vms)
2522
		goto err_free2;
2523 2524

	for (area = 0; area < nr_vms; area++) {
2525 2526
		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
		base = vmalloc_end - last_end;
		goto found;
	}
	base = pvm_determine_end(&next, &prev, align) - end;

	while (true) {
		BUG_ON(next && next->va_end <= base + end);
		BUG_ON(prev && prev->va_end > base + end);

		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end) {
			spin_unlock(&vmap_area_lock);
			if (!purged) {
				purge_vmap_area_lazy();
				purged = true;
				goto retry;
			}
			goto err_free;
		}

		/*
		 * If next overlaps, move base downwards so that it's
		 * right below next and then recheck.
		 */
		if (next && next->va_start < base + end) {
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If prev overlaps, shift down next and prev and move
		 * base so that it's right below new next and then
		 * recheck.
		 */
		if (prev && prev->va_end > base + start)  {
			next = prev;
			prev = node_to_va(rb_prev(&next->rb_node));
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;
		start = offsets[area];
		end = start + sizes[area];
		pvm_find_next_prev(base + end, &next, &prev);
	}
found:
	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_area *va = vas[area];

		va->va_start = base + offsets[area];
		va->va_end = va->va_start + sizes[area];
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = base + offsets[last_area];

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
2612 2613
		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);
2614 2615 2616 2617 2618 2619

	kfree(vas);
	return vms;

err_free:
	for (area = 0; area < nr_vms; area++) {
2620 2621
		kfree(vas[area]);
		kfree(vms[area]);
2622
	}
2623
err_free2:
2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643
	kfree(vas);
	kfree(vms);
	return NULL;
}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
2644
#endif	/* CONFIG_SMP */
2645 2646 2647

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
2648
	__acquires(&vmap_area_lock)
2649
{
2650
	spin_lock(&vmap_area_lock);
2651
	return seq_list_start(&vmap_area_list, *pos);
2652 2653 2654 2655
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
2656
	return seq_list_next(p, &vmap_area_list, pos);
2657 2658 2659
}

static void s_stop(struct seq_file *m, void *p)
2660
	__releases(&vmap_area_lock)
2661
{
2662
	spin_unlock(&vmap_area_lock);
2663 2664
}

E
Eric Dumazet 已提交
2665 2666
static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
2667
	if (IS_ENABLED(CONFIG_NUMA)) {
E
Eric Dumazet 已提交
2668 2669 2670 2671 2672
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

2673 2674
		if (v->flags & VM_UNINITIALIZED)
			return;
2675 2676
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();
2677

E
Eric Dumazet 已提交
2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688
		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

2689 2690
static int s_show(struct seq_file *m, void *p)
{
2691
	struct vmap_area *va;
2692 2693
	struct vm_struct *v;

2694 2695
	va = list_entry(p, struct vmap_area, list);

2696 2697 2698 2699
	/*
	 * s_show can encounter race with remove_vm_area, !VM_VM_AREA on
	 * behalf of vmap area is being tear down or vm_map_ram allocation.
	 */
2700 2701 2702 2703 2704 2705
	if (!(va->flags & VM_VM_AREA)) {
		seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start,
			va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");

2706
		return 0;
2707
	}
2708 2709

	v = va->vm;
2710

K
Kees Cook 已提交
2711
	seq_printf(m, "0x%pK-0x%pK %7ld",
2712 2713
		v->addr, v->addr + v->size, v->size);

J
Joe Perches 已提交
2714 2715
	if (v->caller)
		seq_printf(m, " %pS", v->caller);
2716

2717 2718 2719 2720
	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
2721
		seq_printf(m, " phys=%pa", &v->phys_addr);
2722 2723

	if (v->flags & VM_IOREMAP)
2724
		seq_puts(m, " ioremap");
2725 2726

	if (v->flags & VM_ALLOC)
2727
		seq_puts(m, " vmalloc");
2728 2729

	if (v->flags & VM_MAP)
2730
		seq_puts(m, " vmap");
2731 2732

	if (v->flags & VM_USERMAP)
2733
		seq_puts(m, " user");
2734

D
David Rientjes 已提交
2735
	if (is_vmalloc_addr(v->pages))
2736
		seq_puts(m, " vpages");
2737

E
Eric Dumazet 已提交
2738
	show_numa_info(m, v);
2739 2740 2741 2742
	seq_putc(m, '\n');
	return 0;
}

2743
static const struct seq_operations vmalloc_op = {
2744 2745 2746 2747 2748
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
2749 2750 2751

static int vmalloc_open(struct inode *inode, struct file *file)
{
2752 2753 2754 2755 2756
	if (IS_ENABLED(CONFIG_NUMA))
		return seq_open_private(file, &vmalloc_op,
					nr_node_ids * sizeof(unsigned int));
	else
		return seq_open(file, &vmalloc_op);
2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);
2772

2773 2774
#endif