#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 *
 * Return: source string if it is in the .rodata section, otherwise it
 * falls back to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
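
/*
 * Usage sketch for the kstrdup_const()/kfree_const() pair (hypothetical
 * caller; @tmpl is an assumed structure whose ->name may point into
 * .rodata, e.g. a string literal built into the kernel image):
 *
 *	const char *name = kstrdup_const(tmpl->name, GFP_KERNEL);
 *
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(name);
 *
 * When the source is in .rodata no allocation happens, and kfree_const()
 * knows not to kfree() the returned pointer.
 */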

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
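
/*
 * Choosing between kstrndup() and kmemdup_nul() (illustrative sketch;
 * @src, @src_len and MAX_LEN stand for an arbitrary caller's data):
 * kstrndup() must strnlen() the input first, so when the exact size is
 * already known kmemdup_nul() is the cheaper call:
 *
 *	buf = kstrndup(src, MAX_LEN, GFP_KERNEL);	(upper bound known)
 *	buf = kmemdup_nul(src, src_len, GFP_KERNEL);	(exact size known)
 */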

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);
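
/*
 * Usage sketch (hypothetical ioctl handler; @uptr, @len and @big_len are
 * assumed locals): the two helpers pair with different free functions,
 * since vmemdup_user() may hand back vmalloc memory:
 *
 *	small = memdup_user(uptr, len);
 *	if (IS_ERR(small))
 *		return PTR_ERR(small);
 *	...
 *	kfree(small);
 *
 *	big = vmemdup_user(uptr, big_len);
 *	if (IS_ERR(big))
 *		return PTR_ERR(big);
 *	...
 *	kvfree(big);
 *
 * Both return ERR_PTR() on failure, so test with IS_ERR(), not against
 * NULL.
 */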

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
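
/*
 * Usage sketch (hypothetical syscall; @upath is an assumed user
 * pointer): copy at most PATH_MAX bytes, including the trailing NUL:
 *
 *	char *path = strndup_user(upath, PATH_MAX);
 *
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	kfree(path);
 */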

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

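/*
 * Link @vma into @mm's VMA list after @prev; when there is no
 * predecessor, the list head is updated and the successor is derived
 * from @rb_parent instead.
 */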
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * Note a difference with get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 *
 * Return: number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, unsigned int gup_flags,
				struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
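
/*
 * Usage sketch (hypothetical driver doing direct I/O into a user
 * buffer; @uaddr, @pages and @nr are assumed locals):
 *
 *	nr = get_user_pages_fast(uaddr, 16, FOLL_WRITE, pages);
 *	if (nr <= 0)
 *		return -EFAULT;
 *	...let the device write into the pages...
 *	while (nr--)
 *		put_page(pages[nr]);
 *
 * FOLL_WRITE is passed because the pages will be written to; each
 * pinned page must be released with put_page() afterwards.
 */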

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
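
/*
 * Usage sketch (hypothetical in-kernel caller creating an anonymous,
 * read/write mapping in the current process):
 *
 *	unsigned long addr = vm_mmap(NULL, 0, PAGE_SIZE,
 *				     PROT_READ | PROT_WRITE,
 *				     MAP_PRIVATE | MAP_ANONYMOUS, 0);
 *
 *	if (IS_ERR_VALUE(addr))
 *		return addr;
 */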

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Note: any use of gfp flags outside of GFP_KERNEL deliberately skips the
 * vmalloc fallback, because vmalloc internally requires GFP_KERNEL.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
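
/*
 * Usage sketch (hypothetical caller sizing a table from user input;
 * @nr_entries is an assumed variable): a potentially large allocation
 * that prefers kmalloc but survives fragmentation via vmalloc:
 *
 *	table = kvmalloc(array_size(nr_entries, sizeof(*table)),
 *			 GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);
 */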

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < (1 << compound_order(page)); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP, page->_mapcount contains the total number of
	 * mappings of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
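
/*
 * Worked example with hypothetical numbers: 4 GiB of RAM (1048576
 * 4 KiB pages), no huge pages, 2 GiB of swap (524288 pages) and the
 * default sysctl_overcommit_ratio of 50 give a limit of
 * 1048576 * 50 / 100 + 524288 = 1048576 pages, i.e. 4 GiB.
 */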

/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables; it can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_zone_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this category.
		 */
		free += global_node_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Part of the kernel memory, which can be released
		 * under memory pressure.
		 */
		free += global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);

		/*
		 * Leave reserved pages alone; they are not available for
		 * anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume the application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
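
/*
 * Usage sketch (hypothetical debug helper): the returned length must be
 * honoured, because the copy is not guaranteed to be NUL-terminated:
 *
 *	char buf[256];
 *	int n = get_cmdline(task, buf, sizeof(buf));
 *
 *	pr_info("cmdline: %.*s\n", n, buf);
 */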