#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
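
/*
 * Example (illustrative sketch, not part of the original file): replace a
 * previously stored string. Note that kstrdup() returns NULL both on
 * allocation failure and when the source string itself is NULL.
 */
static int example_store_string(const char *src, char **dst)
{
	char *copy = kstrdup(src, GFP_KERNEL);

	if (!copy && src)
		return -ENOMEM;		/* real OOM, not just a NULL source */
	kfree(*dst);			/* kfree(NULL) is a no-op */
	*dst = copy;
	return 0;
}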

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
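
/*
 * Example (illustrative sketch, not part of the original file): copy a
 * buffer that may not be NUL-terminated; the duplicate always is.
 */
static char *example_copy_tag(const char *raw_tag)
{
	/* reads at most 15 characters and appends a trailing '\0' */
	return kstrndup(raw_tag, 15, GFP_KERNEL);
}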

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
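
/*
 * Example (illustrative sketch, not part of the original file): duplicate
 * a fixed-size template structure in a single call.
 */
struct example_params {
	int mode;
	unsigned long flags;
};

static struct example_params *example_clone(const struct example_params *tmpl)
{
	return kmemdup(tmpl, sizeof(*tmpl), GFP_KERNEL);	/* NULL on OOM */
}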

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
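
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * ioctl-style caller. Failure is reported as an ERR_PTR(), never NULL.
 */
static int example_copy_blob(const void __user *ubuf, size_t len)
{
	void *kbuf = memdup_user(ubuf, len);

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);	/* -ENOMEM or -EFAULT */
	/* ... work on the kernel copy ... */
	kfree(kbuf);
	return 0;
}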

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but doesn't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except that it never frees the
 * originally allocated buffer. Use this if you don't want the buffer to
 * be freed immediately, for example when RCU readers may still hold a
 * reference to it.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);
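
/*
 * Example (illustrative sketch, not part of the original file): resize a
 * buffer that RCU readers may still be dereferencing, freeing the old
 * copy only after a grace period. Assumes <linux/rcupdate.h> and an
 * external lock serializing updaters.
 */
static char __rcu *example_rcu_buf;

static int example_rcu_resize(size_t new_size)
{
	char *old = rcu_dereference_protected(example_rcu_buf, 1);
	char *new = __krealloc(old, new_size, GFP_KERNEL);

	if (!new)
		return -ENOMEM;
	if (new != old) {
		rcu_assign_pointer(example_rcu_buf, new);
		synchronize_rcu();	/* wait for readers of the old copy */
		kfree(old);
	}
	return 0;
}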

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
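
/*
 * Example (illustrative sketch, not part of the original file): the
 * standard krealloc() idiom - assign to a temporary so the original
 * buffer is not leaked when the allocation fails.
 */
static int example_resize(char **bufp, size_t new_size)
{
	char *tmp = krealloc(*bufp, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;		/* *bufp is untouched and still valid */
	*bufp = tmp;
	return 0;
}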

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance-sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
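
/*
 * Example (illustrative sketch, not part of the original file): scrub key
 * material on the way out instead of using a plain kfree().
 */
static void example_free_key(u8 *key)
{
	kzfree(key);	/* zeroes the whole allocation before freeing it */
}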

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
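
/*
 * Example (illustrative sketch, not part of the original file): copy a
 * user-supplied path. As with memdup_user(), failure is an ERR_PTR().
 * PATH_MAX comes from <linux/limits.h>.
 */
static int example_get_path(const char __user *upath)
{
	char *path = strndup_user(upath, PATH_MAX);

	if (IS_ERR(path))
		return PTR_ERR(path);	/* -EFAULT or -EINVAL */
	/* ... use the NUL-terminated kernel copy ... */
	kfree(path);
	return 0;
}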

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If in_group is non-zero, check the entire thread group; otherwise
 * check only the given task. Returns the pid of the task the vma is a
 * stack for, or 0 if it is not a stack.
 */
pid_t vm_is_stack(struct task_struct *task,
		  struct vm_area_struct *vma, int in_group)
{
	pid_t ret = 0;

	if (vm_is_stack_for_task(task, vma))
		return task->pid;

	if (in_group) {
		struct task_struct *t;
		rcu_read_lock();
		if (!pid_alive(task))
			goto done;

		t = task;
		do {
			if (vm_is_stack_for_task(t, vma)) {
				ret = t->pid;
				goto done;
			}
		} while_each_thread(task, t);
done:
		rcu_read_unlock();
	}

	return ret;
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * page pinned
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
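
/*
 * Example (illustrative sketch, not part of the original file): pin a
 * single user page for write access, then drop the reference.
 */
static int example_pin_page(unsigned long uaddr)
{
	struct page *page;
	int ret = get_user_pages_fast(uaddr, 1, 1, &page);

	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;	/* nothing was pinned */
	/* ... the page is pinned and safe to access ... */
	put_page(page);		/* release the pin */
	return 0;
}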

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
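
/*
 * Example (illustrative sketch, not part of the original file): map the
 * first page of a file read-only into the current task. PROT_READ and
 * MAP_PRIVATE come from <linux/mman.h>.
 */
static unsigned long example_map_first_page(struct file *file)
{
	/* returns a user address, or a negative errno encoded in the value */
	return vm_mmap(file, 0, PAGE_SIZE, PROT_READ, MAP_PRIVATE, 0);
}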

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
#ifdef CONFIG_SWAP
	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		mapping = swap_address_space(entry);
	} else
#endif
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);